ogl_beamforming

Ultrasound Beamforming Implemented with OpenGL
git clone anongit@rnpnr.xyz:ogl_beamforming.git
Log | Files | Refs | Feed | Submodules | README | LICENSE

ogl_beamformer_lib.c (20771B)


      1 /* See LICENSE for license details. */
      2 #include "../compiler.h"
      3 
      4 #include "../util.h"
      5 #include "../beamformer_parameters.h"
      6 #include "ogl_beamformer_lib_base.h"
      7 
      8 #if OS_LINUX
      9 #include "../os_linux.c"
     10 #elif OS_WINDOWS
     11 #include "../os_win32.c"
     12 
     13 W32(iptr) OpenFileMappingA(u32, b32, c8 *);
     14 
     15 #else
     16 #error Unsupported Platform
     17 #endif
     18 
     19 #include "../beamformer_shared_memory.c"
     20 
/* NOTE: process-global library state; every public entry point funnels through this */
global struct {
	SharedMemoryRegion      shared_memory; /* mapping of the beamformer's shared memory */
	BeamformerSharedMemory *bp;            /* typed view of shared_memory.region, set once validated */
	i32                     timeout_ms;    /* default lock timeout used by the non-explicit wrappers */
	BeamformerLibErrorKind  last_error;    /* recorded by lib_error_check() on any failure */
} g_beamformer_library_context;
     27 
     28 #if OS_LINUX
     29 
     30 function b32
     31 os_reserve_region_locks(iptr os_context, u32 count)
     32 {
     33 	b32 result = count <= BeamformerMaxParameterBlockSlots;
     34 	return result;
     35 }
     36 
     37 function SharedMemoryRegion
     38 os_open_shared_memory_area(char *name)
     39 {
     40 	SharedMemoryRegion result = {0};
     41 	i32 fd = shm_open(name, O_RDWR, S_IRUSR|S_IWUSR);
     42 	if (fd > 0) {
     43 		void *new = mmap(0, BEAMFORMER_SHARED_MEMORY_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
     44 		if (new != MAP_FAILED) result.region = new;
     45 		close(fd);
     46 	}
     47 	return result;
     48 }
     49 
     50 #elif OS_WINDOWS
     51 
     52 function b32
     53 os_reserve_region_locks(iptr os_context, u32 count)
     54 {
     55 	local_persist iptr semaphores[BeamformerSharedMemoryLockKind_Count + BeamformerMaxParameterBlockSlots];
     56 	w32_shared_memory_context *ctx = (typeof(ctx))os_context;
     57 
     58 	b32 result = count <= BeamformerMaxParameterBlockSlots;
     59 	if (result) {
     60 		count += BeamformerSharedMemoryLockKind_Count;
     61 		if (count > ctx->reserved_count) {
     62 			u8 buffer[1024];
     63 			Stream sb = {.data = buffer, .cap = countof(buffer)};
     64 			stream_append_s8(&sb, s8(OS_SHARED_MEMORY_NAME "_lock_"));
     65 
     66 			for (u32 i = ctx->reserved_count; i < count; i++) {
     67 				Stream lb = sb;
     68 				stream_append_u64(&lb, i);
     69 				stream_append_byte(&lb, 0);
     70 				semaphores[i]  = CreateSemaphoreA(0, 1, 1, (c8 *)lb.data);
     71 				result        &= semaphores[i] != INVALID_FILE;
     72 			}
     73 
     74 			if (result) {
     75 				ctx->semaphores     = semaphores;
     76 				ctx->reserved_count = count;
     77 			}
     78 		} else if (count < ctx->reserved_count) {
     79 			for (u32 i = ctx->reserved_count; i >= count;)
     80 				CloseHandle(semaphores[--i]);
     81 			ctx->reserved_count = count;
     82 		}
     83 	}
     84 	return result;
     85 }
     86 
function SharedMemoryRegion
os_open_shared_memory_area(char *name)
{
	/* NOTE: must persist for the library's lifetime; result.os_context points at it */
	local_persist w32_shared_memory_context ctx = {0};
	SharedMemoryRegion result = {0};
	iptr h = OpenFileMappingA(FILE_MAP_ALL_ACCESS, 0, name);
	if (h != INVALID_FILE) {
		void *new = MapViewOfFile(h, FILE_MAP_ALL_ACCESS, 0, 0, BEAMFORMER_SHARED_MEMORY_SIZE);
		/* count 1 reserves the fixed lock kinds plus one block lock */
		/* NOTE(review): if the view maps but lock reservation fails, the view
		 * is never unmapped - confirm this leak is acceptable */
		if (new && os_reserve_region_locks((iptr)&ctx, 1)) {
			result.region     = new;
			result.os_context = (iptr)&ctx;
		}
		/* the mapped view stays valid after the file-mapping handle is closed */
		CloseHandle(h);
	}
	return result;
}
    103 
    104 #endif
    105 
    106 function b32
    107 lib_error_check(b32 condition, BeamformerLibErrorKind error_kind)
    108 {
    109 	b32 result = condition;
    110 	if (!result) g_beamformer_library_context.last_error = error_kind;
    111 	return result;
    112 }
    113 
/* Lazily opens and validates the shared memory region published by the
 * beamformer; returns 1 when g_beamformer_library_context.bp is usable.
 * Sets last_error via lib_error_check() on every failure path. */
function b32
check_shared_memory(void)
{
	if (!g_beamformer_library_context.shared_memory.region) {
		g_beamformer_library_context.shared_memory = os_open_shared_memory_area(OS_SHARED_MEMORY_NAME);
		if (lib_error_check(g_beamformer_library_context.shared_memory.region != 0, BF_LIB_ERR_KIND_SHARED_MEMORY)) {
			/* only accept a region whose layout version matches the one this
			 * library was compiled against */
			u32 version = ((BeamformerSharedMemory *)g_beamformer_library_context.shared_memory.region)->version;
			if (lib_error_check(version == BEAMFORMER_SHARED_MEMORY_VERSION, BF_LIB_ERR_KIND_VERSION_MISMATCH))
				g_beamformer_library_context.bp = g_beamformer_library_context.shared_memory.region;
		}
	}

	/* a previously valid region can still be flagged invalid by the beamformer */
	b32 result = 0;
	if (g_beamformer_library_context.bp)
		result = lib_error_check(!g_beamformer_library_context.bp->invalid, BF_LIB_ERR_KIND_INVALID_ACCESS);
	return result;
}
    131 
    132 function b32
    133 valid_parameter_block(u32 block)
    134 {
    135 	b32 result = check_shared_memory();
    136 	if (result) {
    137 		result = lib_error_check(block < g_beamformer_library_context.bp->reserved_parameter_blocks,
    138 		                         BF_LIB_ERR_KIND_PARAMETER_BLOCK_UNALLOCATED);
    139 	}
    140 	return result;
    141 }
    142 
    143 function BeamformWork *
    144 try_push_work_queue(void)
    145 {
    146 	BeamformWork *result = beamform_work_queue_push(&g_beamformer_library_context.bp->external_work_queue);
    147 	lib_error_check(result != 0, BF_LIB_ERR_KIND_WORK_QUEUE_FULL);
    148 	return result;
    149 }
    150 
    151 function b32
    152 lib_try_lock(i32 lock, i32 timeout_ms)
    153 {
    154 	b32 result = os_shared_memory_region_lock(&g_beamformer_library_context.shared_memory,
    155 	                                          g_beamformer_library_context.bp->locks,
    156 	                                          lock, (u32)timeout_ms);
    157 	lib_error_check(result, BF_LIB_ERR_KIND_SYNC_VARIABLE);
    158 	return result;
    159 }
    160 
    161 function void
    162 lib_release_lock(i32 lock)
    163 {
    164 	os_shared_memory_region_unlock(&g_beamformer_library_context.shared_memory,
    165 	                               g_beamformer_library_context.bp->locks, (i32)lock);
    166 }
    167 
    168 u32
    169 beamformer_get_api_version(void)
    170 {
    171 	return BEAMFORMER_SHARED_MEMORY_VERSION;
    172 }
    173 
/* Maps an error kind to a static human-readable string; any out-of-range
 * kind maps to the trailing "invalid error kind" entry. */
const char *
beamformer_error_string(BeamformerLibErrorKind kind)
{
	/* NOTE: X-macro expansion of BEAMFORMER_LIB_ERRORS fills the table */
	#define X(type, num, string) string,
	local_persist const char *error_string_table[] = {BEAMFORMER_LIB_ERRORS "invalid error kind"};
	#undef X
	return error_string_table[MIN(kind, countof(error_string_table) - 1)];
}
    182 
    183 BeamformerLibErrorKind
    184 beamformer_get_last_error(void)
    185 {
    186 	return g_beamformer_library_context.last_error;
    187 }
    188 
    189 const char *
    190 beamformer_get_last_error_string(void)
    191 {
    192 	return beamformer_error_string(beamformer_get_last_error());
    193 }
    194 
    195 b32
    196 beamformer_set_global_timeout(i32 timeout_ms)
    197 {
    198 	b32 result = lib_error_check(timeout_ms >= -1, BF_LIB_ERR_KIND_INVALID_TIMEOUT);
    199 	if (result) g_beamformer_library_context.timeout_ms = timeout_ms;
    200 	return result;
    201 }
    202 
/* Reserves count parameter block slots (and their per-block locks) in shared
 * memory, zeroing any newly added blocks. Returns 0 with last_error set when
 * shared memory is unavailable or count exceeds the supported slot count. */
b32
beamformer_reserve_parameter_blocks(uint32_t count)
{
	b32 result = 0;
	if (check_shared_memory() &&
	    lib_error_check(os_reserve_region_locks(g_beamformer_library_context.shared_memory.os_context, count),
	                    BF_LIB_ERR_KIND_PARAMETER_BLOCK_OVERFLOW))
	{
		u32 old_count = g_beamformer_library_context.bp->reserved_parameter_blocks;
		g_beamformer_library_context.bp->reserved_parameter_blocks = count;
		/* zero only blocks that were not previously reserved; shrinking
		 * leaves existing block contents untouched */
		for (u32 i = old_count; i < count; i++)
			zero_struct(beamformer_parameter_block(g_beamformer_library_context.bp, i));
		result = 1;
	}
	return result;
}
    219 
/* Validates a compute pipeline description: stage count within limits, every
 * shader id a valid compute shader, the first stage either Decode or
 * Demodulate, and Demodulate paired only with int16/float32 input data.
 * Sets last_error and returns 0 on the first violated rule.
 * NOTE(review): shader_count == 0 still reads shaders[0] below - confirm
 * callers never pass an empty pipeline. */
function b32
validate_pipeline(i32 *shaders, u32 shader_count, BeamformerDataKind data_kind)
{
	b32 result = 1;
	if (lib_error_check(shader_count <= BeamformerMaxComputeShaderStages,
	                    BF_LIB_ERR_KIND_COMPUTE_STAGE_OVERFLOW))
	{
		for (u32 i = 0; i < shader_count; i++)
			result &= BETWEEN(shaders[i], 0, BeamformerShaderKind_ComputeCount);
		if (!result) {
			g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_COMPUTE_STAGE;
		} else if (shaders[0] != BeamformerShaderKind_Demodulate &&
		           shaders[0] != BeamformerShaderKind_Decode)
		{
			g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_START_SHADER;
			result = 0;
		} else if (shaders[0] == BeamformerShaderKind_Demodulate &&
		           !(data_kind == BeamformerDataKind_Int16 || data_kind == BeamformerDataKind_Float32))
		{
			g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_DEMOD_DATA_KIND;
			result = 0;
		}
	}
	return result;
}
    245 
/* Copies size bytes into parameter block `block` at block_offset under the
 * block's lock and marks region_id dirty so the beamformer re-reads it.
 * Returns 0 if the block is invalid or the lock times out. */
function b32
parameter_block_region_upload_explicit(void *data, u32 size, u32 block, BeamformerParameterBlockRegions region_id,
                                       u32 block_offset, i32 timeout_ms)
{
	/* per-block locks start immediately after the fixed lock kinds */
	i32 lock   = BeamformerSharedMemoryLockKind_Count + (i32)block;
	b32 result = valid_parameter_block(block) && lib_try_lock(lock, timeout_ms);
	if (result) {
		mem_copy((u8 *)beamformer_parameter_block(g_beamformer_library_context.bp, block) + block_offset,
		         data, size);
		mark_parameter_block_region_dirty(g_beamformer_library_context.bp, block, region_id);
		lib_release_lock(lock);
	}
	return result;
}
    260 
    261 
    262 function b32
    263 parameter_block_region_upload(void *data, u32 size, u32 block,
    264                               BeamformerParameterBlockRegions region_id, i32 timeout_ms)
    265 {
    266 	assert(region_id < BeamformerParameterBlockRegion_Count);
    267 	b32 result = parameter_block_region_upload_explicit(data, size, block, region_id,
    268 	                                                    BeamformerParameterBlockRegionOffsets[region_id],
    269 	                                                    timeout_ms);
    270 	return result;
    271 }
    272 
/* Writes the parameters of a single pipeline stage in parameter block
 * `block`. stage_index is wrapped modulo the maximum stage count rather
 * than rejected when out of range. */
b32
beamformer_set_pipeline_stage_parameters_at(u32 stage_index, i32 parameter, u32 block)
{
	/* byte offset of pipeline.parameters[stage_index] within the block */
	u32 offset  = BeamformerParameterBlockRegionOffsets[BeamformerParameterBlockRegion_ComputePipeline];
	offset     += offsetof(BeamformerComputePipeline, parameters);
	offset     += (stage_index % BeamformerMaxComputeShaderStages) * sizeof(BeamformerShaderParameters);
	b32 result  = parameter_block_region_upload_explicit(&parameter, sizeof(BeamformerShaderParameters), block,
	                                                     BeamformerParameterBlockRegion_ComputePipeline, offset,
	                                                     g_beamformer_library_context.timeout_ms);
	return result;
}
    284 
    285 b32
    286 beamformer_set_pipeline_stage_parameters(u32 stage_index, i32 parameter)
    287 {
    288 	b32 result = beamformer_set_pipeline_stage_parameters_at(stage_index, parameter, 0);
    289 	return result;
    290 }
    291 
/* Replaces the compute pipeline of parameter block `block` after validating
 * it: copies the shader list under the block lock, marks the pipeline region
 * dirty, then records count and data kind. Returns 0 on validation failure,
 * bad block index, or lock timeout. */
b32
beamformer_push_pipeline_at(i32 *shaders, u32 shader_count, BeamformerDataKind data_kind, u32 block)
{
	b32 result = 0;
	if (validate_pipeline(shaders, shader_count, data_kind)) {
		/* per-block locks start after the fixed lock kinds */
		i32 lock = BeamformerSharedMemoryLockKind_Count + (i32)block;
		if (valid_parameter_block(block) && lib_try_lock(lock, g_beamformer_library_context.timeout_ms)) {
			BeamformerParameterBlock *b = beamformer_parameter_block(g_beamformer_library_context.bp, block);
			mem_copy(&b->pipeline.shaders, shaders, shader_count * sizeof(*shaders));
			mark_parameter_block_region_dirty(g_beamformer_library_context.bp, block,
			                                  BeamformerParameterBlockRegion_ComputePipeline);
			b->pipeline.shader_count = shader_count;
			b->pipeline.data_kind    = data_kind;
			lib_release_lock(lock);
			result = 1;
		}
	}
	return result;
}
    311 
    312 b32
    313 beamformer_push_pipeline(i32 *shaders, u32 shader_count, BeamformerDataKind data_kind)
    314 {
    315 	b32 result = beamformer_push_pipeline_at(shaders, shader_count, data_kind, 0);
    316 	return result;
    317 }
    318 
/* Queues a CreateFilter work item for the beamformer. Slot indices are
 * wrapped into their valid ranges rather than rejected. Returns 0 when
 * shared memory is unavailable or the work queue is full. */
function b32
beamformer_create_filter_base(BeamformerFilterKind kind, BeamformerFilterParameters params, u8 filter_slot, u8 parameter_block)
{
	b32 result = 0;
	if (check_shared_memory()) {
		BeamformWork *work = try_push_work_queue();
		if (work) {
			BeamformerCreateFilterContext *ctx = &work->create_filter_context;
			work->kind = BeamformerWorkKind_CreateFilter;
			ctx->kind            = kind;
			ctx->parameters      = params;
			ctx->filter_slot     = filter_slot     % BeamformerFilterSlots;
			ctx->parameter_block = parameter_block % BeamformerMaxParameterBlockSlots;
			beamform_work_queue_push_commit(&g_beamformer_library_context.bp->external_work_queue);
			result = 1;
		}
	}
	return result;
}
    338 
/* Public filter creation: validates the filter kind and that the caller
 * passed exactly the number of f32 parameters the kind's parameter struct
 * holds, then forwards to beamformer_create_filter_base(). */
b32
beamformer_create_filter(BeamformerFilterKind kind, f32 *filter_parameters, u32 filter_parameter_count,
                         f32 sampling_frequency, b32 complex, u8 filter_slot, u8 parameter_block)
{
	b32 result = 0;
	if (lib_error_check(kind >= 0 && kind < BeamformerFilterKind_Count, BF_LIB_ERR_KIND_INVALID_FILTER_KIND)) {
		BeamformerFilterParameters fp = {.sampling_frequency = sampling_frequency, .complex = complex != 0};
		/* X-macro table of per-kind parameter struct sizes, indexed by kind */
		#define X(kind, ...) sizeof(fp.kind),
		read_only local_persist u32 kind_sizes[] = {BEAMFORMER_FILTER_KIND_LIST(,)};
		#undef X
		if (lib_error_check(kind_sizes[kind] == sizeof(f32) * filter_parameter_count,
		                    BF_LIB_ERR_KIND_INVALID_FILTER_PARAM_COUNT))
		{
			/* NOTE(rnp): any filter kind struct works as base offset of union */
			mem_copy(&fp.Kaiser, filter_parameters, kind_sizes[kind]);
			result = beamformer_create_filter_base(kind, fp, filter_slot, parameter_block);
		}
	}
	return result;
}
    359 
/* Waits (up to timeout_ms) on the DispatchCompute lock. The lock is
 * intentionally NOT released here; see beamformer_wait_for_compute_dispatch()
 * which releases it after a successful wait. */
function b32
beamformer_flush_commands(i32 timeout_ms)
{
	b32 result = lib_try_lock(BeamformerSharedMemoryLockKind_DispatchCompute, timeout_ms);
	return result;
}
    366 
/* Queues a ComputeIndirect work item for the given view plane and parameter
 * block, then nudges the beamformer with a zero-timeout flush. Returns 0 on
 * any validation or queue failure. */
function b32
beamformer_compute_indirect(BeamformerViewPlaneTag tag, u32 block)
{
	b32 result = 0;
	if (check_shared_memory() &&
	    lib_error_check(tag   < BeamformerViewPlaneTag_Count, BF_LIB_ERR_KIND_INVALID_IMAGE_PLANE) &&
	    lib_error_check(block < g_beamformer_library_context.bp->reserved_parameter_blocks,
	                    BF_LIB_ERR_KIND_PARAMETER_BLOCK_UNALLOCATED))
	{
		BeamformWork *work = try_push_work_queue();
		if (work) {
			work->kind = BeamformerWorkKind_ComputeIndirect;
			work->compute_indirect_context.view_plane      = tag;
			work->compute_indirect_context.parameter_block = block;
			beamform_work_queue_push_commit(&g_beamformer_library_context.bp->external_work_queue);
			beamformer_flush_commands(0);
			result = 1;
		}
	}
	return result;
}
    388 
    389 b32
    390 beamformer_start_compute(void)
    391 {
    392 	b32 result = beamformer_compute_indirect(0, 0);
    393 	return result;
    394 }
    395 
/* Blocks until the DispatchCompute lock can be taken (or timeout), then
 * releases it again so the beamformer is free to continue. */
b32
beamformer_wait_for_compute_dispatch(i32 timeout_ms)
{
	b32 result = beamformer_flush_commands(timeout_ms);
	/* NOTE(rnp): if you are calling this function you are probably about
	 * to start some other work and it might be better to not do this... */
	if (result) lib_release_lock(BeamformerSharedMemoryLockKind_DispatchCompute);
	return result;
}
    405 
/* X-macro list of (name, element type, elements per entry, region) tuples
 * used to stamp out the table upload entry points below */
#define BEAMFORMER_UPLOAD_FNS \
	X(channel_mapping, i16, 1, ChannelMapping) \
	X(sparse_elements, i16, 1, SparseElements) \
	X(focal_vectors,   f32, 2, FocalVectors)

/* beamformer_push_<name>_at(): bounds-check count against the destination
 * array in the parameter block, then upload into the matching region */
#define X(name, dtype, elements, region_name) \
b32 beamformer_push_##name ##_at(dtype *data, u32 count, u32 block) { \
	b32 result = 0; \
	if (lib_error_check(count <= countof(((BeamformerParameterBlock *)0)->name), BF_LIB_ERR_KIND_BUFFER_OVERFLOW)) { \
		result = parameter_block_region_upload(data, count * elements * sizeof(dtype), block, \
		                                       BeamformerParameterBlockRegion_##region_name,  \
		                                       g_beamformer_library_context.timeout_ms);      \
	} \
	return result; \
}
BEAMFORMER_UPLOAD_FNS
#undef X
    423 
/* beamformer_push_<name>(): same upload against the default block (0) */
#define X(name, dtype, ...) \
b32 beamformer_push_##name (dtype *data, u32 count) { \
	b32 result = beamformer_push_##name ##_at(data, count, 0); \
	return result; \
}
BEAMFORMER_UPLOAD_FNS
#undef X
    431 
/* Copies raw RF data into the shared scratch space: takes UploadRF (NOT
 * released here) and briefly ScratchSpace while copying. Fails when the data
 * exceeds scratch capacity or either lock cannot be taken.
 * NOTE(review): UploadRF appears intentionally left held for the consumer to
 * release - confirm against the beamformer side. */
function b32
beamformer_push_data_base(void *data, u32 data_size, i32 timeout_ms)
{
	b32 result = 0;
	if (check_shared_memory()) {
		Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
		if (lib_error_check(data_size <= arena_capacity(&scratch, u8), BF_LIB_ERR_KIND_BUFFER_OVERFLOW)) {
			if (lib_try_lock(BeamformerSharedMemoryLockKind_UploadRF, timeout_ms)) {
				if (lib_try_lock(BeamformerSharedMemoryLockKind_ScratchSpace, 0)) {
					mem_copy(scratch.beg, data, data_size);
					/* TODO(rnp): need a better way to communicate this */
					g_beamformer_library_context.bp->scratch_rf_size = data_size;
					lib_release_lock(BeamformerSharedMemoryLockKind_ScratchSpace);
					result = 1;
				}
			}
		}
	}
	return result;
}
    452 
    453 b32
    454 beamformer_push_data(void *data, u32 data_size)
    455 {
    456 	return beamformer_push_data_base(data, data_size, g_beamformer_library_context.timeout_ms);
    457 }
    458 
    459 b32
    460 beamformer_push_data_with_compute(void *data, u32 data_size, u32 image_plane_tag, u32 parameter_slot)
    461 {
    462 	b32 result = beamformer_push_data_base(data, data_size, g_beamformer_library_context.timeout_ms);
    463 	if (result) result = beamformer_compute_indirect(image_plane_tag, parameter_slot);
    464 	return result;
    465 }
    466 
    467 b32
    468 beamformer_push_parameters_at(BeamformerParameters *bp, u32 block)
    469 {
    470 	b32 result = parameter_block_region_upload(bp, sizeof(*bp), block,
    471 	                                           BeamformerParameterBlockRegion_Parameters,
    472 	                                           g_beamformer_library_context.timeout_ms);
    473 	return result;
    474 }
    475 
    476 b32
    477 beamformer_push_parameters(BeamformerParameters *bp)
    478 {
    479 	b32 result = beamformer_push_parameters_at(bp, 0);
    480 	return result;
    481 }
    482 
    483 b32
    484 beamformer_push_parameters_ui(BeamformerUIParameters *bp)
    485 {
    486 	b32 result = parameter_block_region_upload_explicit(bp, sizeof(*bp), 0, BeamformerParameterBlockRegion_Parameters,
    487 	                                                    offsetof(BeamformerParameterBlock, parameters_ui),
    488 	                                                    g_beamformer_library_context.timeout_ms);
    489 	return result;
    490 }
    491 
    492 b32
    493 beamformer_push_parameters_head(BeamformerParametersHead *bp)
    494 {
    495 	b32 result = parameter_block_region_upload_explicit(bp, sizeof(*bp), 0, BeamformerParameterBlockRegion_Parameters,
    496 	                                                    offsetof(BeamformerParameterBlock, parameters_head),
    497 	                                                    g_beamformer_library_context.timeout_ms);
    498 	return result;
    499 }
    500 
/* Queues an ExportBuffer work item and takes the ExportSync lock with zero
 * timeout. NOTE(review): beamformer_read_output() later waits on ExportSync
 * again, so the beamformer side presumably releases it when the export is
 * ready - confirm. Returns 0 if the queue is full or the lock is held. */
function b32
beamformer_export_buffer(BeamformerExportContext export_context)
{
	BeamformWork *work = try_push_work_queue();
	b32 result = work && lib_try_lock(BeamformerSharedMemoryLockKind_ExportSync, 0);
	if (result) {
		work->export_context = export_context;
		work->kind = BeamformerWorkKind_ExportBuffer;
		work->lock = BeamformerSharedMemoryLockKind_ScratchSpace;
		beamform_work_queue_push_commit(&g_beamformer_library_context.bp->external_work_queue);
	}
	return result;
}
    514 
/* Waits for a queued export to finish (ExportSync, up to timeout_ms), then
 * copies size bytes from the shared scratch space into out while holding the
 * ScratchSpace lock. Both locks are released before returning. */
function b32
beamformer_read_output(void *out, iz size, i32 timeout_ms)
{
	b32 result = 0;
	if (lib_try_lock(BeamformerSharedMemoryLockKind_ExportSync, timeout_ms)) {
		if (lib_try_lock(BeamformerSharedMemoryLockKind_ScratchSpace, 0)) {
			Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
			mem_copy(out, scratch.beg, (uz)size);
			lib_release_lock(BeamformerSharedMemoryLockKind_ScratchSpace);
			result = 1;
		}
		lib_release_lock(BeamformerSharedMemoryLockKind_ExportSync);
	}
	return result;
}
    530 
/* Synchronous convenience path: writes the output dimensions into block 0,
 * uploads the RF data, queues compute + export, and blocks until the complex
 * beamformed volume (2 x f32 per point) is copied into out_data.
 * output_points entries are clamped up to at least 1 in place. */
b32
beamform_data_synchronized(void *data, u32 data_size, i32 output_points[3], f32 *out_data, i32 timeout_ms)
{
	b32 result = 0;
	if (check_shared_memory()) {
		output_points[0] = MAX(1, output_points[0]);
		output_points[1] = MAX(1, output_points[1]);
		output_points[2] = MAX(1, output_points[2]);

		/* NOTE(review): block 0's parameters are written without taking the
		 * block lock on this path - confirm this is intended */
		BeamformerParameterBlock *b = beamformer_parameter_block(g_beamformer_library_context.bp, 0);
		b->parameters.output_points[0] = output_points[0];
		b->parameters.output_points[1] = output_points[1];
		b->parameters.output_points[2] = output_points[2];

		/* interleaved complex output: 2 floats per output point */
		iz output_size = output_points[0] * output_points[1] * output_points[2] * (i32)sizeof(f32) * 2;

		Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
		if (lib_error_check(output_size <= arena_capacity(&scratch, u8), BF_LIB_ERR_KIND_EXPORT_SPACE_OVERFLOW)
		    && beamformer_push_data_with_compute(data, data_size, 0, 0))
		{
			BeamformerExportContext export;
			export.kind = BeamformerExportKind_BeamformedData;
			export.size = (u32)output_size;
			if (beamformer_export_buffer(export)) {
				/* NOTE(rnp): if this fails it just means that the work from push_data hasn't
				 * started yet. This is here to catch the other case where the work started
				 * and finished before we finished queuing the export work item */
				beamformer_flush_commands(0);

				result = beamformer_read_output(out_data, output_size, timeout_ms);
			}
		}
	}
	return result;
}
    566 
    567 b32
    568 beamformer_compute_timings(BeamformerComputeStatsTable *output, i32 timeout_ms)
    569 {
    570 	static_assert(sizeof(*output) <= BEAMFORMER_SHARED_MEMORY_MAX_SCRATCH_SIZE,
    571 	              "timing table size exceeds scratch space");
    572 
    573 	b32 result = 0;
    574 	if (check_shared_memory()) {
    575 		Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
    576 		if (lib_error_check(arena_capacity(&scratch, u8) <= (iz)sizeof(*output), BF_LIB_ERR_KIND_EXPORT_SPACE_OVERFLOW)) {
    577 			BeamformerExportContext export;
    578 			export.kind = BeamformerExportKind_Stats;
    579 			export.size = sizeof(*output);
    580 			if (beamformer_export_buffer(export) && beamformer_flush_commands(0))
    581 				result = beamformer_read_output(output, sizeof(*output), timeout_ms);
    582 		}
    583 	}
    584 	return result;
    585 }
    586 
    587 i32
    588 beamformer_live_parameters_get_dirty_flag(void)
    589 {
    590 	i32 result = -1;
    591 	if (check_shared_memory()) {
    592 		u32 flag = ctz_u32(g_beamformer_library_context.bp->live_imaging_dirty_flags);
    593 		if (flag != 32) {
    594 			atomic_and_u32(&g_beamformer_library_context.bp->live_imaging_dirty_flags, ~(1 << flag));
    595 			result = (i32)flag;
    596 		}
    597 	}
    598 	return result;
    599 }
    600 
    601 BeamformerLiveImagingParameters *
    602 beamformer_get_live_parameters(void)
    603 {
    604 	BeamformerLiveImagingParameters *result = 0;
    605 	if (check_shared_memory()) result = &g_beamformer_library_context.bp->live_imaging_parameters;
    606 	return result;
    607 }
    608 
/* Copies *new over the live imaging parameters in shared memory, issuing a
 * write barrier afterwards so the struct is fully written before any
 * subsequent signalling. */
b32
beamformer_set_live_parameters(BeamformerLiveImagingParameters *new)
{
	b32 result = 0;
	if (check_shared_memory()) {
		mem_copy(&g_beamformer_library_context.bp->live_imaging_parameters, new, sizeof(*new));
		memory_write_barrier();
		result = 1;
	}
	return result;
}