ogl_beamforming

Ultrasound Beamforming Implemented with OpenGL
git clone anongit@rnpnr.xyz:ogl_beamforming.git
Log | Files | Refs | Feed | Submodules | README | LICENSE

ogl_beamformer_lib.c (20313B)


      1 /* See LICENSE for license details. */
      2 #include "../compiler.h"
      3 
      4 #include "../util.h"
      5 #include "../beamformer_parameters.h"
      6 #include "ogl_beamformer_lib_base.h"
      7 
      8 #if OS_LINUX
      9 #include "../os_linux.c"
     10 #elif OS_WINDOWS
     11 #include "../os_win32.c"
     12 
     13 W32(iptr) OpenFileMappingA(u32, b32, c8 *);
     14 
     15 #else
     16 #error Unsupported Platform
     17 #endif
     18 
     19 #include "../beamformer_shared_memory.c"
     20 
/* NOTE: single library-wide state: the mapped shared memory region, a typed
 * pointer into it, the global lock timeout, and the last recorded error */
global struct {
	SharedMemoryRegion      shared_memory; /* OS mapping of the beamformer's shared segment */
	BeamformerSharedMemory *bp;            /* typed view of shared_memory.region; 0 until validated */
	i32                     timeout_ms;    /* lock wait budget; -1 accepted as "wait forever" (see beamformer_set_global_timeout) */
	BeamformerLibErrorKind  last_error;    /* set by lib_error_check() on any failed check */
} g_beamformer_library_context;
     27 
     28 #if OS_LINUX
     29 
     30 function b32
     31 os_reserve_region_locks(iptr os_context, u32 count)
     32 {
     33 	b32 result = count <= BeamformerMaxParameterBlockSlots;
     34 	return result;
     35 }
     36 
     37 function SharedMemoryRegion
     38 os_open_shared_memory_area(char *name)
     39 {
     40 	SharedMemoryRegion result = {0};
     41 	i32 fd = shm_open(name, O_RDWR, S_IRUSR|S_IWUSR);
     42 	if (fd > 0) {
     43 		void *new = mmap(0, BEAMFORMER_SHARED_MEMORY_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
     44 		if (new != MAP_FAILED) result.region = new;
     45 		close(fd);
     46 	}
     47 	return result;
     48 }
     49 
     50 #elif OS_WINDOWS
     51 
     52 function b32
     53 os_reserve_region_locks(iptr os_context, u32 count)
     54 {
     55 	local_persist iptr semaphores[BeamformerSharedMemoryLockKind_Count + BeamformerMaxParameterBlockSlots];
     56 	w32_shared_memory_context *ctx = (typeof(ctx))os_context;
     57 
     58 	b32 result = count <= BeamformerMaxParameterBlockSlots;
     59 	if (result) {
     60 		count += BeamformerSharedMemoryLockKind_Count;
     61 		if (count > ctx->reserved_count) {
     62 			u8 buffer[1024];
     63 			Stream sb = {.data = buffer, .cap = countof(buffer)};
     64 			stream_append_s8(&sb, s8(OS_SHARED_MEMORY_NAME "_lock_"));
     65 
     66 			for (u32 i = ctx->reserved_count; i < count; i++) {
     67 				Stream lb = sb;
     68 				stream_append_u64(&lb, i);
     69 				stream_append_byte(&lb, 0);
     70 				semaphores[i]  = CreateSemaphoreA(0, 1, 1, (c8 *)lb.data);
     71 				result        &= semaphores[i] != INVALID_FILE;
     72 			}
     73 
     74 			if (result) {
     75 				ctx->semaphores     = semaphores;
     76 				ctx->reserved_count = count;
     77 			}
     78 		} else if (count < ctx->reserved_count) {
     79 			for (u32 i = ctx->reserved_count; i >= count;)
     80 				CloseHandle(semaphores[--i]);
     81 			ctx->reserved_count = count;
     82 		}
     83 	}
     84 	return result;
     85 }
     86 
/* Opens the beamformer's named file mapping and maps the full shared region.
 * A single file-local context carries the semaphore handles used for locks. */
function SharedMemoryRegion
os_open_shared_memory_area(char *name)
{
	local_persist w32_shared_memory_context ctx = {0};
	SharedMemoryRegion result = {0};
	iptr h = OpenFileMappingA(FILE_MAP_ALL_ACCESS, 0, name);
	if (h != INVALID_FILE) {
		void *new = MapViewOfFile(h, FILE_MAP_ALL_ACCESS, 0, 0, BEAMFORMER_SHARED_MEMORY_SIZE);
		/* reserve the base lock set: the fixed lock kinds plus one block */
		if (new && os_reserve_region_locks((iptr)&ctx, 1)) {
			result.region     = new;
			result.os_context = (iptr)&ctx;
		}
		/* NOTE(review): if lock reservation fails the mapped view is never
		 * unmapped (no UnmapViewOfFile) — confirm this leak is acceptable */
		CloseHandle(h);
	}
	return result;
}
    103 
    104 #endif
    105 
    106 function b32
    107 lib_error_check(b32 condition, BeamformerLibErrorKind error_kind)
    108 {
    109 	b32 result = condition;
    110 	if (!result) g_beamformer_library_context.last_error = error_kind;
    111 	return result;
    112 }
    113 
/* Maps the beamformer's shared memory on first use and validates its layout
 * version; on every call verifies the mapping is still usable. Returns 1
 * when g_beamformer_library_context.bp is safe to dereference. */
function b32
check_shared_memory(void)
{
	if (!g_beamformer_library_context.shared_memory.region) {
		g_beamformer_library_context.shared_memory = os_open_shared_memory_area(OS_SHARED_MEMORY_NAME);
		if (lib_error_check(g_beamformer_library_context.shared_memory.region != 0, BF_LIB_ERR_KIND_SHARED_MEMORY)) {
			/* reject regions produced by a different shared-memory layout version */
			u32 version = ((BeamformerSharedMemory *)g_beamformer_library_context.shared_memory.region)->version;
			if (lib_error_check(version == BEAMFORMER_SHARED_MEMORY_VERSION, BF_LIB_ERR_KIND_VERSION_MISMATCH))
				g_beamformer_library_context.bp = g_beamformer_library_context.shared_memory.region;
		}
	}

	/* bp stays 0 when open/version checks failed; a region marked invalid
	 * also fails every subsequent access */
	b32 result = 0;
	if (g_beamformer_library_context.bp)
		result = lib_error_check(!g_beamformer_library_context.bp->invalid, BF_LIB_ERR_KIND_INVALID_ACCESS);
	return result;
}
    131 
    132 function b32
    133 valid_parameter_block(u32 block)
    134 {
    135 	b32 result = check_shared_memory();
    136 	if (result) {
    137 		result = lib_error_check(block < g_beamformer_library_context.bp->reserved_parameter_blocks,
    138 		                         BF_LIB_ERR_KIND_PARAMETER_BLOCK_UNALLOCATED);
    139 	}
    140 	return result;
    141 }
    142 
    143 function BeamformWork *
    144 try_push_work_queue(void)
    145 {
    146 	BeamformWork *result = beamform_work_queue_push(&g_beamformer_library_context.bp->external_work_queue);
    147 	lib_error_check(result != 0, BF_LIB_ERR_KIND_WORK_QUEUE_FULL);
    148 	return result;
    149 }
    150 
    151 function b32
    152 lib_try_lock(i32 lock, i32 timeout_ms)
    153 {
    154 	b32 result = os_shared_memory_region_lock(&g_beamformer_library_context.shared_memory,
    155 	                                          g_beamformer_library_context.bp->locks,
    156 	                                          lock, (u32)timeout_ms);
    157 	lib_error_check(result, BF_LIB_ERR_KIND_SYNC_VARIABLE);
    158 	return result;
    159 }
    160 
    161 function void
    162 lib_release_lock(i32 lock)
    163 {
    164 	os_shared_memory_region_unlock(&g_beamformer_library_context.shared_memory,
    165 	                               g_beamformer_library_context.bp->locks, (i32)lock);
    166 }
    167 
    168 u32
    169 beamformer_get_api_version(void)
    170 {
    171 	return BEAMFORMER_SHARED_MEMORY_VERSION;
    172 }
    173 
/* Maps an error kind to a static description. The X-macro expands
 * BEAMFORMER_LIB_ERRORS into one string literal per error kind; the trailing
 * "invalid error kind" entry is the clamped fallback for out-of-range kinds. */
const char *
beamformer_error_string(BeamformerLibErrorKind kind)
{
	#define X(type, num, string) string,
	local_persist const char *error_string_table[] = {BEAMFORMER_LIB_ERRORS "invalid error kind"};
	#undef X
	return error_string_table[MIN(kind, countof(error_string_table) - 1)];
}
    182 
    183 BeamformerLibErrorKind
    184 beamformer_get_last_error(void)
    185 {
    186 	return g_beamformer_library_context.last_error;
    187 }
    188 
    189 const char *
    190 beamformer_get_last_error_string(void)
    191 {
    192 	return beamformer_error_string(beamformer_get_last_error());
    193 }
    194 
    195 b32
    196 beamformer_set_global_timeout(i32 timeout_ms)
    197 {
    198 	b32 result = lib_error_check(timeout_ms >= -1, BF_LIB_ERR_KIND_INVALID_TIMEOUT);
    199 	if (result) g_beamformer_library_context.timeout_ms = timeout_ms;
    200 	return result;
    201 }
    202 
/* Resizes the usable parameter block count, creating any OS lock objects
 * required, and zero-initializes blocks that become newly reserved. */
b32
beamformer_reserve_parameter_blocks(uint32_t count)
{
	b32 result = 0;
	if (check_shared_memory() &&
	    lib_error_check(os_reserve_region_locks(g_beamformer_library_context.shared_memory.os_context, count),
	                    BF_LIB_ERR_KIND_PARAMETER_BLOCK_OVERFLOW))
	{
		u32 old_count = g_beamformer_library_context.bp->reserved_parameter_blocks;
		g_beamformer_library_context.bp->reserved_parameter_blocks = count;
		/* only blocks beyond the previous count are cleared; shrinking leaves
		 * the surviving blocks' contents untouched */
		for (u32 i = old_count; i < count; i++)
			zero_struct(beamformer_parameter_block(g_beamformer_library_context.bp, i));
		result = 1;
	}
	return result;
}
    219 
    220 function b32
    221 validate_pipeline(i32 *shaders, u32 shader_count, BeamformerDataKind data_kind)
    222 {
    223 	b32 result = 1;
    224 	if (lib_error_check(shader_count <= BeamformerMaxComputeShaderStages,
    225 	                    BF_LIB_ERR_KIND_COMPUTE_STAGE_OVERFLOW))
    226 	{
    227 		for (u32 i = 0; i < shader_count; i++)
    228 			result &= BETWEEN(shaders[i], 0, BeamformerShaderKind_ComputeCount);
    229 		if (!result) {
    230 			g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_COMPUTE_STAGE;
    231 		} else if (shaders[0] != BeamformerShaderKind_Demodulate &&
    232 		           shaders[0] != BeamformerShaderKind_Decode)
    233 		{
    234 			g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_START_SHADER;
    235 			result = 0;
    236 		} else if (shaders[0] == BeamformerShaderKind_Demodulate &&
    237 		           !(data_kind == BeamformerDataKind_Int16 || data_kind == BeamformerDataKind_Float32))
    238 		{
    239 			g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_DEMOD_DATA_KIND;
    240 			result = 0;
    241 		}
    242 	}
    243 	return result;
    244 }
    245 
    246 function b32
    247 parameter_block_region_upload_explicit(void *data, u32 size, u32 block, BeamformerParameterBlockRegions region_id,
    248                                        u32 block_offset, i32 timeout_ms)
    249 {
    250 	i32 lock   = BeamformerSharedMemoryLockKind_Count + (i32)block;
    251 	b32 result = valid_parameter_block(block) && lib_try_lock(lock, timeout_ms);
    252 	if (result) {
    253 		mem_copy((u8 *)beamformer_parameter_block(g_beamformer_library_context.bp, block) + block_offset,
    254 		         data, size);
    255 		mark_parameter_block_region_dirty(g_beamformer_library_context.bp, block, region_id);
    256 		lib_release_lock(lock);
    257 	}
    258 	return result;
    259 }
    260 
    261 
    262 function b32
    263 parameter_block_region_upload(void *data, u32 size, u32 block,
    264                               BeamformerParameterBlockRegions region_id, i32 timeout_ms)
    265 {
    266 	assert(region_id < BeamformerParameterBlockRegion_Count);
    267 	b32 result = parameter_block_region_upload_explicit(data, size, block, region_id,
    268 	                                                    BeamformerParameterBlockRegionOffsets[region_id],
    269 	                                                    timeout_ms);
    270 	return result;
    271 }
    272 
/* Overwrites the per-stage shader parameters of one pipeline stage in
 * parameter block `block`. stage_index wraps modulo
 * BeamformerMaxComputeShaderStages rather than being rejected. */
b32
beamformer_set_pipeline_stage_parameters_at(u32 stage_index, i32 parameter, u32 block)
{
	u32 offset  = BeamformerParameterBlockRegionOffsets[BeamformerParameterBlockRegion_ComputePipeline];
	offset     += offsetof(BeamformerComputePipeline, parameters);
	offset     += (stage_index % BeamformerMaxComputeShaderStages) * sizeof(BeamformerShaderParameters);
	/* NOTE(review): copies sizeof(BeamformerShaderParameters) bytes from a
	 * single i32 — assumes that struct is exactly one i32 wide; confirm */
	b32 result  = parameter_block_region_upload_explicit(&parameter, sizeof(BeamformerShaderParameters), block,
	                                                     BeamformerParameterBlockRegion_ComputePipeline, offset,
	                                                     g_beamformer_library_context.timeout_ms);
	return result;
}
    284 
    285 b32
    286 beamformer_set_pipeline_stage_parameters(u32 stage_index, i32 parameter)
    287 {
    288 	b32 result = beamformer_set_pipeline_stage_parameters_at(stage_index, parameter, 0);
    289 	return result;
    290 }
    291 
/* Validates and installs a compute pipeline (shader list + data kind) into
 * parameter block `block`, under that block's lock. */
b32
beamformer_push_pipeline_at(i32 *shaders, u32 shader_count, BeamformerDataKind data_kind, u32 block)
{
	b32 result = 0;
	if (validate_pipeline(shaders, shader_count, data_kind)) {
		/* per-block locks live after the fixed shared lock kinds */
		i32 lock = BeamformerSharedMemoryLockKind_Count + (i32)block;
		if (valid_parameter_block(block) && lib_try_lock(lock, g_beamformer_library_context.timeout_ms)) {
			BeamformerParameterBlock *b = beamformer_parameter_block(g_beamformer_library_context.bp, block);
			mem_copy(&b->pipeline.shaders, shaders, shader_count * sizeof(*shaders));
			mark_parameter_block_region_dirty(g_beamformer_library_context.bp, block,
			                                  BeamformerParameterBlockRegion_ComputePipeline);
			b->pipeline.shader_count = shader_count;
			b->pipeline.data_kind    = data_kind;
			lib_release_lock(lock);
			result = 1;
		}
	}
	return result;
}
    311 
    312 b32
    313 beamformer_push_pipeline(i32 *shaders, u32 shader_count, BeamformerDataKind data_kind)
    314 {
    315 	b32 result = beamformer_push_pipeline_at(shaders, shader_count, data_kind, 0);
    316 	return result;
    317 }
    318 
    319 function b32
    320 beamformer_create_filter(BeamformerFilterKind kind, BeamformerFilterParameters params, u8 filter_slot, u8 parameter_block)
    321 {
    322 	b32 result = 0;
    323 	if (check_shared_memory()) {
    324 		BeamformWork *work = try_push_work_queue();
    325 		if (work) {
    326 			BeamformerCreateFilterContext *ctx = &work->create_filter_context;
    327 			work->kind = BeamformerWorkKind_CreateFilter;
    328 			ctx->kind            = kind;
    329 			ctx->parameters      = params;
    330 			ctx->filter_slot     = filter_slot     % BeamformerFilterSlots;
    331 			ctx->parameter_block = parameter_block % BeamformerMaxParameterBlockSlots;
    332 			beamform_work_queue_push_commit(&g_beamformer_library_context.bp->external_work_queue);
    333 			result = 1;
    334 		}
    335 	}
    336 	return result;
    337 }
    338 
    339 b32
    340 beamformer_create_kaiser_low_pass_filter(f32 beta, f32 cutoff_frequency, f32 sampling_frequency,
    341                                          i16 length, u8 filter_slot, u8 parameter_block)
    342 {
    343 	BeamformerFilterParameters params = {
    344 		.beta               = beta,
    345 		.cutoff_frequency   = cutoff_frequency,
    346 		.sampling_frequency = sampling_frequency,
    347 		.length             = length,
    348 	};
    349 	b32 result = beamformer_create_filter(BeamformerFilterKind_Kaiser, params, filter_slot, parameter_block);
    350 	return result;
    351 }
    352 
    353 function b32
    354 beamformer_flush_commands(i32 timeout_ms)
    355 {
    356 	b32 result = lib_try_lock(BeamformerSharedMemoryLockKind_DispatchCompute, timeout_ms);
    357 	return result;
    358 }
    359 
/* Queues a ComputeIndirect work item for parameter block `block` targeting
 * view plane `tag`, then nudges the beamformer with a zero-timeout flush. */
function b32
beamformer_compute_indirect(BeamformerViewPlaneTag tag, u32 block)
{
	b32 result = 0;
	if (check_shared_memory() &&
	    lib_error_check(tag   < BeamformerViewPlaneTag_Count, BF_LIB_ERR_KIND_INVALID_IMAGE_PLANE) &&
	    lib_error_check(block < g_beamformer_library_context.bp->reserved_parameter_blocks,
	                    BF_LIB_ERR_KIND_PARAMETER_BLOCK_UNALLOCATED))
	{
		BeamformWork *work = try_push_work_queue();
		if (work) {
			work->kind = BeamformerWorkKind_ComputeIndirect;
			work->compute_indirect_context.view_plane      = tag;
			work->compute_indirect_context.parameter_block = block;
			beamform_work_queue_push_commit(&g_beamformer_library_context.bp->external_work_queue);
			/* zero timeout: poke the dispatch lock without blocking */
			beamformer_flush_commands(0);
			result = 1;
		}
	}
	return result;
}
    381 
    382 b32
    383 beamformer_start_compute(void)
    384 {
    385 	b32 result = beamformer_compute_indirect(0, 0);
    386 	return result;
    387 }
    388 
    389 b32
    390 beamformer_wait_for_compute_dispatch(i32 timeout_ms)
    391 {
    392 	b32 result = beamformer_flush_commands(timeout_ms);
    393 	/* NOTE(rnp): if you are calling this function you are probably about
    394 	 * to start some other work and it might be better to not do this... */
    395 	if (result) lib_release_lock(BeamformerSharedMemoryLockKind_DispatchCompute);
    396 	return result;
    397 }
    398 
/* Upload helpers generated for each simple per-block array region:
 *   X(field name, element type, components per entry, region enum suffix) */
#define BEAMFORMER_UPLOAD_FNS \
	X(channel_mapping, i16, 1, ChannelMapping) \
	X(sparse_elements, i16, 1, SparseElements) \
	X(focal_vectors,   f32, 2, FocalVectors)

/* beamformer_push_<name>_at(): bounds-check count against the block's field
 * capacity, then upload count * elements values into the matching region */
#define X(name, dtype, elements, region_name) \
b32 beamformer_push_##name ##_at(dtype *data, u32 count, u32 block) { \
	b32 result = 0; \
	if (lib_error_check(count <= countof(((BeamformerParameterBlock *)0)->name), BF_LIB_ERR_KIND_BUFFER_OVERFLOW)) { \
		result = parameter_block_region_upload(data, count * elements * sizeof(dtype), block, \
		                                       BeamformerParameterBlockRegion_##region_name,  \
		                                       g_beamformer_library_context.timeout_ms);      \
	} \
	return result; \
}
BEAMFORMER_UPLOAD_FNS
#undef X

/* beamformer_push_<name>(): same as above but always targets block 0 */
#define X(name, dtype, ...) \
b32 beamformer_push_##name (dtype *data, u32 count) { \
	b32 result = beamformer_push_##name ##_at(data, count, 0); \
	return result; \
}
BEAMFORMER_UPLOAD_FNS
#undef X
    424 
/* Stages raw RF data into the shared scratch area. ScratchSpace is held only
 * for the copy; UploadRF is left held on success (see note below). */
function b32
beamformer_push_data_base(void *data, u32 data_size, i32 timeout_ms)
{
	b32 result = 0;
	if (check_shared_memory()) {
		Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
		if (lib_error_check(data_size <= arena_capacity(&scratch, u8), BF_LIB_ERR_KIND_BUFFER_OVERFLOW)) {
			if (lib_try_lock(BeamformerSharedMemoryLockKind_UploadRF, timeout_ms)) {
				if (lib_try_lock(BeamformerSharedMemoryLockKind_ScratchSpace, 0)) {
					mem_copy(scratch.beg, data, data_size);
					/* TODO(rnp): need a better way to communicate this */
					g_beamformer_library_context.bp->scratch_rf_size = data_size;
					lib_release_lock(BeamformerSharedMemoryLockKind_ScratchSpace);
					result = 1;
				}
				/* NOTE(review): UploadRF is never released here — presumably
				 * the beamformer releases it after consuming the data; but on
				 * the ScratchSpace-failure path it is also left held. Confirm
				 * this is intended and cannot deadlock later uploads. */
			}
		}
	}
	return result;
}
    445 
    446 b32
    447 beamformer_push_data(void *data, u32 data_size)
    448 {
    449 	return beamformer_push_data_base(data, data_size, g_beamformer_library_context.timeout_ms);
    450 }
    451 
    452 b32
    453 beamformer_push_data_with_compute(void *data, u32 data_size, u32 image_plane_tag, u32 parameter_slot)
    454 {
    455 	b32 result = beamformer_push_data_base(data, data_size, g_beamformer_library_context.timeout_ms);
    456 	if (result) result = beamformer_compute_indirect(image_plane_tag, parameter_slot);
    457 	return result;
    458 }
    459 
    460 b32
    461 beamformer_push_parameters_at(BeamformerParameters *bp, u32 block)
    462 {
    463 	b32 result = parameter_block_region_upload(bp, sizeof(*bp), block,
    464 	                                           BeamformerParameterBlockRegion_Parameters,
    465 	                                           g_beamformer_library_context.timeout_ms);
    466 	return result;
    467 }
    468 
    469 b32
    470 beamformer_push_parameters(BeamformerParameters *bp)
    471 {
    472 	b32 result = beamformer_push_parameters_at(bp, 0);
    473 	return result;
    474 }
    475 
    476 b32
    477 beamformer_push_parameters_ui(BeamformerUIParameters *bp)
    478 {
    479 	b32 result = parameter_block_region_upload_explicit(bp, sizeof(*bp), 0, BeamformerParameterBlockRegion_Parameters,
    480 	                                                    offsetof(BeamformerParameterBlock, parameters_ui),
    481 	                                                    g_beamformer_library_context.timeout_ms);
    482 	return result;
    483 }
    484 
    485 b32
    486 beamformer_push_parameters_head(BeamformerParametersHead *bp)
    487 {
    488 	b32 result = parameter_block_region_upload_explicit(bp, sizeof(*bp), 0, BeamformerParameterBlockRegion_Parameters,
    489 	                                                    offsetof(BeamformerParameterBlock, parameters_head),
    490 	                                                    g_beamformer_library_context.timeout_ms);
    491 	return result;
    492 }
    493 
/* Queues an ExportBuffer work item. Takes ExportSync without waiting; it is
 * released later by beamformer_read_output() once results are copied out. */
function b32
beamformer_export_buffer(BeamformerExportContext export_context)
{
	BeamformWork *work = try_push_work_queue();
	b32 result = work && lib_try_lock(BeamformerSharedMemoryLockKind_ExportSync, 0);
	if (result) {
		work->export_context = export_context;
		work->kind = BeamformerWorkKind_ExportBuffer;
		/* exported data is written into the shared scratch space */
		work->lock = BeamformerSharedMemoryLockKind_ScratchSpace;
		beamform_work_queue_push_commit(&g_beamformer_library_context.bp->external_work_queue);
	}
	/* NOTE(review): when the lock fails, the pushed-but-uncommitted work slot
	 * is abandoned — confirm the queue tolerates push without commit */
	return result;
}
    507 
/* Waits (up to timeout_ms) for ExportSync, then copies `size` bytes of
 * exported results out of the shared scratch space into `out`. ExportSync is
 * released on every exit path once it was acquired. */
function b32
beamformer_read_output(void *out, iz size, i32 timeout_ms)
{
	b32 result = 0;
	if (lib_try_lock(BeamformerSharedMemoryLockKind_ExportSync, timeout_ms)) {
		if (lib_try_lock(BeamformerSharedMemoryLockKind_ScratchSpace, 0)) {
			Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
			mem_copy(out, scratch.beg, (uz)size);
			lib_release_lock(BeamformerSharedMemoryLockKind_ScratchSpace);
			result = 1;
		}
		lib_release_lock(BeamformerSharedMemoryLockKind_ExportSync);
	}
	return result;
}
    523 
/* One-shot synchronous pipeline: upload RF data, run compute with parameter
 * block 0, and block until the beamformed output is copied into out_data.
 * output_points entries are clamped up to at least 1 and written back to the
 * caller's array. */
b32
beamform_data_synchronized(void *data, u32 data_size, i32 output_points[3], f32 *out_data, i32 timeout_ms)
{
	b32 result = 0;
	if (check_shared_memory()) {
		output_points[0] = MAX(1, output_points[0]);
		output_points[1] = MAX(1, output_points[1]);
		output_points[2] = MAX(1, output_points[2]);

		/* NOTE(review): block 0's parameters are written here without taking
		 * the per-block lock used by the other upload paths — confirm safe */
		BeamformerParameterBlock *b = beamformer_parameter_block(g_beamformer_library_context.bp, 0);
		b->parameters.output_points[0] = output_points[0];
		b->parameters.output_points[1] = output_points[1];
		b->parameters.output_points[2] = output_points[2];

		/* two f32 per output point (presumably complex I/Q — confirm) */
		iz output_size = output_points[0] * output_points[1] * output_points[2] * (i32)sizeof(f32) * 2;

		Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
		if (lib_error_check(output_size <= arena_capacity(&scratch, u8), BF_LIB_ERR_KIND_EXPORT_SPACE_OVERFLOW)
		    && beamformer_push_data_with_compute(data, data_size, 0, 0))
		{
			BeamformerExportContext export;
			export.kind = BeamformerExportKind_BeamformedData;
			export.size = (u32)output_size;
			if (beamformer_export_buffer(export)) {
				/* NOTE(rnp): if this fails it just means that the work from push_data hasn't
				 * started yet. This is here to catch the other case where the work started
				 * and finished before we finished queuing the export work item */
				beamformer_flush_commands(0);

				result = beamformer_read_output(out_data, output_size, timeout_ms);
			}
		}
	}
	return result;
}
    559 
    560 b32
    561 beamformer_compute_timings(BeamformerComputeStatsTable *output, i32 timeout_ms)
    562 {
    563 	static_assert(sizeof(*output) <= BEAMFORMER_SHARED_MEMORY_MIN_SCRATCH_SIZE,
    564 	              "timing table size exceeds scratch space");
    565 
    566 	b32 result = 0;
    567 	if (check_shared_memory()) {
    568 		Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
    569 		if (lib_error_check(arena_capacity(&scratch, u8) <= (iz)sizeof(*output), BF_LIB_ERR_KIND_EXPORT_SPACE_OVERFLOW)) {
    570 			BeamformerExportContext export;
    571 			export.kind = BeamformerExportKind_Stats;
    572 			export.size = sizeof(*output);
    573 			if (beamformer_export_buffer(export) && beamformer_flush_commands(0))
    574 				result = beamformer_read_output(output, sizeof(*output), timeout_ms);
    575 		}
    576 	}
    577 	return result;
    578 }
    579 
    580 i32
    581 beamformer_live_parameters_get_dirty_flag(void)
    582 {
    583 	i32 result = -1;
    584 	if (check_shared_memory()) {
    585 		u32 flag = ctz_u32(g_beamformer_library_context.bp->live_imaging_dirty_flags);
    586 		if (flag != 32) {
    587 			atomic_and_u32(&g_beamformer_library_context.bp->live_imaging_dirty_flags, ~(1 << flag));
    588 			result = (i32)flag;
    589 		}
    590 	}
    591 	return result;
    592 }
    593 
    594 BeamformerLiveImagingParameters *
    595 beamformer_get_live_parameters(void)
    596 {
    597 	BeamformerLiveImagingParameters *result = 0;
    598 	if (check_shared_memory()) result = &g_beamformer_library_context.bp->live_imaging_parameters;
    599 	return result;
    600 }
    601 
    602 b32
    603 beamformer_set_live_parameters(BeamformerLiveImagingParameters *new)
    604 {
    605 	b32 result = 0;
    606 	if (check_shared_memory()) {
    607 		mem_copy(&g_beamformer_library_context.bp->live_imaging_parameters, new, sizeof(*new));
    608 		memory_write_barrier();
    609 		result = 1;
    610 	}
    611 	return result;
    612 }