ogl_beamforming

Ultrasound Beamforming Implemented with OpenGL
git clone anongit@rnpnr.xyz:ogl_beamforming.git
Log | Files | Refs | Feed | Submodules | README | LICENSE

Commit: c0122cf9c70f95fae833ef99de6462914da519a9
Parent: ec59851530f81a795e6e3b53556d3f638a2598a9
Author: Randy Palamar
Date:   Wed, 23 Jul 2025 12:27:28 -0600

core/lib: rework pipeline handling

There are cases such as with DASFast where the compute thread
wants to modify the pipeline on its own. However, this caused
problems with the library preempting the compute thread during
operation leading to issues such as the incorrect shader being
used.

This led to the realization that the compute thread should have
its own stable pipeline to operate on which it can setup during a
'planning' phase. During this planning phase the compute thread
can modify the parameters as necessary to achieve optimal
performance before uploading its own copy to the GPU.

This can also aid in simplifying the library API. For example the
planning phase accounts for the time delay introduced by any
applied filters.

Diffstat:
Mbeamformer.c | 182++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------------
Mbeamformer.h | 21++++++++++++++++++++-
Mbeamformer_parameters.h | 40++++++++++++++++++++++++++++++----------
Mbeamformer_work_queue.h | 19++++++++-----------
Mhelpers/ogl_beamformer_lib.c | 155++++++++++++++++++++++++++++++++++++++++++++++++-------------------------------
Mhelpers/ogl_beamformer_lib_base.h | 37++++++++++++++++++++++---------------
Mshaders/das.glsl | 11++++-------
Mshaders/decode.glsl | 6++++++
Mshaders/demod.glsl | 2+-
Mstatic.c | 6+++---
Mtests/throughput.c | 2+-
Mui.c | 30++++++++++++++----------------
12 files changed, 333 insertions(+), 178 deletions(-)

diff --git a/beamformer.c b/beamformer.c @@ -42,6 +42,42 @@ typedef struct { u32 needed_frames; } ComputeFrameIterator; +function void +beamformer_filter_update(BeamformerFilter *f, BeamformerCreateFilterContext *cfc, + f32 sampling_frequency, Arena arena) +{ + glDeleteTextures(1, &f->texture); + glCreateTextures(GL_TEXTURE_1D, 1, &f->texture); + glTextureStorage1D(f->texture, 1, GL_R32F, cfc->length); + + f32 *filter = 0; + switch (cfc->kind) { + case BeamformerFilterKind_Kaiser:{ + filter = kaiser_low_pass_filter(&arena, cfc->cutoff_frequency, sampling_frequency, + cfc->beta, cfc->length); + }break; + InvalidDefaultCase; + } + + f->kind = cfc->kind; + f->length = cfc->length; + f->sampling_frequency = sampling_frequency; + glTextureSubImage1D(f->texture, 0, 0, f->length, GL_RED, GL_FLOAT, filter); +} + +function f32 +beamformer_filter_time_offset(BeamformerFilter *f) +{ + f32 result = 0; + switch (f->kind) { + case BeamformerFilterKind_Kaiser:{ + result = -(f32)f->length / 2.0f / f->sampling_frequency; + }break; + InvalidDefaultCase; + } + return result; +} + function iv3 make_valid_test_dim(i32 in[3]) { @@ -266,11 +302,70 @@ compute_cursor_finished(struct compute_cursor *cursor) return result; } -function b32 -das_can_use_fast(BeamformerParameters *bp) +function void +plan_compute_pipeline(SharedMemoryRegion *os_sm, BeamformerComputePipeline *cp, BeamformerFilter *filters) { - b32 result = !bp->coherency_weighting; - return result; + BeamformerSharedMemory *sm = os_sm->region; + BeamformerParameters *bp = &sm->parameters; + + i32 compute_lock = BeamformerSharedMemoryLockKind_ComputePipeline; + i32 params_lock = BeamformerSharedMemoryLockKind_Parameters; + os_shared_memory_region_lock(os_sm, sm->locks, compute_lock, (u32)-1); + + b32 decode_first = sm->shaders[0] == BeamformerShaderKind_Decode; + b32 demod_first = sm->shaders[0] == BeamformerShaderKind_Demodulate; + + os_shared_memory_region_lock(os_sm, sm->locks, params_lock, (u32)-1); + 
mem_copy(&cp->parameters, &sm->parameters, sizeof(cp->parameters)); + os_shared_memory_region_unlock(os_sm, sm->locks, params_lock); + + b32 demodulating = 0; + BeamformerDataKind data_kind = sm->data_kind; + for (cp->shader_count = 0; cp->shader_count < sm->shader_count; cp->shader_count++) { + BeamformerShaderParameters *sp = sm->shader_parameters + cp->shader_count; + u32 shader = sm->shaders[cp->shader_count]; + switch (shader) { + case BeamformerShaderKind_Decode:{ + BeamformerShaderKind decode_table[] = { + [BeamformerDataKind_Int16] = BeamformerShaderKind_Decode, + [BeamformerDataKind_Int16Complex] = BeamformerShaderKind_DecodeInt16Complex, + [BeamformerDataKind_Float32] = BeamformerShaderKind_DecodeFloat, + [BeamformerDataKind_Float32Complex] = BeamformerShaderKind_DecodeFloatComplex, + }; + if (decode_first) { + shader = decode_table[CLAMP(data_kind, 0, countof(decode_table) - 1)]; + } else { + if (data_kind == BeamformerDataKind_Int16) + shader = BeamformerShaderKind_DecodeInt16Complex; + else + shader = BeamformerShaderKind_DecodeFloatComplex; + } + }break; + case BeamformerShaderKind_Demodulate:{ + if (demod_first && data_kind == BeamformerDataKind_Float32) + shader = BeamformerShaderKind_DemodulateFloat; + cp->parameters.time_offset += beamformer_filter_time_offset(filters + sp->filter_slot); + demodulating = 1; + }break; + case BeamformerShaderKind_DAS:{ + if (!bp->coherency_weighting) + shader = BeamformerShaderKind_DASFast; + }break; + default:{}break; + } + + cp->shaders[cp->shader_count] = shader; + cp->shader_parameters[cp->shader_count] = *sp; + } + + if (demodulating) { + cp->parameters.sampling_frequency /= (f32)cp->parameters.decimation_rate; + } else { + cp->parameters.center_frequency = 0; + cp->parameters.decimation_rate = 1; + } + + os_shared_memory_region_unlock(os_sm, sm->locks, compute_lock); } function m4 @@ -314,8 +409,8 @@ function void do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerComputeFrame *frame, 
BeamformerShaderKind shader, BeamformerShaderParameters *sp) { - ComputeShaderCtx *csctx = &ctx->csctx; - BeamformerSharedMemory *sm = ctx->shared_memory.region; + ComputeShaderCtx *csctx = &ctx->csctx; + BeamformerComputePipeline *cp = &csctx->compute_pipeline; u32 program = csctx->programs[shader]; glUseProgram(program); @@ -325,6 +420,7 @@ do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerComputeFrame *frame switch (shader) { case BeamformerShaderKind_Decode: + case BeamformerShaderKind_DecodeInt16Complex: case BeamformerShaderKind_DecodeFloat: case BeamformerShaderKind_DecodeFloatComplex:{ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, csctx->rf_data_ssbos[output_ssbo_idx]); @@ -366,9 +462,9 @@ do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerComputeFrame *frame glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, csctx->rf_data_ssbos[input_ssbo_idx]); glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, csctx->rf_data_ssbos[output_ssbo_idx]); - glBindImageTexture(0, csctx->filter_textures[sp->filter_slot], 0, GL_FALSE, 0, GL_READ_ONLY, GL_R32F); + glBindImageTexture(0, csctx->filters[sp->filter_slot].texture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R32F); - f32 local_size_x = (f32)(DEMOD_LOCAL_SIZE_X * (f32)sm->parameters.decimation_rate); + f32 local_size_x = (f32)(DEMOD_LOCAL_SIZE_X * (f32)cp->parameters.decimation_rate); glDispatchCompute((u32)ceil_f32((f32)csctx->dec_data_dim.x / local_size_x), (u32)ceil_f32((f32)csctx->dec_data_dim.y / DEMOD_LOCAL_SIZE_Y), (u32)ceil_f32((f32)csctx->dec_data_dim.z / DEMOD_LOCAL_SIZE_Z)); @@ -405,21 +501,21 @@ do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerComputeFrame *frame glBindImageTexture(1, csctx->sparse_elements_texture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R16I); glBindImageTexture(2, csctx->focal_vectors_texture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_RG32F); - m4 voxel_transform = das_voxel_transform_matrix(&sm->parameters); + m4 voxel_transform = das_voxel_transform_matrix(&cp->parameters); 
glProgramUniform1ui(program, DAS_CYCLE_T_UNIFORM_LOC, cycle_t++); glProgramUniformMatrix4fv(program, DAS_VOXEL_MATRIX_LOC, 1, 0, voxel_transform.E); iv3 dim = frame->frame.dim; if (shader == BeamformerShaderKind_DASFast) { i32 loop_end; - if (sm->parameters.das_shader_id == DASShaderKind_RCA_VLS || - sm->parameters.das_shader_id == DASShaderKind_RCA_TPW) + if (cp->parameters.das_shader_id == DASShaderKind_RCA_VLS || + cp->parameters.das_shader_id == DASShaderKind_RCA_TPW) { /* NOTE(rnp): to avoid repeatedly sampling the whole focal vectors * texture we loop over transmits for VLS/TPW */ - loop_end = (i32)sm->parameters.dec_data_dim[2]; + loop_end = (i32)cp->parameters.dec_data_dim[2]; } else { - loop_end = (i32)sm->parameters.dec_data_dim[1]; + loop_end = (i32)cp->parameters.dec_data_dim[1]; } f32 percent_per_step = 1.0f / (f32)loop_end; csctx->processing_progress = -percent_per_step; @@ -473,7 +569,7 @@ do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerComputeFrame *frame assert(frame >= ctx->beamform_frames); assert(frame < ctx->beamform_frames + countof(ctx->beamform_frames)); u32 base_index = (u32)(frame - ctx->beamform_frames); - u32 to_average = (u32)sm->parameters.output_points[3]; + u32 to_average = (u32)cp->parameters.output_points[3]; u32 frame_count = 0; u32 *in_textures = push_array(&arena, u32, MAX_BEAMFORMED_SAVED_FRAMES); ComputeFrameIterator cfi = compute_frame_iterator(ctx, 1 + base_index - to_average, @@ -537,10 +633,14 @@ shader_text_with_header(ShaderReloadContext *ctx, OS *os, Arena *arena) #undef X }break; case BeamformerShaderKind_Decode: + case BeamformerShaderKind_DecodeInt16Complex: case BeamformerShaderKind_DecodeFloat: case BeamformerShaderKind_DecodeFloatComplex: { switch (ctx->kind) { + case BeamformerShaderKind_DecodeInt16Complex:{ + stream_append_s8(&sb, s8("#define INPUT_DATA_TYPE_INT16_COMPLEX\n\n")); + }break; case BeamformerShaderKind_DecodeFloat:{ stream_append_s8(&sb, s8("#define INPUT_DATA_TYPE_FLOAT\n\n")); }break; 
@@ -645,6 +745,10 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co src->shader = cs->programs + src->kind; success &= reload_compute_shader(ctx, src, s8(" (F32)"), arena); + src->kind = BeamformerShaderKind_DecodeInt16Complex; + src->shader = cs->programs + src->kind; + success &= reload_compute_shader(ctx, src, s8(" (I16C)"), arena); + src->kind = BeamformerShaderKind_Decode; src->shader = cs->programs + src->kind; } @@ -696,15 +800,7 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co }break; case BeamformerWorkKind_CreateFilter:{ BeamformerCreateFilterContext *fctx = &work->create_filter_context; - Arena tmp_arena = arena; - glDeleteTextures(1, cs->filter_textures + fctx->slot); - glCreateTextures(GL_TEXTURE_1D, 1, cs->filter_textures + fctx->slot); - - u32 texture = cs->filter_textures[fctx->slot]; - glTextureStorage1D(texture, 1, GL_R32F, fctx->length); - f32 *filter = kaiser_low_pass_filter(&tmp_arena, fctx->cutoff_frequency, - sm->parameters.sampling_frequency, fctx->beta, fctx->length); - glTextureSubImage1D(texture, 0, 0, fctx->length, GL_RED, GL_FLOAT, filter); + beamformer_filter_update(cs->filters + fctx->slot, fctx, sm->parameters.sampling_frequency, arena); }break; case BeamformerWorkKind_UploadBuffer:{ os_shared_memory_region_lock(&ctx->shared_memory, sm->locks, (i32)work->lock, (u32)-1); @@ -731,10 +827,6 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co tex_format = GL_RED_INTEGER; tex_element_count = countof(sm->sparse_elements); }break; - case BeamformerUploadKind_Parameters:{ - ctx->ui_read_params = ctx->beamform_work_queue != q; - buffer = cs->shared_ubo; - }break; case BeamformerUploadKind_RFData:{ if (cs->rf_raw_size != uc->size || !uv4_equal(cs->dec_data_dim, uv4_from_u32_array(bp->dec_data_dim))) @@ -776,9 +868,13 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co 
push_compute_timing_info(ctx->compute_timing_table, (ComputeTimingInfo){.kind = ComputeTimingInfoKind_ComputeFrameBegin}); - u32 mask = 1 << (BeamformerSharedMemoryLockKind_Parameters - 1); + BeamformerComputePipeline *cp = &cs->compute_pipeline; + u32 mask = (1 << (BeamformerSharedMemoryLockKind_Parameters - 1)) | + (1 << (BeamformerSharedMemoryLockKind_ComputePipeline - 1)); if (sm->dirty_regions & mask) { - glNamedBufferSubData(cs->shared_ubo, 0, sizeof(sm->parameters), &sm->parameters); + plan_compute_pipeline(&ctx->shared_memory, cp, cs->filters); + atomic_store_u32(&ctx->ui_read_params, ctx->beamform_work_queue != q); + glNamedBufferSubData(cs->shared_ubo, 0, sizeof(cp->parameters), &cp->parameters); atomic_and_u32(&sm->dirty_regions, ~mask); } @@ -804,37 +900,19 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co frame->frame.das_shader_kind = bp->das_shader_id; frame->frame.compound_count = bp->dec_data_dim[2]; - u32 stage_count = sm->compute_stages_count; - BeamformerShaderKind *stages = sm->compute_stages; - BeamformerShaderParameters *params = sm->compute_shader_parameters; - - for (u32 i = 0; i < stage_count; i++) { - switch (stages[i]) { - case BeamformerShaderKind_DASFast:{ - if (!das_can_use_fast(bp)) - stages[i] = BeamformerShaderKind_DAS; - }break; - case BeamformerShaderKind_DAS:{ - if (das_can_use_fast(bp)) - stages[i] = BeamformerShaderKind_DASFast; - }break; - default:{}break; - } - } - b32 did_sum_shader = 0; - for (u32 i = 0; i < stage_count; i++) { - did_sum_shader |= stages[i] == BeamformerShaderKind_Sum; + for (i32 i = 0; i < cp->shader_count; i++) { + did_sum_shader |= cp->shaders[i] == BeamformerShaderKind_Sum; glBeginQuery(GL_TIME_ELAPSED, cs->shader_timer_ids[i]); - do_compute_shader(ctx, arena, frame, stages[i], params + i); + do_compute_shader(ctx, arena, frame, cp->shaders[i], cp->shader_parameters + i); glEndQuery(GL_TIME_ELAPSED); } /* NOTE(rnp): the first of these blocks until work completes */ - 
for (u32 i = 0; i < stage_count; i++) { + for (i32 i = 0; i < cp->shader_count; i++) { ComputeTimingInfo info = {0}; info.kind = ComputeTimingInfoKind_Shader; - info.shader = stages[i]; + info.shader = cp->shaders[i]; glGetQueryObjectui64v(cs->shader_timer_ids[i], GL_QUERY_RESULT, &info.timer_count); push_compute_timing_info(ctx->compute_timing_table, info); } diff --git a/beamformer.h b/beamformer.h @@ -101,8 +101,28 @@ typedef struct { } BeamformerRenderModel; typedef struct { + BeamformerFilterKind kind; + u32 texture; + i32 length; + f32 sampling_frequency; +} BeamformerFilter; + +typedef struct { + BeamformerShaderKind shaders[MAX_COMPUTE_SHADER_STAGES]; + BeamformerShaderParameters shader_parameters[MAX_COMPUTE_SHADER_STAGES]; + i32 shader_count; + BeamformerDataKind data_kind; + /* TODO(rnp): this can be different from the configuration provided via the + * the shared memory. In fact it could be split up based on shader */ + BeamformerParameters parameters; +} BeamformerComputePipeline; + +typedef struct { u32 programs[BeamformerShaderKind_ComputeCount]; + BeamformerComputePipeline compute_pipeline; + BeamformerFilter filters[BEAMFORMER_FILTER_SLOTS]; + /* NOTE: Decoded data is only relevant in the context of a single frame. 
We use two * buffers so that they can be swapped when chaining multiple compute stages */ u32 rf_data_ssbos[2]; @@ -111,7 +131,6 @@ typedef struct { u32 raw_data_ssbo; u32 shared_ubo; - u32 filter_textures[BEAMFORMER_FILTER_SLOTS]; u32 channel_mapping_texture; u32 sparse_elements_texture; u32 focal_vectors_texture; diff --git a/beamformer_parameters.h b/beamformer_parameters.h @@ -12,19 +12,21 @@ /* X(enumarant, number, shader file name, needs header, pretty name) */ #define COMPUTE_SHADERS \ - X(CudaDecode, 0, "", 0, "CUDA Decode") \ - X(CudaHilbert, 1, "", 0, "CUDA Hilbert") \ - X(DAS, 2, "das", 1, "DAS") \ - X(Decode, 3, "decode", 1, "Decode (I16)") \ - X(DecodeFloat, 4, "", 1, "Decode (F32)") \ - X(DecodeFloatComplex, 5, "", 1, "Decode (F32C)") \ - X(Demodulate, 6, "demod", 1, "Demodulate") \ - X(MinMax, 7, "min_max", 0, "Min/Max") \ - X(Sum, 8, "sum", 0, "Sum") + X(CudaDecode, 0, "", 0, "CUDA Decode") \ + X(CudaHilbert, 1, "", 0, "CUDA Hilbert") \ + X(DAS, 2, "das", 1, "DAS") \ + X(Decode, 3, "decode", 1, "Decode (I16)") \ + X(Demodulate, 4, "demod", 1, "Demodulate (I16)") \ + X(MinMax, 5, "min_max", 0, "Min/Max") \ + X(Sum, 6, "sum", 0, "Sum") #define COMPUTE_SHADERS_INTERNAL \ COMPUTE_SHADERS \ - X(DASFast, 9, "", 1, "DAS (Fast)") + X(DecodeInt16Complex, 7, "", 1, "Decode (I16C)") \ + X(DecodeFloat, 8, "", 1, "Decode (F32)") \ + X(DecodeFloatComplex, 9, "", 1, "Decode (F32C)") \ + X(DemodulateFloat, 10, "", 1, "Demodulate (F32)") \ + X(DASFast, 11, "", 1, "DAS (Fast)") typedef enum { #define X(e, n, s, h, pn) BeamformerShaderKind_##e = n, @@ -48,6 +50,24 @@ typedef struct { X(NONE, 0, "None") \ X(HADAMARD, 1, "Hadamard") +#define BEAMFORMER_DATA_KIND_LIST \ + X(Int16, 0) \ + X(Int16Complex, 1) \ + X(Float32, 2) \ + X(Float32Complex, 3) + +#define X(k, id) BeamformerDataKind_##k = id, +typedef enum {BEAMFORMER_DATA_KIND_LIST} BeamformerDataKind; +#undef X + +#define BEAMFORMER_FILTER_KIND_LIST \ + X(Kaiser, 0) \ + X(MatchedSine, 1) + +#define X(k, id) 
BeamformerFilterKind_##k = id, +typedef enum {BEAMFORMER_FILTER_KIND_LIST} BeamformerFilterKind; +#undef X + /* X(type, id, pretty name) */ #define BEAMFORMER_VIEW_PLANE_TAG_LIST \ X(XZ, 0, "XZ") \ diff --git a/beamformer_work_queue.h b/beamformer_work_queue.h @@ -2,7 +2,7 @@ #ifndef _BEAMFORMER_WORK_QUEUE_H_ #define _BEAMFORMER_WORK_QUEUE_H_ -#define BEAMFORMER_SHARED_MEMORY_VERSION (9UL) +#define BEAMFORMER_SHARED_MEMORY_VERSION (10UL) typedef struct BeamformerComputeFrame BeamformerComputeFrame; typedef struct ShaderReloadContext ShaderReloadContext; @@ -12,7 +12,6 @@ typedef enum { BeamformerWorkKind_ComputeIndirect, BeamformerWorkKind_CreateFilter, BeamformerWorkKind_ReloadShader, - BeamformerWorkKind_SendFrame, BeamformerWorkKind_ExportBuffer, BeamformerWorkKind_UploadBuffer, } BeamformerWorkKind; @@ -20,7 +19,6 @@ typedef enum { typedef enum { BeamformerUploadKind_ChannelMapping, BeamformerUploadKind_FocalVectors, - BeamformerUploadKind_Parameters, BeamformerUploadKind_RFData, BeamformerUploadKind_SparseElements, } BeamformerUploadKind; @@ -31,11 +29,6 @@ typedef struct { i32 shared_memory_offset; } BeamformerUploadContext; -typedef enum { - BeamformerFilterKind_Kaiser, - BeamformerFilterKind_MatchedSine, -} BeamformerFilterKind; - typedef struct { BeamformerFilterKind kind; union { @@ -65,6 +58,7 @@ typedef union { #define BEAMFORMER_SHARED_MEMORY_LOCKS \ X(None) \ + X(ComputePipeline) \ X(ChannelMapping) \ X(FocalVectors) \ X(Parameters) \ @@ -146,9 +140,12 @@ typedef struct { }; }; - BeamformerShaderParameters compute_shader_parameters[MAX_COMPUTE_SHADER_STAGES]; - BeamformerShaderKind compute_stages[MAX_COMPUTE_SHADER_STAGES]; - u32 compute_stages_count; + ////////////////////////// + // Pipeline Configuration + BeamformerShaderKind shaders[MAX_COMPUTE_SHADER_STAGES]; + BeamformerShaderParameters shader_parameters[MAX_COMPUTE_SHADER_STAGES]; + i32 shader_count; + BeamformerDataKind data_kind; /* TODO(rnp): hack: we need a different way of dispatching 
work for export */ b32 start_compute_from_main; diff --git a/helpers/ogl_beamformer_lib.c b/helpers/ogl_beamformer_lib.c @@ -145,23 +145,23 @@ beamformer_get_last_error_string(void) return beamformer_error_string(beamformer_get_last_error()); } -b32 -set_beamformer_pipeline(i32 *stages, u32 stages_count) +function b32 +validate_pipeline(i32 *shaders, i32 shader_count, BeamformerDataKind data_kind) { - b32 result = 0; - if (stages_count <= countof(g_bp->compute_stages)) { - if (check_shared_memory()) { - g_bp->compute_stages_count = 0; - for (u32 i = 0; i < stages_count; i++) { - if (BETWEEN(stages[i], 0, BeamformerShaderKind_ComputeCount)) { - g_bp->compute_stages[g_bp->compute_stages_count++] = (BeamformerShaderKind)stages[i]; - } - } - result = g_bp->compute_stages_count == stages_count; - if (!result) { - g_lib_last_error = BF_LIB_ERR_KIND_INVALID_COMPUTE_STAGE; - g_bp->compute_stages_count = 0; - } + b32 result = shader_count <= countof(g_bp->shaders); + if (result) { + for (i32 i = 0; i < shader_count; i++) + result &= BETWEEN(shaders[i], 0, BeamformerShaderKind_ComputeCount); + if (!result) { + g_lib_last_error = BF_LIB_ERR_KIND_INVALID_COMPUTE_STAGE; + } else if (shaders[0] != BeamformerShaderKind_Demodulate && + shaders[0] != BeamformerShaderKind_Decode) + { + result = BF_LIB_ERR_KIND_INVALID_START_SHADER; + } else if (shaders[0] == BeamformerShaderKind_Demodulate && + !(data_kind == BeamformerDataKind_Int16 || data_kind == BeamformerDataKind_Float32)) + { + result = BF_LIB_ERR_KIND_INVALID_DEMOD_DATA_KIND; } } else { g_lib_last_error = BF_LIB_ERR_KIND_COMPUTE_STAGE_OVERFLOW; @@ -169,19 +169,57 @@ set_beamformer_pipeline(i32 *stages, u32 stages_count) return result; } +b32 +beamformer_set_pipeline_stage_parameters(i32 stage_index, i32 parameter, i32 timeout_ms) +{ + b32 result = 0; + BeamformerSharedMemoryLockKind lock = BeamformerSharedMemoryLockKind_ComputePipeline; + if (check_shared_memory() && g_bp->shader_count != 0 && lib_try_lock(lock, timeout_ms)) 
{ + stage_index %= (i32)g_bp->shader_count; + g_bp->shader_parameters[stage_index].filter_slot = (u8)parameter; + atomic_or_u32(&g_bp->dirty_regions, 1 << (lock - 1)); + lib_release_lock(lock); + } + return result; +} b32 -beamformer_set_pipeline_stage_parameters(i32 stage_index, i32 parameter) +beamformer_push_pipeline(i32 *shaders, i32 shader_count, BeamformerDataKind data_kind, i32 timeout_ms) { b32 result = 0; - if (check_shared_memory() && g_bp->compute_stages_count != 0) { - stage_index %= (i32)g_bp->compute_stages_count; - g_bp->compute_shader_parameters[stage_index].filter_slot = (u8)parameter; + if (validate_pipeline(shaders, shader_count, data_kind) && check_shared_memory()) { + BeamformerSharedMemoryLockKind lock = BeamformerSharedMemoryLockKind_ComputePipeline; + if (lib_try_lock(lock, timeout_ms)) { + g_bp->shader_count = shader_count; + g_bp->data_kind = data_kind; + for (i32 i = 0; i < shader_count; i++) + g_bp->shaders[i] = (BeamformerShaderKind)shaders[i]; + atomic_or_u32(&g_bp->dirty_regions, 1 << (lock - 1)); + lib_release_lock(lock); + result = 1; + } } return result; } b32 +set_beamformer_pipeline(i32 *stages, i32 stages_count) +{ + BeamformerDataKind data_kind = BeamformerDataKind_Int16; + switch (stages[0]) { + case BeamformerShaderKind_DecodeFloat:{ + data_kind = BeamformerDataKind_Float32; + }break; + case BeamformerShaderKind_DecodeFloatComplex:{ + data_kind = BeamformerDataKind_Float32Complex; + }break; + default:{}break; + } + b32 result = beamformer_push_pipeline(stages, stages_count, data_kind, 0); + return result; +} + +b32 beamformer_create_kaiser_low_pass_filter(f32 beta, f32 cutoff_frequency, i16 length, u8 slot) { b32 result = 0; @@ -222,23 +260,35 @@ beamformer_wait_for_compute_dispatch(i32 timeout_ms) } function b32 -beamformer_upload_buffer(void *data, u32 size, i32 store_offset, BeamformerUploadContext upload_context, +locked_region_upload(void *region, void *data, u32 size, BeamformerSharedMemoryLockKind lock, + b32 *dirty, 
i32 timeout_ms) +{ + b32 result = lib_try_lock(lock, timeout_ms); + if (result) { + if (dirty) *dirty = atomic_load_u32(&g_bp->dirty_regions) & (1 << (lock - 1)); + mem_copy(region, data, size); + atomic_or_u32(&g_bp->dirty_regions, (1 << (lock - 1))); + lib_release_lock(lock); + } + return result; +} + +function b32 +beamformer_upload_buffer(void *data, u32 size, i32 store_offset, BeamformerUploadKind kind, BeamformerSharedMemoryLockKind lock, i32 timeout_ms) { b32 result = 0; if (check_shared_memory()) { BeamformWork *work = try_push_work_queue(); - result = work && lib_try_lock(lock, timeout_ms); - if (result) { - work->upload_context = upload_context; + b32 dirty = 0; + result = work && locked_region_upload((u8 *)g_bp + store_offset, data, size, lock, &dirty, timeout_ms); + if (result && !dirty) { + work->upload_context.shared_memory_offset = store_offset; + work->upload_context.kind = kind; + work->upload_context.size = size; work->kind = BeamformerWorkKind_UploadBuffer; work->lock = lock; - mem_copy((u8 *)g_bp + store_offset, data, size); - if ((atomic_load_u32(&g_bp->dirty_regions) & (1 << (lock - 1))) == 0) { - atomic_or_u32(&g_bp->dirty_regions, (1 << (lock - 1))); - beamform_work_queue_push_commit(&g_bp->external_work_queue); - } - lib_release_lock(lock); + beamform_work_queue_push_commit(&g_bp->external_work_queue); } } return result; @@ -253,11 +303,9 @@ beamformer_upload_buffer(void *data, u32 size, i32 store_offset, BeamformerUploa b32 beamformer_push_##name (dtype *data, u32 count, i32 timeout_ms) { \ b32 result = 0; \ if (count <= countof(g_bp->name)) { \ - BeamformerUploadContext uc = {0}; \ - uc.shared_memory_offset = offsetof(BeamformerSharedMemory, name); \ - uc.kind = BeamformerUploadKind_##lock_name; \ - uc.size = count * elements * sizeof(dtype); \ - result = beamformer_upload_buffer(data, uc.size, uc.shared_memory_offset, uc, \ + result = beamformer_upload_buffer(data, count * elements * sizeof(dtype), \ + offsetof(BeamformerSharedMemory, 
name), \ + BeamformerUploadKind_##lock_name, \ BeamformerSharedMemoryLockKind_##lock_name, timeout_ms); \ } else { \ g_lib_last_error = BF_LIB_ERR_KIND_BUFFER_OVERFLOW; \ @@ -272,11 +320,8 @@ beamformer_push_data_base(void *data, u32 data_size, i32 timeout_ms, b32 start_f { b32 result = 0; if (data_size <= BEAMFORMER_MAX_RF_DATA_SIZE) { - BeamformerUploadContext uc = {0}; - uc.shared_memory_offset = BEAMFORMER_SCRATCH_OFF; - uc.size = data_size; - uc.kind = BeamformerUploadKind_RFData; - result = beamformer_upload_buffer(data, data_size, uc.shared_memory_offset, uc, + result = beamformer_upload_buffer(data, data_size, BEAMFORMER_SCRATCH_OFF, + BeamformerUploadKind_RFData, BeamformerSharedMemoryLockKind_ScratchSpace, timeout_ms); if (result && start_from_main) atomic_store_u32(&g_bp->start_compute_from_main, 1); } else { @@ -315,39 +360,27 @@ beamformer_push_data_with_compute(void *data, u32 data_size, u32 image_plane_tag b32 beamformer_push_parameters(BeamformerParameters *bp, i32 timeout_ms) { - BeamformerUploadContext uc = {0}; - uc.shared_memory_offset = offsetof(BeamformerSharedMemory, parameters); - uc.size = sizeof(g_bp->parameters); - uc.kind = BeamformerUploadKind_Parameters; - b32 result = beamformer_upload_buffer(bp, sizeof(*bp), - offsetof(BeamformerSharedMemory, parameters), uc, - BeamformerSharedMemoryLockKind_Parameters, timeout_ms); + b32 result = locked_region_upload((u8 *)g_bp + offsetof(BeamformerSharedMemory, parameters), + bp, sizeof(*bp), BeamformerSharedMemoryLockKind_Parameters, + 0, timeout_ms); return result; } b32 beamformer_push_parameters_ui(BeamformerUIParameters *bp, i32 timeout_ms) { - BeamformerUploadContext uc = {0}; - uc.shared_memory_offset = offsetof(BeamformerSharedMemory, parameters); - uc.size = sizeof(g_bp->parameters); - uc.kind = BeamformerUploadKind_Parameters; - b32 result = beamformer_upload_buffer(bp, sizeof(*bp), - offsetof(BeamformerSharedMemory, parameters_ui), uc, - BeamformerSharedMemoryLockKind_Parameters, 
timeout_ms); + b32 result = locked_region_upload((u8 *)g_bp + offsetof(BeamformerSharedMemory, parameters_ui), + bp, sizeof(*bp), BeamformerSharedMemoryLockKind_Parameters, + 0, timeout_ms); return result; } b32 beamformer_push_parameters_head(BeamformerParametersHead *bp, i32 timeout_ms) { - BeamformerUploadContext uc = {0}; - uc.shared_memory_offset = offsetof(BeamformerSharedMemory, parameters); - uc.size = sizeof(g_bp->parameters); - uc.kind = BeamformerUploadKind_Parameters; - b32 result = beamformer_upload_buffer(bp, sizeof(*bp), - offsetof(BeamformerSharedMemory, parameters_head), uc, - BeamformerSharedMemoryLockKind_Parameters, timeout_ms); + b32 result = locked_region_upload((u8 *)g_bp + offsetof(BeamformerSharedMemory, parameters_head), + bp, sizeof(*bp), BeamformerSharedMemoryLockKind_Parameters, + 0, timeout_ms); return result; } diff --git a/helpers/ogl_beamformer_lib_base.h b/helpers/ogl_beamformer_lib_base.h @@ -8,17 +8,19 @@ #endif #define BEAMFORMER_LIB_ERRORS \ - X(NONE, 0, "None") \ - X(VERSION_MISMATCH, 1, "host-library version mismatch") \ - X(INVALID_ACCESS, 2, "library in invalid state") \ - X(COMPUTE_STAGE_OVERFLOW, 3, "compute stage overflow: maximum stages: " str(MAX_COMPUTE_SHADER_STAGES)) \ - X(INVALID_COMPUTE_STAGE, 4, "invalid compute shader stage") \ - X(INVALID_IMAGE_PLANE, 5, "invalid image plane") \ - X(BUFFER_OVERFLOW, 6, "passed buffer size exceeds available space") \ - X(WORK_QUEUE_FULL, 7, "work queue full") \ - X(EXPORT_SPACE_OVERFLOW, 8, "not enough space for data export") \ - X(SHARED_MEMORY, 9, "failed to open shared memory region") \ - X(SYNC_VARIABLE, 10, "failed to acquire lock within timeout period") + X(NONE, 0, "None") \ + X(VERSION_MISMATCH, 1, "host-library version mismatch") \ + X(INVALID_ACCESS, 2, "library in invalid state") \ + X(COMPUTE_STAGE_OVERFLOW, 3, "compute stage overflow: maximum stages: " str(MAX_COMPUTE_SHADER_STAGES)) \ + X(INVALID_COMPUTE_STAGE, 4, "invalid compute shader stage") \ + 
X(INVALID_START_SHADER, 5, "starting shader not Decode or Demodulate") \ + X(INVALID_DEMOD_DATA_KIND, 6, "data kind for demodulation not Int16 or Float") \ + X(INVALID_IMAGE_PLANE, 7, "invalid image plane") \ + X(BUFFER_OVERFLOW, 8, "passed buffer size exceeds available space") \ + X(WORK_QUEUE_FULL, 9, "work queue full") \ + X(EXPORT_SPACE_OVERFLOW, 10, "not enough space for data export") \ + X(SHARED_MEMORY, 11, "failed to open shared memory region") \ + X(SYNC_VARIABLE, 12, "failed to acquire lock within timeout period") #define X(type, num, string) BF_LIB_ERR_KIND_ ##type = num, typedef enum {BEAMFORMER_LIB_ERRORS} BeamformerLibErrorKind; @@ -32,9 +34,6 @@ LIB_FN const char *beamformer_error_string(BeamformerLibErrorKind kind); /* IMPORTANT: timeout of -1 will block forever */ -LIB_FN uint32_t set_beamformer_parameters(BeamformerParametersV0 *); -LIB_FN uint32_t set_beamformer_pipeline(int32_t *stages, uint32_t stages_count); -LIB_FN uint32_t send_data(void *data, uint32_t data_size); /* NOTE: sends data and waits for (complex) beamformed data to be returned. * out_data: must be allocated by the caller as 2 floats per output point. 
*/ LIB_FN uint32_t beamform_data_synchronized(void *data, uint32_t data_size, int32_t output_points[3], @@ -56,6 +55,8 @@ LIB_FN uint32_t beamformer_push_channel_mapping(int16_t *mapping, uint32_t coun LIB_FN uint32_t beamformer_push_sparse_elements(int16_t *elements, uint32_t count, int32_t timeout_ms); LIB_FN uint32_t beamformer_push_focal_vectors(float *vectors, uint32_t count, int32_t timeout_ms); +LIB_FN uint32_t beamformer_set_pipeline_stage_parameters(int32_t stage_index, int32_t parameter, int32_t timeout_ms); +LIB_FN uint32_t beamformer_push_pipeline(int32_t *shaders, int32_t shader_count, BeamformerDataKind data_kind, int32_t timeout_ms); LIB_FN uint32_t beamformer_push_parameters(BeamformerParameters *, int32_t timeout_ms); LIB_FN uint32_t beamformer_push_parameters_ui(BeamformerUIParameters *, int32_t timeout_ms); LIB_FN uint32_t beamformer_push_parameters_head(BeamformerParametersHead *, int32_t timeout_ms); @@ -79,7 +80,7 @@ LIB_FN uint32_t beamformer_push_parameters_head(BeamformerParametersHead *, int3 * M: * M = (A - 8) / (2.285 (ω_s - ω_p)) */ -LIB_FN uint32_t beamformer_create_kaiser_low_pass_filter(float beta, f32 cutoff_frequency, +LIB_FN uint32_t beamformer_create_kaiser_low_pass_filter(float beta, float cutoff_frequency, int16_t length, uint8_t slot); ////////////////////////// @@ -87,3 +88,9 @@ LIB_FN uint32_t beamformer_create_kaiser_low_pass_filter(float beta, f32 cutoff_ LIB_FN int32_t beamformer_live_parameters_get_dirty_flag(void); LIB_FN uint32_t beamformer_set_live_parameters(BeamformerLiveImagingParameters *); LIB_FN BeamformerLiveImagingParameters *beamformer_get_live_parameters(void); + +////////////// +// Legacy API +LIB_FN uint32_t set_beamformer_parameters(BeamformerParametersV0 *); +LIB_FN uint32_t set_beamformer_pipeline(int32_t *stages, int32_t stages_count); +LIB_FN uint32_t send_data(void *data, uint32_t data_size); diff --git a/shaders/das.glsl b/shaders/das.glsl @@ -59,26 +59,23 @@ vec2 cubic(int base_index, float x) vec2 
T2 = C_SPLINE * (samples[3] - P1); mat2x4 C = mat2x4(vec4(P1.x, P2.x, T1.x, T2.x), vec4(P1.y, P2.y, T1.y, T2.y)); - float fs = sampling_frequency / decimation_rate; - vec2 result = rotate_iq(S * h * C, x / fs); + vec2 result = rotate_iq(S * h * C, x / sampling_frequency); return result; } vec2 sample_rf(int channel, int transmit, float t) { vec2 result; - float fs = sampling_frequency / decimation_rate; - int base_index = int(channel * dec_data_dim.x * dec_data_dim.z + transmit * dec_data_dim.x); + int base_index = int(channel * dec_data_dim.x * dec_data_dim.z + transmit * dec_data_dim.x); if (interpolate) result = cubic(base_index, t); - else result = rotate_iq(rf_data[base_index + int(round(t))], t / fs); + else result = rotate_iq(rf_data[base_index + int(round(t))], t / sampling_frequency); return result; } float sample_index(float distance) { - float fs = sampling_frequency / decimation_rate; float time = distance / speed_of_sound + time_offset; - return time * fs; + return time * sampling_frequency; } float apodize(float arg) diff --git a/shaders/decode.glsl b/shaders/decode.glsl @@ -20,6 +20,12 @@ #define RESULT_TYPE_CAST(x) vec4((x), 0, 0) #define SAMPLE_DATA_TYPE vec2 #define SAMPLE_TYPE_CAST(x) (x) +#elif defined(INPUT_DATA_TYPE_INT16_COMPLEX) + #define INPUT_DATA_TYPE int + #define RF_SAMPLES_PER_INDEX 1 + #define RESULT_TYPE_CAST(x) vec4((x), 0, 0) + #define SAMPLE_DATA_TYPE vec2 + #define SAMPLE_TYPE_CAST(x) vec2(((x) << 16) >> 16, (x) >> 16) #else #define INPUT_DATA_TYPE int #define RF_SAMPLES_PER_INDEX 2 diff --git a/shaders/demod.glsl b/shaders/demod.glsl @@ -19,7 +19,7 @@ void main() uint in_offset = (dec_data_dim.x * dec_data_dim.z * channel + dec_data_dim.x * transmit); uint out_offset = (dec_data_dim.x * dec_data_dim.z * channel + dec_data_dim.x * transmit) + out_sample; - float arg = radians(360) * center_frequency / sampling_frequency; + float arg = radians(360) * center_frequency / (sampling_frequency * decimation_rate); vec2 result = vec2(0); 
for (int i = 0; i < imageSize(filter_coefficients).x; i++) { int index = int(in_sample + i); diff --git a/static.c b/static.c @@ -331,9 +331,9 @@ setup_beamformer(Arena *memory, BeamformerCtx **o_ctx, BeamformerInput **o_input sm->version = BEAMFORMER_SHARED_MEMORY_VERSION; /* NOTE: default compute shader pipeline */ - sm->compute_stages[0] = BeamformerShaderKind_Decode; - sm->compute_stages[1] = BeamformerShaderKind_DAS; - sm->compute_stages_count = 2; + sm->shaders[0] = BeamformerShaderKind_Decode; + sm->shaders[1] = BeamformerShaderKind_DAS; + sm->shader_count = 2; GLWorkerThreadContext *worker = &ctx->os.compute_worker; /* TODO(rnp): we should lock this down after we have something working */ diff --git a/tests/throughput.c b/tests/throughput.c @@ -354,7 +354,7 @@ execute_study(s8 study, Arena arena, Stream path, Options *options) free(zbp); i32 shader_stages[16]; - u32 shader_stage_count = 0; + i32 shader_stage_count = 0; if (options->cuda) shader_stages[shader_stage_count++] = BeamformerShaderKind_CudaDecode; else shader_stages[shader_stage_count++] = BeamformerShaderKind_Decode; shader_stages[shader_stage_count++] = BeamformerShaderKind_DAS; diff --git a/ui.c b/ui.c @@ -2570,8 +2570,9 @@ push_compute_time(Arena *arena, s8 prefix, f32 time) } function v2 -draw_compute_stats_bar_view(BeamformerUI *ui, Arena arena, ComputeShaderStats *stats, u32 *stages, - u32 stages_count, f32 compute_time_sum, TextSpec ts, Rect r, v2 mouse) +draw_compute_stats_bar_view(BeamformerUI *ui, Arena arena, ComputeShaderStats *stats, + BeamformerShaderKind *stages, i32 stages_count, f32 compute_time_sum, + TextSpec ts, Rect r, v2 mouse) { read_only local_persist s8 frame_labels[] = {s8_comp("0:"), s8_comp("-1:"), s8_comp("-2:"), s8_comp("-3:")}; f32 total_times[countof(frame_labels)] = {0}; @@ -2581,7 +2582,7 @@ draw_compute_stats_bar_view(BeamformerUI *ui, Arena arena, ComputeShaderStats *s cells[0].text = frame_labels[i]; u32 frame_index = (stats->latest_frame_index - i) % 
countof(stats->table.times); u32 seen_shaders = 0; - for (u32 j = 0; j < stages_count; j++) { + for (i32 j = 0; j < stages_count; j++) { if ((seen_shaders & (1u << stages[j])) == 0) total_times[i] += stats->table.times[frame_index][stages[j]]; seen_shaders |= (1u << stages[j]); @@ -2616,7 +2617,7 @@ draw_compute_stats_bar_view(BeamformerUI *ui, Arena arena, ComputeShaderStats *s Rect rect; rect.pos = v2_add(cr.pos, (v2){{cr.size.w + table->cell_pad.w , cr.size.h * 0.15f}}); rect.size = (v2){.y = 0.7f * cr.size.h}; - for (u32 i = 0; i < stages_count; i++) { + for (i32 i = 0; i < stages_count; i++) { rect.size.w = total_width * stats->table.times[frame_index][stages[i]] / total_times[row_index]; Color color = colour_from_normalized(g_colour_palette[stages[i] % countof(g_colour_palette)]); DrawRectangleRec(rect.rl, color); @@ -2662,19 +2663,16 @@ draw_compute_stats_view(BeamformerUI *ui, Arena arena, Variable *view, Rect r, v assert(view->type == VT_COMPUTE_STATS_VIEW); ComputeStatsView *csv = &view->compute_stats_view; - BeamformerSharedMemory *sm = ui->shared_memory.region; - ComputeShaderStats *stats = csv->compute_shader_stats; + BeamformerComputePipeline *cp = &ui->beamformer_context->csctx.compute_pipeline; + ComputeShaderStats *stats = csv->compute_shader_stats; f32 compute_time_sum = 0; - u32 stages = sm->compute_stages_count; + i32 stages = cp->shader_count; TextSpec text_spec = {.font = &ui->font, .colour = FG_COLOUR, .flags = TF_LIMITED}; - u32 compute_stages[MAX_COMPUTE_SHADER_STAGES]; - mem_copy(compute_stages, sm->compute_stages, stages * sizeof(*compute_stages)); - static_assert(BeamformerShaderKind_ComputeCount <= 32, "shader kind bitfield test"); u32 seen_shaders = 0; - for (u32 i = 0; i < stages; i++) { - BeamformerShaderKind index = compute_stages[i]; + for (i32 i = 0; i < stages; i++) { + BeamformerShaderKind index = cp->shaders[i]; if ((seen_shaders & (1u << index)) == 0) compute_time_sum += stats->average_times[index]; seen_shaders |= (1u << 
index); @@ -2689,13 +2687,13 @@ draw_compute_stats_view(BeamformerUI *ui, Arena arena, Variable *view, Rect r, v read_only local_persist s8 labels[BeamformerShaderKind_ComputeCount] = {COMPUTE_SHADERS_INTERNAL}; #undef X da_reserve(&arena, table, stages); - for (u32 i = 0; i < stages; i++) { - push_table_time_row(table, &arena, labels[compute_stages[i]], - stats->average_times[compute_stages[i]]); + for (i32 i = 0; i < stages; i++) { + push_table_time_row(table, &arena, labels[cp->shaders[i]], + stats->average_times[cp->shaders[i]]); } }break; case ComputeStatsViewKind_Bar:{ - result = draw_compute_stats_bar_view(ui, arena, stats, compute_stages, stages, compute_time_sum, + result = draw_compute_stats_bar_view(ui, arena, stats, cp->shaders, stages, compute_time_sum, text_spec, r, mouse); r.pos = v2_add(r.pos, (v2){.y = result.y}); }break;