Commit: 2d3df15f1c6f5e3f2ecbf61de94bfc4718d1c4ac
Parent: b43824566859bc568db1ffe9034c3ad899fbc480
Author: Randy Palamar
Date: Mon, 4 Aug 2025 12:27:43 -0600
core/lib: support multiple parameter blocks
currently the ui is limited to displaying block 0 and won't display
anything unless data was sent to that block. Additionally garbage
collection of blocks that appear to be unused is not yet implemented,
so GPU data such as textures and UBOs associated with a block are
held until the program exits even after the block has stopped being
used.
Diffstat:
17 files changed, 1063 insertions(+), 849 deletions(-)
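
The client-side flow this enables, sketched with the helpers added in
beamformer_shared_memory.c below (the writer side of the library is not part of
this diff, so the surrounding function is illustrative only):

	function void
	example_update_focal_vectors(SharedMemoryRegion *os_sm, u32 block, v2 *vectors, u32 count)
	{
		/* illustrative sketch: take the per-block lock, write the region,
		 * then mark it dirty so the compute thread re-uploads the texture */
		BeamformerParameterBlock *pb = beamformer_parameter_block_lock(os_sm, block, -1);
		if (pb) {
			assert(count <= countof(pb->focal_vectors));
			mem_copy(pb->focal_vectors, vectors, count * sizeof(*vectors));
			mark_parameter_block_region_dirty(os_sm->region, block, BeamformerParameterBlockRegion_FocalVectors);
			beamformer_parameter_block_unlock(os_sm, block);
		}
	}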
diff --git a/beamformer.c b/beamformer.c
@@ -17,7 +17,6 @@
*/
#include "beamformer.h"
-#include "beamformer_work_queue.c"
global f32 dt_for_frame;
global u32 cycle_t;
@@ -41,35 +40,98 @@ typedef struct {
} ComputeFrameIterator;
function void
-beamformer_filter_update(BeamformerFilter *f, BeamformerCreateFilterContext *cfc,
- f32 sampling_frequency, Arena arena)
+beamformer_compute_plan_release(BeamformerComputeContext *cc, u32 block)
+{
+ assert(block < countof(cc->compute_plans));
+ BeamformerComputePlan *cp = cc->compute_plans[block];
+ if (cp) {
+ glDeleteBuffers(countof(cp->ubos), cp->ubos);
+ glDeleteTextures(countof(cp->textures), cp->textures);
+ for (u32 i = 0; i < countof(cp->filters); i++)
+ glDeleteTextures(1, &cp->filters[i].texture);
+ cc->compute_plans[block] = 0;
+ SLLPushFreelist(cp, cc->compute_plan_freelist);
+ }
+}
+
+function BeamformerComputePlan *
+beamformer_compute_plan_for_block(BeamformerComputeContext *cc, u32 block, Arena *arena)
+{
+ assert(block < countof(cc->compute_plans));
+ BeamformerComputePlan *result = cc->compute_plans[block];
+ if (!result) {
+ result = SLLPopFreelist(cc->compute_plan_freelist);
+ if (result) zero_struct(result);
+ else result = push_struct(arena, BeamformerComputePlan);
+ cc->compute_plans[block] = result;
+
+ glCreateBuffers(countof(result->ubos), result->ubos);
+
+ Stream label = arena_stream(*arena);
+ #define X(k, t, ...) \
+ glNamedBufferStorage(result->ubos[BeamformerComputeUBOKind_##k], sizeof(t), \
+ 0, GL_DYNAMIC_STORAGE_BIT); \
+ stream_append_s8(&label, s8(#t "[")); \
+ stream_append_u64(&label, block); \
+ stream_append_byte(&label, ']'); \
+ glObjectLabel(GL_BUFFER, result->ubos[BeamformerComputeUBOKind_##k], \
+ label.widx, (c8 *)label.data); \
+ label.widx = 0;
+ BEAMFORMER_COMPUTE_UBO_LIST
+ #undef X
+
+ #define X(_k, t, ...) t,
+ GLenum gl_kind[] = {BEAMFORMER_COMPUTE_TEXTURE_LIST};
+ #undef X
+ read_only local_persist s8 tex_prefix[] = {
+ #define X(k, ...) s8(#k "["),
+ BEAMFORMER_COMPUTE_TEXTURE_LIST
+ #undef X
+ };
+ glCreateTextures(GL_TEXTURE_1D, BeamformerComputeTextureKind_Count - 1, result->textures);
+ for (u32 i = 0; i < BeamformerComputeTextureKind_Count - 1; i++) {
+ /* TODO(rnp): this could be predicated on channel count for this compute plan */
+ glTextureStorage1D(result->textures[i], 1, gl_kind[i], BeamformerMaxChannelCount);
+ stream_append_s8(&label, tex_prefix[i]);
+ stream_append_u64(&label, block);
+ stream_append_byte(&label, ']');
+ glObjectLabel(GL_TEXTURE, result->textures[i], label.widx, (c8 *)label.data);
+ label.widx = 0;
+ }
+ }
+ return result;
+}
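+
+Acquire and release pair up per block; a hypothetical caller (e.g. the garbage
+collection pass the commit message defers) might look like:
+
+	BeamformerComputePlan *cp = beamformer_compute_plan_for_block(cc, block, &arena);
+	/* ... commit parameters, dispatch compute ... */
+	beamformer_compute_plan_release(cc, block); /* deletes UBOs/textures, recycles cp via the freelist */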
+
+function void
+beamformer_filter_update(BeamformerFilter *f, BeamformerFilterKind kind,
+ BeamformerFilterParameters fp, Arena arena)
{
glDeleteTextures(1, &f->texture);
glCreateTextures(GL_TEXTURE_1D, 1, &f->texture);
- glTextureStorage1D(f->texture, 1, GL_R32F, cfc->length);
+ glTextureStorage1D(f->texture, 1, GL_R32F, fp.length);
f32 *filter = 0;
- switch (cfc->kind) {
+ switch (kind) {
case BeamformerFilterKind_Kaiser:{
- filter = kaiser_low_pass_filter(&arena, cfc->cutoff_frequency, sampling_frequency,
- cfc->beta, cfc->length);
+ filter = kaiser_low_pass_filter(&arena, fp.cutoff_frequency, fp.sampling_frequency,
+ fp.beta, fp.length);
}break;
InvalidDefaultCase;
}
- f->kind = cfc->kind;
- f->length = cfc->length;
- f->sampling_frequency = sampling_frequency;
- glTextureSubImage1D(f->texture, 0, 0, f->length, GL_RED, GL_FLOAT, filter);
+ f->kind = kind;
+ f->parameters = fp;
+ glTextureSubImage1D(f->texture, 0, 0, fp.length, GL_RED, GL_FLOAT, filter);
}
function f32
beamformer_filter_time_offset(BeamformerFilter *f)
{
f32 result = 0;
+ BeamformerFilterParameters *fp = &f->parameters;
switch (f->kind) {
case BeamformerFilterKind_Kaiser:{
- result = (f32)f->length / 2.0f / f->sampling_frequency;
+ result = (f32)fp->length / 2.0f / fp->sampling_frequency;
}break;
InvalidDefaultCase;
}
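Note: the value computed in the Kaiser branch is the usual linear-phase FIR
group delay, roughly half the filter length in samples divided by the sampling
rate; plan_compute_pipeline() folds it into bp->time_offset so demodulation
stays time-aligned with the raw data.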
@@ -146,43 +208,54 @@ alloc_beamform_frame(GLParams *gp, BeamformerFrame *out, iv3 out_dim, s8 name, A
}
function void
-alloc_shader_storage(BeamformerCtx *ctx, u32 rf_raw_size, Arena a)
+update_hadamard_texture(BeamformerComputePlan *cp, i32 order, Arena arena)
{
- ComputeShaderCtx *cs = &ctx->csctx;
- BeamformerParameters *bp = &((BeamformerSharedMemory *)ctx->shared_memory.region)->parameters;
+ i32 *hadamard = make_hadamard_transpose(&arena, order);
+ if (hadamard) {
+ cp->hadamard_order = order;
+ u32 *texture = cp->textures + BeamformerComputeTextureKind_Hadamard;
+ glDeleteTextures(1, texture);
+ glCreateTextures(GL_TEXTURE_2D, 1, texture);
+ glTextureStorage2D(*texture, 1, GL_R8I, order, order);
+ glTextureSubImage2D(*texture, 0, 0, 0, order, order, GL_RED_INTEGER, GL_INT, hadamard);
+
+ Stream label = arena_stream(arena);
+ stream_append_s8(&label, s8("Hadamard"));
+ stream_append_i64(&label, order);
+ LABEL_GL_OBJECT(GL_TEXTURE, *texture, stream_to_s8(&label));
+ }
+}
- cs->dec_data_dim = uv4_from_u32_array(bp->dec_data_dim);
- cs->rf_raw_size = rf_raw_size;
+function void
+alloc_shader_storage(BeamformerCtx *ctx, u32 decoded_data_size, Arena arena)
+{
+ BeamformerComputeContext *cc = &ctx->compute_context;
+ glDeleteBuffers(countof(cc->ping_pong_ssbos), cc->ping_pong_ssbos);
+ glCreateBuffers(countof(cc->ping_pong_ssbos), cc->ping_pong_ssbos);
- glDeleteBuffers(ARRAY_COUNT(cs->rf_data_ssbos), cs->rf_data_ssbos);
- glCreateBuffers(ARRAY_COUNT(cs->rf_data_ssbos), cs->rf_data_ssbos);
+ cc->ping_pong_ssbo_size = decoded_data_size;
- uz rf_decoded_size = 2 * sizeof(f32) * cs->dec_data_dim.x * cs->dec_data_dim.y * cs->dec_data_dim.z;
- Stream label = arena_stream(a);
- stream_append_s8(&label, s8("Decoded_RF_SSBO_"));
+ Stream label = arena_stream(arena);
+ stream_append_s8(&label, s8("PingPongSSBO["));
i32 s_widx = label.widx;
- for (i32 i = 0; i < countof(cs->rf_data_ssbos); i++) {
- glNamedBufferStorage(cs->rf_data_ssbos[i], (iz)rf_decoded_size, 0, 0);
+ for (i32 i = 0; i < countof(cc->ping_pong_ssbos); i++) {
+ glNamedBufferStorage(cc->ping_pong_ssbos[i], (iz)decoded_data_size, 0, 0);
stream_append_i64(&label, i);
- LABEL_GL_OBJECT(GL_BUFFER, cs->rf_data_ssbos[i], stream_to_s8(&label));
+ stream_append_byte(&label, ']');
+ LABEL_GL_OBJECT(GL_BUFFER, cc->ping_pong_ssbos[i], stream_to_s8(&label));
stream_reset(&label, s_widx);
}
+ /* TODO(rnp): (25.08.04) cuda lib is heavily broken atm. First there are multiple RF
+ * buffers and cuda decode shouldn't assume that the data is coming from the rf_buffer
+ * ssbo. Second each parameter block may need a different hadamard matrix so ideally
+ * decode should just take the texture as a parameter. Third, none of these dimensions
+	 * need to be pre-known by the library unless it's allocating GPU memory, which it
+	 * shouldn't need to do. For now grab out of parameter block 0 even though that is not correct */
+ BeamformerParameterBlock *pb = beamformer_parameter_block(ctx->shared_memory.region, 0);
/* NOTE(rnp): these are stubs when CUDA isn't supported */
- /* TODO(rnp): cuda should know that there is more than one raw rf ssbo */
- cs->cuda_lib.register_buffers(cs->rf_data_ssbos, countof(cs->rf_data_ssbos), cs->rf_buffer.ssbo);
- cs->cuda_lib.init(bp->rf_raw_dim, bp->dec_data_dim);
-
- i32 order = (i32)cs->dec_data_dim.z;
- i32 *hadamard = make_hadamard_transpose(&a, order);
- if (hadamard) {
- glDeleteTextures(1, &cs->hadamard_texture);
- glCreateTextures(GL_TEXTURE_2D, 1, &cs->hadamard_texture);
- glTextureStorage2D(cs->hadamard_texture, 1, GL_R8I, order, order);
- glTextureSubImage2D(cs->hadamard_texture, 0, 0, 0, order, order, GL_RED_INTEGER,
- GL_INT, hadamard);
- LABEL_GL_OBJECT(GL_TEXTURE, cs->hadamard_texture, s8("Hadamard_Matrix"));
- }
+ cc->cuda_lib.register_buffers(cc->ping_pong_ssbos, countof(cc->ping_pong_ssbos), cc->rf_buffer.ssbo);
+ cc->cuda_lib.init(pb->parameters.rf_raw_dim, pb->parameters.dec_data_dim);
}
function void
@@ -193,7 +266,8 @@ push_compute_timing_info(ComputeTimingTable *t, ComputeTimingInfo info)
}
function b32
-fill_frame_compute_work(BeamformerCtx *ctx, BeamformWork *work, BeamformerViewPlaneTag plane, b32 indirect)
+fill_frame_compute_work(BeamformerCtx *ctx, BeamformWork *work, BeamformerViewPlaneTag plane,
+ u32 parameter_block, b32 indirect)
{
b32 result = 0;
if (work) {
@@ -202,16 +276,17 @@ fill_frame_compute_work(BeamformerCtx *ctx, BeamformWork *work, BeamformerViewPl
u32 frame_index = frame_id % countof(ctx->beamform_frames);
work->kind = indirect? BeamformerWorkKind_ComputeIndirect : BeamformerWorkKind_Compute;
work->lock = BeamformerSharedMemoryLockKind_DispatchCompute;
- work->frame = ctx->beamform_frames + frame_index;
- work->frame->ready_to_present = 0;
- work->frame->view_plane_tag = plane;
- work->frame->id = frame_id;
+ work->compute_context.parameter_block = parameter_block;
+ work->compute_context.frame = ctx->beamform_frames + frame_index;
+ work->compute_context.frame->ready_to_present = 0;
+ work->compute_context.frame->view_plane_tag = plane;
+ work->compute_context.frame->id = frame_id;
}
return result;
}
function void
-do_sum_shader(ComputeShaderCtx *cs, u32 *in_textures, u32 in_texture_count, f32 in_scale,
+do_sum_shader(BeamformerComputeContext *cc, u32 *in_textures, u32 in_texture_count, f32 in_scale,
u32 out_texture, iv3 out_data_dim)
{
/* NOTE: zero output before summing */
@@ -219,7 +294,7 @@ do_sum_shader(ComputeShaderCtx *cs, u32 *in_textures, u32 in_texture_count, f32
glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT);
glBindImageTexture(0, out_texture, 0, GL_TRUE, 0, GL_READ_WRITE, GL_RG32F);
- glProgramUniform1f(cs->programs[BeamformerShaderKind_Sum], SUM_PRESCALE_UNIFORM_LOC, in_scale);
+ glProgramUniform1f(cc->programs[BeamformerShaderKind_Sum], SUM_PRESCALE_UNIFORM_LOC, in_scale);
for (u32 i = 0; i < in_texture_count; i++) {
glBindImageTexture(1, in_textures[i], 0, GL_TRUE, 0, GL_READ_ONLY, GL_RG32F);
glDispatchCompute(ORONE((u32)out_data_dim.x / 32u),
@@ -296,21 +371,16 @@ compute_cursor_finished(struct compute_cursor *cursor)
}
function void
-plan_compute_pipeline(SharedMemoryRegion *os_sm, BeamformerComputePipeline *cp, BeamformerFilter *filters)
+plan_compute_pipeline(BeamformerComputePlan *cp, BeamformerParameterBlock *pb)
{
- BeamformerSharedMemory *sm = os_sm->region;
- BeamformerParameters *bp = &cp->das_ubo_data;
-
- i32 compute_lock = BeamformerSharedMemoryLockKind_ComputePipeline;
- i32 params_lock = BeamformerSharedMemoryLockKind_Parameters;
- os_shared_memory_region_lock(os_sm, sm->locks, compute_lock, (u32)-1);
+ BeamformerParameters *bp = &cp->das_ubo_data;
- b32 decode_first = sm->shaders[0] == BeamformerShaderKind_Decode;
+ b32 decode_first = pb->pipeline.shaders[0] == BeamformerShaderKind_Decode;
b32 cuda_hilbert = 0;
b32 demodulate = 0;
- for (i32 i = 0; i < sm->shader_count; i++) {
- switch (sm->shaders[i]) {
+ for (u32 i = 0; i < pb->pipeline.shader_count; i++) {
+ switch (pb->pipeline.shaders[i]) {
case BeamformerShaderKind_CudaHilbert:{ cuda_hilbert = 1; }break;
case BeamformerShaderKind_Demodulate:{ demodulate = 1; }break;
default:{}break;
@@ -319,15 +389,13 @@ plan_compute_pipeline(SharedMemoryRegion *os_sm, BeamformerComputePipeline *cp,
if (demodulate) cuda_hilbert = 0;
- os_shared_memory_region_lock(os_sm, sm->locks, params_lock, (u32)-1);
- mem_copy(bp, &sm->parameters, sizeof(*bp));
- os_shared_memory_region_unlock(os_sm, sm->locks, params_lock);
+ mem_copy(bp, &pb->parameters, sizeof(*bp));
- BeamformerDataKind data_kind = sm->data_kind;
- cp->shader_count = 0;
- for (i32 i = 0; i < sm->shader_count; i++) {
- BeamformerShaderParameters *sp = sm->shader_parameters + i;
- u32 shader = sm->shaders[i];
+ BeamformerDataKind data_kind = pb->pipeline.data_kind;
+ cp->pipeline.shader_count = 0;
+ for (u32 i = 0; i < pb->pipeline.shader_count; i++) {
+ BeamformerShaderParameters *sp = pb->pipeline.parameters + i;
+ u32 shader = pb->pipeline.shaders[i];
b32 commit = 0;
switch (shader) {
@@ -355,7 +423,7 @@ plan_compute_pipeline(SharedMemoryRegion *os_sm, BeamformerComputePipeline *cp,
case BeamformerShaderKind_Demodulate:{
if (decode_first || (!decode_first && data_kind == BeamformerDataKind_Float32))
shader = BeamformerShaderKind_DemodulateFloat;
- bp->time_offset += beamformer_filter_time_offset(filters + sp->filter_slot);
+ bp->time_offset += beamformer_filter_time_offset(cp->filters + sp->filter_slot);
commit = 1;
}break;
case BeamformerShaderKind_DAS:{
@@ -367,12 +435,12 @@ plan_compute_pipeline(SharedMemoryRegion *os_sm, BeamformerComputePipeline *cp,
}
if (commit) {
- i32 index = cp->shader_count++;
- cp->shaders[index] = shader;
- cp->shader_parameters[index] = *sp;
+ u32 index = cp->pipeline.shader_count++;
+ cp->pipeline.shaders[index] = shader;
+ cp->pipeline.parameters[index] = *sp;
}
}
- os_shared_memory_region_unlock(os_sm, sm->locks, compute_lock);
+ cp->pipeline.data_kind = data_kind;
u32 das_sample_stride = 1;
u32 das_transmit_stride = bp->dec_data_dim[0];
@@ -410,7 +478,7 @@ plan_compute_pipeline(SharedMemoryRegion *os_sm, BeamformerComputePipeline *cp,
cp->decode_dispatch.z = (u32)ceil_f32((f32)bp->dec_data_dim[2] / DECODE_LOCAL_SIZE_Z);
/* NOTE(rnp): decode 2 samples per dispatch when data is i16 */
- if (decode_first && cp->data_kind == BeamformerDataKind_Int16)
+ if (decode_first && data_kind == BeamformerDataKind_Int16)
cp->decode_dispatch.x = (u32)ceil_f32((f32)cp->decode_dispatch.x / 2);
/* NOTE(rnp): when we are demodulating we pretend that the sampler was alternating
@@ -461,6 +529,75 @@ plan_compute_pipeline(SharedMemoryRegion *os_sm, BeamformerComputePipeline *cp,
cp->rf_size = bp->dec_data_dim[0] * bp->dec_data_dim[1] * bp->dec_data_dim[2] * 8;
}
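For reference, the trailing factor of 8 in rf_size is the per-sample footprint
of decoded data: two f32 components (complex I/Q) at 4 bytes each, the same
2 * sizeof(f32) used to size the ping-pong SSBOs. With illustrative dimensions
dec_data_dim = {4096, 256, 64} the DAS bind range is
4096 * 256 * 64 * 8 B = 512 MiB.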
+function void
+beamformer_commit_parameter_block(BeamformerCtx *ctx, BeamformerComputePlan *cp, u32 block, Arena arena)
+{
+ BeamformerParameterBlock *pb = beamformer_parameter_block_lock(&ctx->shared_memory, block, -1);
+ for (u32 region = ctz_u32(pb->dirty_regions);
+ region != 32;
+ region = ctz_u32(pb->dirty_regions))
+ {
+ mark_parameter_block_region_clean(ctx->shared_memory.region, block, region);
+ switch (region) {
+ case BeamformerParameterBlockRegion_ComputePipeline:
+ case BeamformerParameterBlockRegion_Parameters:
+ {
+ plan_compute_pipeline(cp, pb);
+
+ /* NOTE(rnp): these are both handled by plan_compute_pipeline() */
+ u32 mask = 1 << BeamformerParameterBlockRegion_ComputePipeline |
+ 1 << BeamformerParameterBlockRegion_Parameters;
+ pb->dirty_regions &= ~mask;
+
+ #define X(k, t, v) glNamedBufferSubData(cp->ubos[BeamformerComputeUBOKind_##k], \
+ 0, sizeof(t), &cp->v ## _ubo_data);
+ BEAMFORMER_COMPUTE_UBO_LIST
+ #undef X
+
+ u32 *dec_data_dim = pb->parameters.dec_data_dim;
+ u32 decoded_data_size = (u32)(2 * sizeof(f32) * dec_data_dim[0] * dec_data_dim[1] * dec_data_dim[2]);
+ if (ctx->compute_context.ping_pong_ssbo_size < decoded_data_size)
+ alloc_shader_storage(ctx, decoded_data_size, arena);
+
+ if (cp->hadamard_order != (i32)cp->das_ubo_data.dec_data_dim[2])
+ update_hadamard_texture(cp, (i32)cp->das_ubo_data.dec_data_dim[2], arena);
+ }break;
+ case BeamformerParameterBlockRegion_ChannelMapping:
+ case BeamformerParameterBlockRegion_FocalVectors:
+ case BeamformerParameterBlockRegion_SparseElements:
+ {
+ BeamformerComputeTextureKind texture_kind = 0;
+ u32 texture_type = 0, texture_format = 0;
+ /* TODO(rnp): this whole thing could be a table */
+ switch (region) {
+ case BeamformerParameterBlockRegion_ChannelMapping:{
+ texture_kind = BeamformerComputeTextureKind_ChannelMapping;
+ texture_type = GL_SHORT;
+ texture_format = GL_RED_INTEGER;
+ /* TODO(rnp): cuda lib */
+ ctx->compute_context.cuda_lib.set_channel_mapping(pb->channel_mapping);
+ }break;
+ case BeamformerParameterBlockRegion_FocalVectors:{
+ texture_kind = BeamformerComputeTextureKind_FocalVectors;
+ texture_type = GL_FLOAT;
+ texture_format = GL_RG;
+ }break;
+ case BeamformerParameterBlockRegion_SparseElements:{
+ texture_kind = BeamformerComputeTextureKind_SparseElements;
+ texture_type = GL_SHORT;
+ texture_format = GL_RED_INTEGER;
+ }break;
+ InvalidDefaultCase;
+ }
+ glTextureSubImage1D(cp->textures[texture_kind], 0, 0, BeamformerMaxChannelCount,
+ texture_format, texture_type,
+ (u8 *)pb + BeamformerParameterBlockRegionOffsets[region]);
+ }break;
+ }
+ }
+ beamformer_parameter_block_unlock(&ctx->shared_memory, block);
+}
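+
+A standalone illustration of the dirty-region walk above, assuming the same
+ctz_u32 convention as the loop (index of the lowest set bit, 32 when the mask
+is empty):
+
+	u32 dirty = (1u << BeamformerParameterBlockRegion_Parameters)
+	          | (1u << BeamformerParameterBlockRegion_FocalVectors);
+	for (u32 region = ctz_u32(dirty); region != 32; region = ctz_u32(dirty)) {
+		dirty &= ~(1u << region);
+		/* handle region; a handler may clear extra bits (as the
+		 * Parameters/ComputePipeline case does) and those regions are
+		 * then skipped on later iterations */
+	}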
+
function m4
das_voxel_transform_matrix(BeamformerParameters *bp)
{
@@ -499,17 +636,16 @@ das_voxel_transform_matrix(BeamformerParameters *bp)
}
function void
-do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerFrame *frame,
- BeamformerShaderKind shader, BeamformerShaderParameters *sp)
+do_compute_shader(BeamformerCtx *ctx, BeamformerComputePlan *cp, BeamformerFrame *frame,
+ BeamformerShaderKind shader, BeamformerShaderParameters *sp, Arena arena)
{
- ComputeShaderCtx *csctx = &ctx->csctx;
- BeamformerComputePipeline *cp = &csctx->compute_pipeline;
+ BeamformerComputeContext *cc = &ctx->compute_context;
- u32 program = csctx->programs[shader];
+ u32 program = cc->programs[shader];
glUseProgram(program);
- u32 output_ssbo_idx = !csctx->last_output_ssbo_index;
- u32 input_ssbo_idx = csctx->last_output_ssbo_index;
+ u32 output_ssbo_idx = !cc->last_output_ssbo_index;
+ u32 input_ssbo_idx = cc->last_output_ssbo_index;
switch (shader) {
case BeamformerShaderKind_Decode:
@@ -519,59 +655,59 @@ do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerFrame *frame,
case BeamformerShaderKind_DecodeInt16ToFloat:
{
glBindBufferBase(GL_UNIFORM_BUFFER, 0, cp->ubos[BeamformerComputeUBOKind_Decode]);
- glBindImageTexture(0, csctx->hadamard_texture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R8I);
+ glBindImageTexture(0, cp->textures[BeamformerComputeTextureKind_Hadamard], 0, 0, 0, GL_READ_ONLY, GL_R8I);
- if (shader == cp->shaders[0]) {
- glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, csctx->rf_data_ssbos[input_ssbo_idx]);
- glBindImageTexture(1, csctx->channel_mapping_texture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R16I);
+ if (shader == cp->pipeline.shaders[0]) {
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, cc->ping_pong_ssbos[input_ssbo_idx]);
+ glBindImageTexture(1, cp->textures[BeamformerComputeTextureKind_ChannelMapping], 0, 0, 0, GL_READ_ONLY, GL_R16I);
glProgramUniform1ui(program, DECODE_FIRST_PASS_UNIFORM_LOC, 1);
glDispatchCompute(cp->decode_dispatch.x, cp->decode_dispatch.y, cp->decode_dispatch.z);
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
}
- glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, csctx->rf_data_ssbos[input_ssbo_idx]);
- glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, csctx->rf_data_ssbos[output_ssbo_idx]);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, cc->ping_pong_ssbos[input_ssbo_idx]);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, cc->ping_pong_ssbos[output_ssbo_idx]);
glProgramUniform1ui(program, DECODE_FIRST_PASS_UNIFORM_LOC, 0);
- glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, csctx->rf_data_ssbos[output_ssbo_idx]);
glDispatchCompute(cp->decode_dispatch.x, cp->decode_dispatch.y, cp->decode_dispatch.z);
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
- csctx->last_output_ssbo_index = !csctx->last_output_ssbo_index;
+ cc->last_output_ssbo_index = !cc->last_output_ssbo_index;
}break;
case BeamformerShaderKind_CudaDecode:{
- csctx->cuda_lib.decode(0, output_ssbo_idx, 0);
- csctx->last_output_ssbo_index = !csctx->last_output_ssbo_index;
+ cc->cuda_lib.decode(0, output_ssbo_idx, 0);
+ cc->last_output_ssbo_index = !cc->last_output_ssbo_index;
}break;
case BeamformerShaderKind_CudaHilbert:{
- csctx->cuda_lib.hilbert(input_ssbo_idx, output_ssbo_idx);
- csctx->last_output_ssbo_index = !csctx->last_output_ssbo_index;
+ cc->cuda_lib.hilbert(input_ssbo_idx, output_ssbo_idx);
+ cc->last_output_ssbo_index = !cc->last_output_ssbo_index;
}break;
case BeamformerShaderKind_Demodulate:
case BeamformerShaderKind_DemodulateFloat:
{
BeamformerDemodulateUBO *ubo = &cp->demod_ubo_data;
+
glBindBufferBase(GL_UNIFORM_BUFFER, 0, cp->ubos[BeamformerComputeUBOKind_Demodulate]);
- glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, csctx->rf_data_ssbos[output_ssbo_idx]);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, cc->ping_pong_ssbos[output_ssbo_idx]);
if (!ubo->map_channels)
- glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, csctx->rf_data_ssbos[input_ssbo_idx]);
+ glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, cc->ping_pong_ssbos[input_ssbo_idx]);
- glBindImageTexture(0, csctx->filters[sp->filter_slot].texture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R32F);
+ glBindImageTexture(0, cp->filters[sp->filter_slot].texture, 0, 0, 0, GL_READ_ONLY, GL_R32F);
if (ubo->map_channels)
- glBindImageTexture(1, csctx->channel_mapping_texture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R16I);
+ glBindImageTexture(1, cp->textures[BeamformerComputeTextureKind_ChannelMapping], 0, 0, 0, GL_READ_ONLY, GL_R16I);
glDispatchCompute(cp->demod_dispatch.x, cp->demod_dispatch.y, cp->demod_dispatch.z);
glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);
- csctx->last_output_ssbo_index = !csctx->last_output_ssbo_index;
+ cc->last_output_ssbo_index = !cc->last_output_ssbo_index;
}break;
case BeamformerShaderKind_MinMax:{
for (i32 i = 1; i < frame->mips; i++) {
glBindImageTexture(0, frame->texture, i - 1, GL_TRUE, 0, GL_READ_ONLY, GL_RG32F);
glBindImageTexture(1, frame->texture, i - 0, GL_TRUE, 0, GL_WRITE_ONLY, GL_RG32F);
- glProgramUniform1i(csctx->programs[shader], MIN_MAX_MIPS_LEVEL_UNIFORM_LOC, i);
+ glProgramUniform1i(cc->programs[shader], MIN_MAX_MIPS_LEVEL_UNIFORM_LOC, i);
u32 width = (u32)frame->dim.x >> i;
u32 height = (u32)frame->dim.y >> i;
@@ -593,9 +729,9 @@ do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerFrame *frame,
}
glBindBufferBase(GL_UNIFORM_BUFFER, 0, cp->ubos[BeamformerComputeUBOKind_DAS]);
- glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 1, csctx->rf_data_ssbos[input_ssbo_idx], 0, cp->rf_size);
- glBindImageTexture(1, csctx->sparse_elements_texture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_R16I);
- glBindImageTexture(2, csctx->focal_vectors_texture, 0, GL_FALSE, 0, GL_READ_ONLY, GL_RG32F);
+ glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 1, cc->ping_pong_ssbos[input_ssbo_idx], 0, cp->rf_size);
+ glBindImageTexture(1, cp->textures[BeamformerComputeTextureKind_SparseElements], 0, 0, 0, GL_READ_ONLY, GL_R16I);
+ glBindImageTexture(2, cp->textures[BeamformerComputeTextureKind_FocalVectors], 0, 0, 0, GL_READ_ONLY, GL_RG32F);
m4 voxel_transform = das_voxel_transform_matrix(ubo);
glProgramUniform1ui(program, DAS_CYCLE_T_UNIFORM_LOC, cycle_t++);
@@ -613,9 +749,9 @@ do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerFrame *frame,
loop_end = (i32)ubo->dec_data_dim[1];
}
f32 percent_per_step = 1.0f / (f32)loop_end;
- csctx->processing_progress = -percent_per_step;
+ cc->processing_progress = -percent_per_step;
for (i32 index = 0; index < loop_end; index++) {
- csctx->processing_progress += percent_per_step;
+ cc->processing_progress += percent_per_step;
/* IMPORTANT(rnp): prevents OS from coalescing and killing our shader */
glFinish();
glProgramUniform1i(program, DAS_FAST_CHANNEL_UNIFORM_LOC, index);
@@ -631,12 +767,12 @@ do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerFrame *frame,
u32 max_points_per_dispatch = KB(64);
struct compute_cursor cursor = start_compute_cursor(frame->dim, max_points_per_dispatch);
f32 percent_per_step = (f32)cursor.points_per_dispatch / (f32)cursor.total_points;
- csctx->processing_progress = -percent_per_step;
+ cc->processing_progress = -percent_per_step;
for (iv3 offset = {0};
!compute_cursor_finished(&cursor);
offset = step_compute_cursor(&cursor))
{
- csctx->processing_progress += percent_per_step;
+ cc->processing_progress += percent_per_step;
/* IMPORTANT(rnp): prevents OS from coalescing and killing our shader */
glFinish();
glProgramUniform3iv(program, DAS_VOXEL_OFFSET_UNIFORM_LOC, 1, offset.E);
@@ -666,14 +802,14 @@ do_compute_shader(BeamformerCtx *ctx, Arena arena, BeamformerFrame *frame,
u32 base_index = (u32)(frame - ctx->beamform_frames);
u32 to_average = (u32)cp->das_ubo_data.output_points[3];
u32 frame_count = 0;
- u32 *in_textures = push_array(&arena, u32, MAX_BEAMFORMED_SAVED_FRAMES);
+ u32 *in_textures = push_array(&arena, u32, BeamformerMaxSavedFrames);
ComputeFrameIterator cfi = compute_frame_iterator(ctx, 1 + base_index - to_average, to_average);
for (BeamformerFrame *it = frame_next(&cfi); it; it = frame_next(&cfi))
in_textures[frame_count++] = it->texture;
assert(to_average == frame_count);
- do_sum_shader(csctx, in_textures, frame_count, 1 / (f32)frame_count, aframe->texture, aframe->dim);
+ do_sum_shader(cc, in_textures, frame_count, 1 / (f32)frame_count, aframe->texture, aframe->dim);
aframe->min_coordinate = frame->min_coordinate;
aframe->max_coordinate = frame->max_coordinate;
aframe->compound_count = frame->compound_count;
@@ -810,11 +946,10 @@ reload_compute_shader(BeamformerCtx *ctx, ShaderReloadContext *src, s8 name_extr
}
function void
-complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_context)
+complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena *arena, iptr gl_context)
{
- ComputeShaderCtx *cs = &ctx->csctx;
- BeamformerSharedMemory *sm = ctx->shared_memory.region;
- BeamformerParameters *bp = &sm->parameters;
+ BeamformerComputeContext *cs = &ctx->compute_context;
+ BeamformerSharedMemory *sm = ctx->shared_memory.region;
BeamformWork *work = beamform_work_queue_pop(q);
while (work) {
@@ -822,13 +957,13 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co
switch (work->kind) {
case BeamformerWorkKind_ReloadShader:{
ShaderReloadContext *src = work->shader_reload_context;
- b32 success = reload_compute_shader(ctx, src, s8(""), arena);
+ b32 success = reload_compute_shader(ctx, src, s8(""), *arena);
/* TODO(rnp): think of a better way of doing this */
switch (src->kind) {
case BeamformerShaderKind_DAS:{
src->kind = BeamformerShaderKind_DASFast;
src->shader = cs->programs + src->kind;
- success &= reload_compute_shader(ctx, src, s8(" (Fast)"), arena);
+ success &= reload_compute_shader(ctx, src, s8(" (Fast)"), *arena);
src->kind = BeamformerShaderKind_DAS;
src->shader = cs->programs + src->kind;
@@ -836,19 +971,19 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co
case BeamformerShaderKind_Decode:{
src->kind = BeamformerShaderKind_DecodeFloatComplex;
src->shader = cs->programs + src->kind;
- success &= reload_compute_shader(ctx, src, s8(" (F32C)"), arena);
+ success &= reload_compute_shader(ctx, src, s8(" (F32C)"), *arena);
src->kind = BeamformerShaderKind_DecodeFloat;
src->shader = cs->programs + src->kind;
- success &= reload_compute_shader(ctx, src, s8(" (F32)"), arena);
+ success &= reload_compute_shader(ctx, src, s8(" (F32)"), *arena);
src->kind = BeamformerShaderKind_DecodeInt16Complex;
src->shader = cs->programs + src->kind;
- success &= reload_compute_shader(ctx, src, s8(" (I16C)"), arena);
+ success &= reload_compute_shader(ctx, src, s8(" (I16C)"), *arena);
src->kind = BeamformerShaderKind_DecodeInt16ToFloat;
src->shader = cs->programs + src->kind;
- success &= reload_compute_shader(ctx, src, s8(" (I16-F32)"), arena);
+ success &= reload_compute_shader(ctx, src, s8(" (I16-F32)"), *arena);
src->kind = BeamformerShaderKind_Decode;
src->shader = cs->programs + src->kind;
@@ -856,7 +991,7 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co
case BeamformerShaderKind_Demodulate:{
src->kind = BeamformerShaderKind_DemodulateFloat;
src->shader = cs->programs + src->kind;
- success &= reload_compute_shader(ctx, src, s8(" (F32)"), arena);
+ success &= reload_compute_shader(ctx, src, s8(" (F32)"), *arena);
src->kind = BeamformerShaderKind_Demodulate;
src->shader = cs->programs + src->kind;
@@ -865,7 +1000,7 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co
}
if (success && ctx->latest_frame && !sm->live_imaging_parameters.active) {
- fill_frame_compute_work(ctx, work, ctx->latest_frame->view_plane_tag, 0);
+ fill_frame_compute_work(ctx, work, ctx->latest_frame->view_plane_tag, 0, 0);
can_commit = 0;
}
}break;
@@ -884,7 +1019,7 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co
u32 out_size = (u32)dim.x * (u32)dim.y * (u32)dim.z * 2 * sizeof(f32);
if (out_size <= ec->size) {
glGetTextureImage(texture, 0, GL_RG, GL_FLOAT, (i32)out_size,
- (u8 *)sm + BEAMFORMER_SCRATCH_OFF);
+ beamformer_shared_memory_scratch_arena(sm).beg);
}
}
}break;
@@ -894,7 +1029,7 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co
while (table->write_index != atomic_load_u32(&table->read_index));
ComputeShaderStats *stats = ctx->compute_shader_stats;
if (sizeof(stats->table) <= ec->size)
- mem_copy((u8 *)sm + BEAMFORMER_SCRATCH_OFF, &stats->table, sizeof(stats->table));
+ mem_copy(beamformer_shared_memory_scratch_arena(sm).beg, &stats->table, sizeof(stats->table));
}break;
InvalidDefaultCase;
}
@@ -902,79 +1037,29 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co
post_sync_barrier(&ctx->shared_memory, BeamformerSharedMemoryLockKind_ExportSync, sm->locks);
}break;
case BeamformerWorkKind_CreateFilter:{
+ /* TODO(rnp): this should probably get deleted and moved to lazy loading */
BeamformerCreateFilterContext *fctx = &work->create_filter_context;
- beamformer_filter_update(cs->filters + fctx->slot, fctx, sm->parameters.sampling_frequency / 2, arena);
- }break;
- case BeamformerWorkKind_UploadBuffer:{
- os_shared_memory_region_lock(&ctx->shared_memory, sm->locks, (i32)work->lock, (u32)-1);
- BeamformerUploadContext *uc = &work->upload_context;
- u32 tex_type, tex_format, tex_1d = 0, buffer = 0;
- i32 tex_element_count;
- switch (uc->kind) {
- case BeamformerUploadKind_ChannelMapping:{
- tex_1d = cs->channel_mapping_texture;
- tex_type = GL_SHORT;
- tex_format = GL_RED_INTEGER;
- tex_element_count = countof(sm->channel_mapping);
- cs->cuda_lib.set_channel_mapping(sm->channel_mapping);
- }break;
- case BeamformerUploadKind_FocalVectors:{
- tex_1d = cs->focal_vectors_texture;
- tex_type = GL_FLOAT;
- tex_format = GL_RG;
- tex_element_count = countof(sm->focal_vectors);
- }break;
- case BeamformerUploadKind_SparseElements:{
- tex_1d = cs->sparse_elements_texture;
- tex_type = GL_SHORT;
- tex_format = GL_RED_INTEGER;
- tex_element_count = countof(sm->sparse_elements);
- }break;
- InvalidDefaultCase;
- }
-
- if (tex_1d) {
- glTextureSubImage1D(tex_1d, 0, 0, tex_element_count, tex_format,
- tex_type, (u8 *)sm + uc->shared_memory_offset);
- }
-
- if (buffer) {
- glNamedBufferSubData(buffer, 0, (i32)uc->size,
- (u8 *)sm + uc->shared_memory_offset);
- }
-
- mark_shared_memory_region_clean(sm, (i32)work->lock);
- os_shared_memory_region_unlock(&ctx->shared_memory, sm->locks, (i32)work->lock);
+ BeamformerComputePlan *cp = beamformer_compute_plan_for_block(cs, fctx->parameter_block, arena);
+ beamformer_filter_update(cp->filters + fctx->filter_slot, fctx->kind, fctx->parameters, *arena);
}break;
case BeamformerWorkKind_ComputeIndirect:{
- fill_frame_compute_work(ctx, work, work->compute_indirect_plane, 1);
+ fill_frame_compute_work(ctx, work, work->compute_indirect_context.view_plane,
+ work->compute_indirect_context.parameter_block, 1);
} /* FALLTHROUGH */
case BeamformerWorkKind_Compute:{
- DEBUG_DECL(glClearNamedBufferData(cs->rf_data_ssbos[0], GL_RG32F, GL_RG, GL_FLOAT, 0);)
- DEBUG_DECL(glClearNamedBufferData(cs->rf_data_ssbos[1], GL_RG32F, GL_RG, GL_FLOAT, 0);)
+ DEBUG_DECL(glClearNamedBufferData(cs->ping_pong_ssbos[0], GL_RG32F, GL_RG, GL_FLOAT, 0);)
+ DEBUG_DECL(glClearNamedBufferData(cs->ping_pong_ssbos[1], GL_RG32F, GL_RG, GL_FLOAT, 0);)
DEBUG_DECL(glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);)
push_compute_timing_info(ctx->compute_timing_table,
(ComputeTimingInfo){.kind = ComputeTimingInfoKind_ComputeFrameBegin});
- BeamformerComputePipeline *cp = &cs->compute_pipeline;
- u32 mask = (1 << (BeamformerSharedMemoryLockKind_Parameters - 1)) |
- (1 << (BeamformerSharedMemoryLockKind_ComputePipeline - 1));
- if (sm->dirty_regions & mask) {
- if (cs->rf_raw_size != cs->rf_buffer.rf_size ||
- !uv4_equal(cs->dec_data_dim, uv4_from_u32_array(bp->dec_data_dim)))
- {
- alloc_shader_storage(ctx, cs->rf_buffer.rf_size, arena);
- }
-
- plan_compute_pipeline(&ctx->shared_memory, cp, cs->filters);
- atomic_store_u32(&ctx->ui_read_params, ctx->beamform_work_queue != q);
- atomic_and_u32(&sm->dirty_regions, ~mask);
-
- #define X(k, t, v) glNamedBufferSubData(cp->ubos[BeamformerComputeUBOKind_##k], \
- 0, sizeof(t), &cp->v ## _ubo_data);
- BEAMFORMER_COMPUTE_UBO_LIST
- #undef X
+ BeamformerComputePlan *cp = beamformer_compute_plan_for_block(cs, work->compute_context.parameter_block, arena);
+ BeamformerParameterBlock *pb = beamformer_parameter_block(sm, work->compute_context.parameter_block);
+ if (pb->dirty_regions) {
+ u32 block = work->compute_context.parameter_block;
+ beamformer_commit_parameter_block(ctx, cp, block, *arena);
+ atomic_store_u32(&ctx->ui_dirty_parameter_blocks, (u32)(ctx->beamform_work_queue != q) << block);
}
post_sync_barrier(&ctx->shared_memory, work->lock, sm->locks);
@@ -982,26 +1067,26 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co
atomic_store_u32(&cs->processing_compute, 1);
start_renderdoc_capture(gl_context);
- BeamformerFrame *frame = work->frame;
- iv3 try_dim = make_valid_test_dim(bp->output_points);
+ BeamformerFrame *frame = work->compute_context.frame;
+ iv3 try_dim = make_valid_test_dim(cp->das_ubo_data.output_points);
if (!iv3_equal(try_dim, frame->dim))
- alloc_beamform_frame(&ctx->gl, frame, try_dim, s8("Beamformed_Data"), arena);
+ alloc_beamform_frame(&ctx->gl, frame, try_dim, s8("Beamformed_Data"), *arena);
- if (bp->output_points[3] > 1) {
- if (!iv3_equal(try_dim, ctx->averaged_frames[0].dim)) {
- alloc_beamform_frame(&ctx->gl, ctx->averaged_frames + 0, try_dim, s8("Averaged Frame"), arena);
- alloc_beamform_frame(&ctx->gl, ctx->averaged_frames + 1, try_dim, s8("Averaged Frame"), arena);
- }
+ if (cp->das_ubo_data.output_points[3] > 1 && !iv3_equal(try_dim, ctx->averaged_frames[0].dim)) {
+ alloc_beamform_frame(&ctx->gl, ctx->averaged_frames + 0, try_dim, s8("Averaged Frame"), *arena);
+ alloc_beamform_frame(&ctx->gl, ctx->averaged_frames + 1, try_dim, s8("Averaged Frame"), *arena);
}
- frame->min_coordinate = v4_from_f32_array(bp->output_min_coordinate);
- frame->max_coordinate = v4_from_f32_array(bp->output_max_coordinate);
- frame->das_shader_kind = bp->das_shader_id;
- frame->compound_count = bp->dec_data_dim[2];
+ frame->min_coordinate = v4_from_f32_array(cp->das_ubo_data.output_min_coordinate);
+ frame->max_coordinate = v4_from_f32_array(cp->das_ubo_data.output_max_coordinate);
+ frame->das_shader_kind = cp->das_ubo_data.das_shader_id;
+ frame->compound_count = cp->das_ubo_data.dec_data_dim[2];
+ BeamformerComputeContext *cc = &ctx->compute_context;
+ BeamformerComputePipeline *pipeline = &cp->pipeline;
/* NOTE(rnp): first stage requires access to raw data buffer directly so we break
- * it out into a separate step. This way data can get release as soon as possible */
- if (cp->shader_count > 0) {
+ * it out into a separate step. This way data can get released as soon as possible */
+ if (pipeline->shader_count > 0) {
BeamformerRFBuffer *rf = &cs->rf_buffer;
u32 slot = rf->compute_index % countof(rf->compute_syncs);
@@ -1018,10 +1103,10 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co
slot = (rf->compute_index - 1) % countof(rf->compute_syncs);
}
- glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 1, rf->ssbo, slot * rf->rf_size, rf->rf_size);
+ glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 1, rf->ssbo, slot * rf->size, rf->size);
- glBeginQuery(GL_TIME_ELAPSED, cs->shader_timer_ids[0]);
- do_compute_shader(ctx, arena, frame, cp->shaders[0], cp->shader_parameters + 0);
+ glBeginQuery(GL_TIME_ELAPSED, cc->shader_timer_ids[0]);
+ do_compute_shader(ctx, cp, frame, pipeline->shaders[0], pipeline->parameters + 0, *arena);
glEndQuery(GL_TIME_ELAPSED);
if (work->kind == BeamformerWorkKind_ComputeIndirect) {
@@ -1032,19 +1117,19 @@ complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena arena, iptr gl_co
}
b32 did_sum_shader = 0;
- for (i32 i = 1; i < cp->shader_count; i++) {
- did_sum_shader |= cp->shaders[i] == BeamformerShaderKind_Sum;
- glBeginQuery(GL_TIME_ELAPSED, cs->shader_timer_ids[i]);
- do_compute_shader(ctx, arena, frame, cp->shaders[i], cp->shader_parameters + i);
+ for (u32 i = 1; i < pipeline->shader_count; i++) {
+ did_sum_shader |= pipeline->shaders[i] == BeamformerShaderKind_Sum;
+ glBeginQuery(GL_TIME_ELAPSED, cc->shader_timer_ids[i]);
+ do_compute_shader(ctx, cp, frame, pipeline->shaders[i], pipeline->parameters + i, *arena);
glEndQuery(GL_TIME_ELAPSED);
}
/* NOTE(rnp): the first of these blocks until work completes */
- for (i32 i = 0; i < cp->shader_count; i++) {
+ for (u32 i = 0; i < pipeline->shader_count; i++) {
ComputeTimingInfo info = {0};
info.kind = ComputeTimingInfoKind_Shader;
- info.shader = cp->shaders[i];
- glGetQueryObjectui64v(cs->shader_timer_ids[i], GL_QUERY_RESULT, &info.timer_count);
+ info.shader = pipeline->shaders[i];
+ glGetQueryObjectui64v(cc->shader_timer_ids[i], GL_QUERY_RESULT, &info.timer_count);
push_compute_timing_info(ctx->compute_timing_table, info);
}
cs->processing_progress = 1;
@@ -1138,36 +1223,6 @@ coalesce_timing_table(ComputeTimingTable *t, ComputeShaderStats *stats)
}
}
-DEBUG_EXPORT BEAMFORMER_COMPUTE_SETUP_FN(beamformer_compute_setup)
-{
- BeamformerCtx *ctx = (BeamformerCtx *)user_context;
- BeamformerSharedMemory *sm = ctx->shared_memory.region;
- ComputeShaderCtx *cs = &ctx->csctx;
- BeamformerComputePipeline *cp = &cs->compute_pipeline;
-
- glCreateBuffers(countof(cp->ubos), cp->ubos);
- #define X(k, t, ...) \
- glNamedBufferStorage(cp->ubos[BeamformerComputeUBOKind_##k], sizeof(t), \
- 0, GL_DYNAMIC_STORAGE_BIT); \
- LABEL_GL_OBJECT(GL_BUFFER, cp->ubos[BeamformerComputeUBOKind_##k], s8(#t));
-
- BEAMFORMER_COMPUTE_UBO_LIST
- #undef X
-
- glCreateTextures(GL_TEXTURE_1D, 1, &cs->channel_mapping_texture);
- glCreateTextures(GL_TEXTURE_1D, 1, &cs->sparse_elements_texture);
- glCreateTextures(GL_TEXTURE_1D, 1, &cs->focal_vectors_texture);
- glTextureStorage1D(cs->channel_mapping_texture, 1, GL_R16I, ARRAY_COUNT(sm->channel_mapping));
- glTextureStorage1D(cs->sparse_elements_texture, 1, GL_R16I, ARRAY_COUNT(sm->sparse_elements));
- glTextureStorage1D(cs->focal_vectors_texture, 1, GL_RG32F, ARRAY_COUNT(sm->focal_vectors));
-
- LABEL_GL_OBJECT(GL_TEXTURE, cs->channel_mapping_texture, s8("Channel_Mapping"));
- LABEL_GL_OBJECT(GL_TEXTURE, cs->focal_vectors_texture, s8("Focal_Vectors"));
- LABEL_GL_OBJECT(GL_TEXTURE, cs->sparse_elements_texture, s8("Sparse_Elements"));
-
- glCreateQueries(GL_TIME_ELAPSED, countof(cs->shader_timer_ids), cs->shader_timer_ids);
-}
-
DEBUG_EXPORT BEAMFORMER_COMPLETE_COMPUTE_FN(beamformer_complete_compute)
{
BeamformerCtx *ctx = (BeamformerCtx *)user_context;
@@ -1187,7 +1242,7 @@ beamformer_rf_buffer_allocate(BeamformerRFBuffer *rf, u32 rf_size, Arena arena)
glNamedBufferStorage(rf->ssbo, countof(rf->compute_syncs) * rf_size, 0,
GL_DYNAMIC_STORAGE_BIT|GL_MAP_WRITE_BIT);
LABEL_GL_OBJECT(GL_BUFFER, rf->ssbo, s8("Raw_RF_SSBO"));
- rf->rf_size = rf_size;
+ rf->size = rf_size;
}
DEBUG_EXPORT BEAMFORMER_RF_UPLOAD_FN(beamformer_rf_upload)
@@ -1200,7 +1255,7 @@ DEBUG_EXPORT BEAMFORMER_RF_UPLOAD_FN(beamformer_rf_upload)
os_shared_memory_region_lock(ctx->shared_memory, sm->locks, (i32)scratch_lock, (u32)-1))
{
BeamformerRFBuffer *rf = ctx->rf_buffer;
- if (rf->rf_size < sm->scratch_rf_size)
+ if (rf->size < sm->scratch_rf_size)
beamformer_rf_buffer_allocate(rf, sm->scratch_rf_size, arena);
u32 slot = rf->insertion_index++ % countof(rf->compute_syncs);
@@ -1222,14 +1277,13 @@ DEBUG_EXPORT BEAMFORMER_RF_UPLOAD_FN(beamformer_rf_upload)
 * at least when it is as big as this one wants to be. mapping and unmapping the
* desired range each time doesn't seem to introduce any performance hit */
u32 access = GL_MAP_WRITE_BIT|GL_MAP_FLUSH_EXPLICIT_BIT|GL_MAP_UNSYNCHRONIZED_BIT;
- u8 *buffer = glMapNamedBufferRange(rf->ssbo, slot * rf->rf_size, (i32)rf->rf_size, access);
+ u8 *buffer = glMapNamedBufferRange(rf->ssbo, slot * rf->size, (i32)rf->size, access);
- mem_copy(buffer, (u8 *)sm + BEAMFORMER_SCRATCH_OFF, rf->rf_size);
- mark_shared_memory_region_clean(sm, (i32)scratch_lock);
+ mem_copy(buffer, beamformer_shared_memory_scratch_arena(sm).beg, rf->size);
os_shared_memory_region_unlock(ctx->shared_memory, sm->locks, (i32)scratch_lock);
post_sync_barrier(ctx->shared_memory, upload_lock, sm->locks);
- glFlushMappedNamedBufferRange(rf->ssbo, 0, (i32)rf->rf_size);
+ glFlushMappedNamedBufferRange(rf->ssbo, 0, (i32)rf->size);
glUnmapNamedBuffer(rf->ssbo);
rf->upload_syncs[slot] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
diff --git a/beamformer.h b/beamformer.h
@@ -91,7 +91,7 @@ typedef struct {
} FrameViewRenderContext;
#include "beamformer_parameters.h"
-#include "beamformer_work_queue.h"
+#include "beamformer_shared_memory.c"
typedef struct {
iptr elements_offset;
@@ -102,9 +102,8 @@ typedef struct {
typedef struct {
BeamformerFilterKind kind;
+ BeamformerFilterParameters parameters;
u32 texture;
- i32 length;
- f32 sampling_frequency;
} BeamformerFilter;
/* X(name, type, gltype) */
@@ -131,14 +130,14 @@ typedef struct {
X(transmit_count, u32, uint) \
X(decode_mode, u32, uint)
-typedef align_as(16) struct {
+typedef alignas(16) struct {
#define X(name, type, ...) type name;
BEAMFORMER_DECODE_UBO_PARAM_LIST
#undef X
} BeamformerDecodeUBO;
static_assert((sizeof(BeamformerDecodeUBO) & 15) == 0, "UBO size must be a multiple of 16");
-typedef align_as(16) struct {
+typedef alignas(16) struct {
#define X(name, type, ...) type name;
BEAMFORMER_DEMOD_UBO_PARAM_LIST
#undef X
@@ -156,31 +155,49 @@ static_assert((sizeof(BeamformerDemodulateUBO) & 15) == 0, "UBO size must be a m
typedef enum {BEAMFORMER_COMPUTE_UBO_LIST BeamformerComputeUBOKind_Count} BeamformerComputeUBOKind;
#undef X
-typedef struct {
- BeamformerShaderKind shaders[MAX_COMPUTE_SHADER_STAGES];
- BeamformerShaderParameters shader_parameters[MAX_COMPUTE_SHADER_STAGES];
- i32 shader_count;
- BeamformerDataKind data_kind;
+#define BEAMFORMER_COMPUTE_TEXTURE_LIST \
+ X(ChannelMapping, GL_R16I) \
+ X(FocalVectors, GL_RG32F) \
+ X(SparseElements, GL_R16I) \
+ X(Hadamard, GL_R8I)
+
+typedef enum {
+ #define X(k, ...) BeamformerComputeTextureKind_##k,
+ BEAMFORMER_COMPUTE_TEXTURE_LIST
+ #undef X
+ BeamformerComputeTextureKind_Count
+} BeamformerComputeTextureKind;
+static_assert((BeamformerComputeTextureKind_Count - 1) == BeamformerComputeTextureKind_Hadamard,
+              "BeamformerComputeTextureKind_Hadamard must be the last texture kind");
+
+typedef struct BeamformerComputePlan BeamformerComputePlan;
+struct BeamformerComputePlan {
+ BeamformerComputePipeline pipeline;
uv3 decode_dispatch;
uv3 demod_dispatch;
- u32 rf_size;
+ u32 rf_size;
+ i32 hadamard_order;
+ u32 textures[BeamformerComputeTextureKind_Count];
u32 ubos[BeamformerComputeUBOKind_Count];
+ BeamformerFilter filters[BeamformerFilterSlots];
+
#define X(k, type, name) type name ##_ubo_data;
BEAMFORMER_COMPUTE_UBO_LIST
#undef X
-} BeamformerComputePipeline;
-#define MAX_RAW_DATA_FRAMES_IN_FLIGHT 3
+ BeamformerComputePlan *next;
+};
+
typedef struct {
- GLsync upload_syncs[MAX_RAW_DATA_FRAMES_IN_FLIGHT];
- GLsync compute_syncs[MAX_RAW_DATA_FRAMES_IN_FLIGHT];
+ GLsync upload_syncs[BeamformerMaxRawDataFramesInFlight];
+ GLsync compute_syncs[BeamformerMaxRawDataFramesInFlight];
u32 ssbo;
- u32 rf_size;
+ u32 size;
u32 data_timestamp_query;
@@ -189,34 +206,27 @@ typedef struct {
} BeamformerRFBuffer;
typedef struct {
- u32 programs[BeamformerShaderKind_ComputeCount];
-
- BeamformerComputePipeline compute_pipeline;
- BeamformerFilter filters[BEAMFORMER_FILTER_SLOTS];
-
+ u32 programs[BeamformerShaderKind_ComputeCount];
BeamformerRFBuffer rf_buffer;
- /* NOTE: Decoded data is only relevant in the context of a single frame. We use two
- * buffers so that they can be swapped when chaining multiple compute stages */
- u32 rf_data_ssbos[2];
- u32 last_output_ssbo_index;
+ BeamformerComputePlan *compute_plans[BeamformerMaxParameterBlockSlots];
+ BeamformerComputePlan *compute_plan_freelist;
- u32 channel_mapping_texture;
- u32 sparse_elements_texture;
- u32 focal_vectors_texture;
- u32 hadamard_texture;
+ /* NOTE(rnp): two interstage ssbos are allocated so that they may be used to
+ * ping pong data between compute stages */
+ u32 ping_pong_ssbos[2];
+ u32 last_output_ssbo_index;
- uv4 dec_data_dim;
- u32 rf_raw_size;
+ u32 ping_pong_ssbo_size;
f32 processing_progress;
b32 processing_compute;
- u32 shader_timer_ids[MAX_COMPUTE_SHADER_STAGES];
+ u32 shader_timer_ids[BeamformerMaxComputeShaderStages];
BeamformerRenderModel unit_cube_model;
CudaLib cuda_lib;
-} ComputeShaderCtx;
+} BeamformerComputeContext;
typedef enum {
#define X(type, id, pretty, fixed_tx) DASShaderKind_##type = id,
@@ -315,10 +325,9 @@ typedef struct {
Arena ui_backing_store;
void *ui;
- /* TODO(rnp): this is nasty and should be removed */
- b32 ui_read_params;
+ u32 ui_dirty_parameter_blocks;
- ComputeShaderCtx csctx;
+ BeamformerComputeContext compute_context;
/* TODO(rnp): ideally this would go in the UI but its hard to manage with the UI
* destroying itself on hot-reload */
@@ -334,7 +343,7 @@ typedef struct {
SharedMemoryRegion shared_memory;
- BeamformerFrame beamform_frames[MAX_BEAMFORMED_SAVED_FRAMES];
+ BeamformerFrame beamform_frames[BeamformerMaxSavedFrames];
BeamformerFrame *latest_frame;
u32 next_render_frame_index;
u32 display_frame_index;
@@ -358,10 +367,7 @@ struct ShaderReloadContext {
#define BEAMFORMER_FRAME_STEP_FN(name) void name(BeamformerCtx *ctx, BeamformerInput *input)
typedef BEAMFORMER_FRAME_STEP_FN(beamformer_frame_step_fn);
-#define BEAMFORMER_COMPUTE_SETUP_FN(name) void name(iptr user_context)
-typedef BEAMFORMER_COMPUTE_SETUP_FN(beamformer_compute_setup_fn);
-
-#define BEAMFORMER_COMPLETE_COMPUTE_FN(name) void name(iptr user_context, Arena arena, iptr gl_context)
+#define BEAMFORMER_COMPLETE_COMPUTE_FN(name) void name(iptr user_context, Arena *arena, iptr gl_context)
typedef BEAMFORMER_COMPLETE_COMPUTE_FN(beamformer_complete_compute_fn);
#define BEAMFORMER_RF_UPLOAD_FN(name) void name(BeamformerUploadThreadContext *ctx, Arena arena)
diff --git a/beamformer_parameters.h b/beamformer_parameters.h
@@ -123,10 +123,16 @@ typedef enum {
#define MIN_MAX_MIPS_LEVEL_UNIFORM_LOC 1
#define SUM_PRESCALE_UNIFORM_LOC 1
-#define MAX_BEAMFORMED_SAVED_FRAMES 16
-#define MAX_COMPUTE_SHADER_STAGES 16
-
-#define BEAMFORMER_FILTER_SLOTS 4
+#define BEAMFORMER_CONSTANTS_LIST \
+ X(FilterSlots, 4) \
+ X(MaxChannelCount, 256) \
+ X(MaxComputeShaderStages, 16) \
+ X(MaxParameterBlockSlots, 16) \
+ X(MaxRawDataFramesInFlight, 3) \
+ X(MaxSavedFrames, 16)
+#define X(k, v, ...) Beamformer##k = v,
+enum {BEAMFORMER_CONSTANTS_LIST};
+#undef X
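+
+The X-macro expands to a flat enum of named constants, i.e. (for reference):
+
+	enum {
+		BeamformerFilterSlots              = 4,
+		BeamformerMaxChannelCount          = 256,
+		BeamformerMaxComputeShaderStages   = 16,
+		BeamformerMaxParameterBlockSlots   = 16,
+		BeamformerMaxRawDataFramesInFlight = 3,
+		BeamformerMaxSavedFrames           = 16,
+	};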
/* TODO(rnp): actually use a substruct but generate a header compatible with MATLAB */
/* X(name, type, size, elements, gltype, glsize, comment) */
diff --git a/beamformer_shared_memory.c b/beamformer_shared_memory.c
@@ -0,0 +1,288 @@
+/* See LICENSE for license details. */
+#define BEAMFORMER_SHARED_MEMORY_VERSION (12UL)
+
+typedef struct BeamformerFrame BeamformerFrame;
+typedef struct ShaderReloadContext ShaderReloadContext;
+
+typedef enum {
+ BeamformerWorkKind_Compute,
+ BeamformerWorkKind_ComputeIndirect,
+ BeamformerWorkKind_CreateFilter,
+ BeamformerWorkKind_ReloadShader,
+ BeamformerWorkKind_ExportBuffer,
+ BeamformerWorkKind_UploadBuffer,
+} BeamformerWorkKind;
+
+typedef struct {
+ union {
+ struct {f32 beta; f32 cutoff_frequency;};
+ f32 xdc_center_frequency;
+ };
+ f32 sampling_frequency;
+ i16 length;
+} BeamformerFilterParameters;
+
+typedef struct {
+ BeamformerFilterKind kind;
+ BeamformerFilterParameters parameters;
+ u8 filter_slot;
+ u8 parameter_block;
+ static_assert(BeamformerFilterSlots <= 255, "CreateFilterContext only supports 255 filter slots");
+ static_assert(BeamformerMaxParameterBlockSlots <= 255, "CreateFilterContext only supports 255 parameter blocks");
+} BeamformerCreateFilterContext;
+
+typedef enum {
+ BeamformerExportKind_BeamformedData,
+ BeamformerExportKind_Stats,
+} BeamformerExportKind;
+
+typedef struct {
+ BeamformerExportKind kind;
+ u32 size;
+} BeamformerExportContext;
+
+/* TODO(rnp): remove the None lock */
+#define BEAMFORMER_SHARED_MEMORY_LOCKS \
+ X(None) \
+ X(ScratchSpace) \
+ X(UploadRF) \
+ X(ExportSync) \
+ X(DispatchCompute)
+
+#define X(name) BeamformerSharedMemoryLockKind_##name,
+typedef enum {BEAMFORMER_SHARED_MEMORY_LOCKS BeamformerSharedMemoryLockKind_Count} BeamformerSharedMemoryLockKind;
+#undef X
+
+typedef struct {
+ BeamformerFrame *frame;
+ u32 parameter_block;
+} BeamformerComputeWorkContext;
+
+typedef struct {
+ BeamformerViewPlaneTag view_plane;
+ u32 parameter_block;
+} BeamformerComputeIndirectWorkContext;
+
+/* NOTE: discriminated union based on type */
+typedef struct {
+ BeamformerWorkKind kind;
+ BeamformerSharedMemoryLockKind lock;
+ union {
+ void *generic;
+ BeamformerComputeWorkContext compute_context;
+ BeamformerComputeIndirectWorkContext compute_indirect_context;
+ BeamformerCreateFilterContext create_filter_context;
+ BeamformerExportContext export_context;
+ ShaderReloadContext *shader_reload_context;
+ };
+} BeamformWork;
+
+typedef struct {
+ union {
+ u64 queue;
+ struct {u32 widx, ridx;};
+ };
+ BeamformWork work_items[1 << 6];
+} BeamformWorkQueue;
+
+#define BEAMFORMER_SHARED_MEMORY_SIZE (GB(2))
+#define BEAMFORMER_SHARED_MEMORY_MIN_SCRATCH_SIZE (BEAMFORMER_SHARED_MEMORY_SIZE - \
+ sizeof(BeamformerSharedMemory) - \
+ sizeof(BeamformerParameterBlock))
+
+#define X(name, id) BeamformerLiveImagingDirtyFlags_##name = (1 << id),
+typedef enum {BEAMFORMER_LIVE_IMAGING_DIRTY_FLAG_LIST} BeamformerLiveImagingDirtyFlags;
+#undef X
+
+#define BEAMFORMER_PARAMETER_BLOCK_REGION_LIST \
+ X(ComputePipeline, pipeline) \
+ X(ChannelMapping, channel_mapping) \
+ X(FocalVectors, focal_vectors) \
+ X(Parameters, parameters) \
+ X(SparseElements, sparse_elements)
+
+typedef enum {
+ #define X(k, ...) BeamformerParameterBlockRegion_##k,
+ BEAMFORMER_PARAMETER_BLOCK_REGION_LIST
+ #undef X
+ BeamformerParameterBlockRegion_Count
+} BeamformerParameterBlockRegions;
+
+typedef union {
+ u8 filter_slot;
+} BeamformerShaderParameters;
+
+typedef struct {
+ BeamformerShaderKind shaders[BeamformerMaxComputeShaderStages];
+ BeamformerShaderParameters parameters[BeamformerMaxComputeShaderStages];
+ u32 shader_count;
+ BeamformerDataKind data_kind;
+} BeamformerComputePipeline;
+
+typedef struct {
+ alignas(16) union {
+ BeamformerParameters parameters;
+ struct {
+ BeamformerParametersHead parameters_head;
+ BeamformerUIParameters parameters_ui;
+ BeamformerParametersTail parameters_tail;
+ };
+ };
+
+ /* NOTE(rnp): signals to the beamformer that a subregion of a block has been updated */
+ u32 dirty_regions;
+ static_assert(BeamformerParameterBlockRegion_Count <= 32, "only 32 parameter block regions supported");
+
+ BeamformerComputePipeline pipeline;
+
+ alignas(16) i16 channel_mapping[BeamformerMaxChannelCount];
+ alignas(16) i16 sparse_elements[BeamformerMaxChannelCount];
+ /* NOTE(rnp): interleaved transmit angle, focal depth pairs */
+ alignas(16) v2 focal_vectors[BeamformerMaxChannelCount];
+} BeamformerParameterBlock;
+static_assert(sizeof(BeamformerParameterBlock) % alignof(BeamformerParameterBlock) == 0,
+              "sizeof(BeamformerParameterBlock) must be a multiple of its alignment");
+
+#define X(k, field) [BeamformerParameterBlockRegion_##k] = offsetof(BeamformerParameterBlock, field),
+read_only global u16 BeamformerParameterBlockRegionOffsets[BeamformerParameterBlockRegion_Count] = {
+ BEAMFORMER_PARAMETER_BLOCK_REGION_LIST
+};
+#undef X
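+
+This table is what lets beamformer_commit_parameter_block() address a region
+generically as (u8 *)pb + BeamformerParameterBlockRegionOffsets[region] when
+uploading texture data.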
+
+typedef struct {
+ u32 version;
+
+ /* NOTE(rnp): causes future library calls to fail.
+ * see note in beamformer_invalidate_shared_memory() */
+ b32 invalid;
+
+ /* NOTE(rnp): not used for locking on w32 but we can use these to peek at the status of
+ * the lock without leaving userspace. */
+ i32 locks[BeamformerSharedMemoryLockKind_Count + BeamformerMaxParameterBlockSlots];
+
+	/* NOTE(rnp): total number of parameter blocks the client has requested.
+ * used to calculate offset to scratch space and to track number of allocated
+ * semaphores on w32. Defaults to 1 but can be changed at runtime */
+ u32 reserved_parameter_blocks;
+
+ /* TODO(rnp): this is really sucky. we need a better way to communicate this */
+ u32 scratch_rf_size;
+
+ BeamformerLiveImagingParameters live_imaging_parameters;
+ BeamformerLiveImagingDirtyFlags live_imaging_dirty_flags;
+
+ BeamformWorkQueue external_work_queue;
+} BeamformerSharedMemory;
+
+function BeamformWork *
+beamform_work_queue_pop(BeamformWorkQueue *q)
+{
+ BeamformWork *result = 0;
+
+ static_assert(ISPOWEROF2(countof(q->work_items)), "queue capacity must be a power of 2");
+ u64 val = atomic_load_u64(&q->queue);
+ u64 mask = countof(q->work_items) - 1;
+ u64 widx = val & mask;
+ u64 ridx = val >> 32 & mask;
+
+ if (ridx != widx)
+ result = q->work_items + ridx;
+
+ return result;
+}
+
+function void
+beamform_work_queue_pop_commit(BeamformWorkQueue *q)
+{
+ atomic_add_u64(&q->queue, 0x100000000ULL);
+}
+
+function BeamformWork *
+beamform_work_queue_push(BeamformWorkQueue *q)
+{
+ BeamformWork *result = 0;
+
+ static_assert(ISPOWEROF2(countof(q->work_items)), "queue capacity must be a power of 2");
+ u64 val = atomic_load_u64(&q->queue);
+ u64 mask = countof(q->work_items) - 1;
+ u64 widx = val & mask;
+ u64 ridx = val >> 32 & mask;
+ u64 next = (widx + 1) & mask;
+
+ if (val & 0x80000000)
+ atomic_and_u64(&q->queue, ~0x80000000);
+
+ if (next != ridx) {
+ result = q->work_items + widx;
+ zero_struct(result);
+ }
+
+ return result;
+}
+
+function void
+beamform_work_queue_push_commit(BeamformWorkQueue *q)
+{
+ atomic_add_u64(&q->queue, 1);
+}
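+
+Both queue cursors live in a single u64 (widx in the low half, ridx in the
+high half), so one atomic load gives a consistent snapshot and each commit is
+a plain atomic add; decoding the packed word looks like:
+
+	u64 val  = atomic_load_u64(&q->queue);
+	u64 mask = countof(q->work_items) - 1;  /* capacity is a power of 2 */
+	u64 widx = (val >>  0) & mask;          /* push_commit: queue += 1 */
+	u64 ridx = (val >> 32) & mask;          /* pop_commit: queue += 1 << 32 */
+	b32 queue_empty = ridx == widx;
+	b32 queue_full  = ((widx + 1) & mask) == ridx;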
+
+function BeamformerParameterBlock *
+beamformer_parameter_block(BeamformerSharedMemory *sm, u32 block)
+{
+ assert(sm->reserved_parameter_blocks >= block);
+ BeamformerParameterBlock *result = (typeof(result))((u8 *)(sm + 1) + block * sizeof(*result));
+ return result;
+}
+
+function BeamformerParameterBlock *
+beamformer_parameter_block_lock(SharedMemoryRegion *sm, u32 block, i32 timeout_ms)
+{
+ assert(block < BeamformerMaxParameterBlockSlots);
+ BeamformerSharedMemory *b = sm->region;
+ BeamformerParameterBlock *result = 0;
+ if (os_shared_memory_region_lock(sm, b->locks, BeamformerSharedMemoryLockKind_Count + (i32)block, (u32)timeout_ms))
+ result = beamformer_parameter_block(sm->region, block);
+ return result;
+}
+
+function void
+beamformer_parameter_block_unlock(SharedMemoryRegion *sm, u32 block)
+{
+ assert(block < BeamformerMaxParameterBlockSlots);
+ BeamformerSharedMemory *b = sm->region;
+ os_shared_memory_region_unlock(sm, b->locks, BeamformerSharedMemoryLockKind_Count + (i32)block);
+}
+
+function Arena
+beamformer_shared_memory_scratch_arena(BeamformerSharedMemory *sm)
+{
+ assert(sm->reserved_parameter_blocks > 0);
+ BeamformerParameterBlock *last = beamformer_parameter_block(sm, sm->reserved_parameter_blocks);
+ Arena result = {.beg = (u8 *)(last + 1), .end = (u8 *)sm + BEAMFORMER_SHARED_MEMORY_SIZE};
+ result.beg = arena_aligned_start(result, KB(4));
+ return result;
+}
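+
+The helpers above pin down the layout of the 2GB shared memory region:
+
+	[BeamformerSharedMemory]            header, locks, external work queue
+	[BeamformerParameterBlock 0]        always reserved
+	[BeamformerParameterBlock 1..N-1]   N = reserved_parameter_blocks
+	[scratch arena]                     4KB aligned, runs to BEAMFORMER_SHARED_MEMORY_SIZE
+
+Block addresses are pure offset arithmetic from the header, so raising
+reserved_parameter_blocks at runtime only moves the scratch base.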
+
+function void
+mark_parameter_block_region_dirty(BeamformerSharedMemory *sm, u32 block, BeamformerParameterBlockRegions region)
+{
+ BeamformerParameterBlock *pb = beamformer_parameter_block(sm, block);
+ atomic_or_u32(&pb->dirty_regions, 1 << region);
+}
+
+function void
+mark_parameter_block_region_clean(BeamformerSharedMemory *sm, u32 block, BeamformerParameterBlockRegions region)
+{
+ BeamformerParameterBlock *pb = beamformer_parameter_block(sm, block);
+ atomic_and_u32(&pb->dirty_regions, ~(1 << region));
+}
+
+function void
+post_sync_barrier(SharedMemoryRegion *sm, BeamformerSharedMemoryLockKind lock, i32 *locks)
+{
+ /* NOTE(rnp): debug: here it is not a bug to release the lock if it
+	 * isn't held but elsewhere it is */
+ DEBUG_DECL(if (locks[lock])) {
+ os_shared_memory_region_unlock(sm, locks, (i32)lock);
+ }
+}
diff --git a/beamformer_work_queue.c b/beamformer_work_queue.c
@@ -1,81 +0,0 @@
-/* See LICENSE for license details. */
-#include "beamformer_work_queue.h"
-
-function BeamformWork *
-beamform_work_queue_pop(BeamformWorkQueue *q)
-{
- BeamformWork *result = 0;
-
- static_assert(ISPOWEROF2(countof(q->work_items)), "queue capacity must be a power of 2");
- u64 val = atomic_load_u64(&q->queue);
- u64 mask = countof(q->work_items) - 1;
- u64 widx = val & mask;
- u64 ridx = val >> 32 & mask;
-
- if (ridx != widx)
- result = q->work_items + ridx;
-
- return result;
-}
-
-function void
-beamform_work_queue_pop_commit(BeamformWorkQueue *q)
-{
- atomic_add_u64(&q->queue, 0x100000000ULL);
-}
-
-DEBUG_EXPORT BEAMFORM_WORK_QUEUE_PUSH_FN(beamform_work_queue_push)
-{
- BeamformWork *result = 0;
-
- static_assert(ISPOWEROF2(countof(q->work_items)), "queue capacity must be a power of 2");
- u64 val = atomic_load_u64(&q->queue);
- u64 mask = countof(q->work_items) - 1;
- u64 widx = val & mask;
- u64 ridx = val >> 32 & mask;
- u64 next = (widx + 1) & mask;
-
- if (val & 0x80000000)
- atomic_and_u64(&q->queue, ~0x80000000);
-
- if (next != ridx) {
- result = q->work_items + widx;
- zero_struct(result);
- }
-
- return result;
-}
-
-DEBUG_EXPORT BEAMFORM_WORK_QUEUE_PUSH_COMMIT_FN(beamform_work_queue_push_commit)
-{
- atomic_add_u64(&q->queue, 1);
-}
-
-function void
-mark_shared_memory_region_dirty(BeamformerSharedMemory *sm, i32 index)
-{
- atomic_or_u32(&sm->dirty_regions, (1 << (index - 1)));
-}
-
-function void
-mark_shared_memory_region_clean(BeamformerSharedMemory *sm, i32 index)
-{
- atomic_and_u32(&sm->dirty_regions, ~(1 << (index - 1)));
-}
-
-function b32
-is_shared_memory_region_dirty(BeamformerSharedMemory *sm, i32 index)
-{
- b32 result = (atomic_load_u32(&sm->dirty_regions) & (1 << (index - 1))) != 0;
- return result;
-}
-
-function void
-post_sync_barrier(SharedMemoryRegion *sm, BeamformerSharedMemoryLockKind lock, i32 *locks)
-{
- /* NOTE(rnp): debug: here it is not a bug to release the lock if it
- * isn't held but elswhere it is */
- DEBUG_DECL(if (locks[lock])) {
- os_shared_memory_region_unlock(sm, locks, (i32)lock);
- }
-}
diff --git a/beamformer_work_queue.h b/beamformer_work_queue.h
@@ -1,159 +0,0 @@
-/* See LICENSE for license details. */
-#ifndef _BEAMFORMER_WORK_QUEUE_H_
-#define _BEAMFORMER_WORK_QUEUE_H_
-
-#define BEAMFORMER_SHARED_MEMORY_VERSION (11UL)
-
-typedef struct BeamformerFrame BeamformerFrame;
-typedef struct ShaderReloadContext ShaderReloadContext;
-
-typedef enum {
- BeamformerWorkKind_Compute,
- BeamformerWorkKind_ComputeIndirect,
- BeamformerWorkKind_CreateFilter,
- BeamformerWorkKind_ReloadShader,
- BeamformerWorkKind_ExportBuffer,
- BeamformerWorkKind_UploadBuffer,
-} BeamformerWorkKind;
-
-typedef enum {
- BeamformerUploadKind_ChannelMapping,
- BeamformerUploadKind_FocalVectors,
- BeamformerUploadKind_SparseElements,
-} BeamformerUploadKind;
-
-typedef struct {
- BeamformerUploadKind kind;
- u32 size;
- i32 shared_memory_offset;
-} BeamformerUploadContext;
-
-typedef struct {
- BeamformerFilterKind kind;
- union {
- struct {
- f32 beta;
- f32 cutoff_frequency;
- };
- f32 xdc_center_frequency;
- };
- i16 length;
- i16 slot;
-} BeamformerCreateFilterContext;
-
-typedef enum {
- BeamformerExportKind_BeamformedData,
- BeamformerExportKind_Stats,
-} BeamformerExportKind;
-
-typedef struct {
- BeamformerExportKind kind;
- u32 size;
-} BeamformerExportContext;
-
-typedef union {
- u8 filter_slot;
-} BeamformerShaderParameters;
-
-#define BEAMFORMER_SHARED_MEMORY_LOCKS \
- X(None) \
- X(ComputePipeline) \
- X(ChannelMapping) \
- X(FocalVectors) \
- X(Parameters) \
- X(ScratchSpace) \
- X(SparseElements) \
- X(UploadRF) \
- X(ExportSync) \
- X(DispatchCompute)
-
-#define X(name) BeamformerSharedMemoryLockKind_##name,
-typedef enum {BEAMFORMER_SHARED_MEMORY_LOCKS BeamformerSharedMemoryLockKind_Count} BeamformerSharedMemoryLockKind;
-#undef X
-
-/* NOTE: discriminated union based on type */
-typedef struct {
- BeamformerWorkKind kind;
- BeamformerSharedMemoryLockKind lock;
- union {
- BeamformerFrame *frame;
- BeamformerCreateFilterContext create_filter_context;
- BeamformerExportContext export_context;
- BeamformerUploadContext upload_context;
- BeamformerViewPlaneTag compute_indirect_plane;
- ShaderReloadContext *shader_reload_context;
- void *generic;
- };
-} BeamformWork;
-
-typedef struct {
- union {
- u64 queue;
- struct {u32 widx, ridx;};
- };
- BeamformWork work_items[1 << 6];
-} BeamformWorkQueue;
-
-#define BEAMFORM_WORK_QUEUE_PUSH_FN(name) BeamformWork *name(BeamformWorkQueue *q)
-typedef BEAMFORM_WORK_QUEUE_PUSH_FN(beamform_work_queue_push_fn);
-
-#define BEAMFORM_WORK_QUEUE_PUSH_COMMIT_FN(name) void name(BeamformWorkQueue *q)
-typedef BEAMFORM_WORK_QUEUE_PUSH_COMMIT_FN(beamform_work_queue_push_commit_fn);
-
-#define BEAMFORMER_SHARED_MEMORY_SIZE (GB(2))
-#define BEAMFORMER_SCRATCH_OFF (sizeof(BeamformerSharedMemory) + 4096ULL \
- - (uintptr_t)(sizeof(BeamformerSharedMemory) & 4095ULL))
-#define BEAMFORMER_SCRATCH_SIZE (BEAMFORMER_SHARED_MEMORY_SIZE - BEAMFORMER_SCRATCH_OFF)
-#define BEAMFORMER_MAX_RF_DATA_SIZE (BEAMFORMER_SCRATCH_SIZE)
-
-#define X(name, id) BeamformerLiveImagingDirtyFlags_##name = (1 << id),
-typedef enum {BEAMFORMER_LIVE_IMAGING_DIRTY_FLAG_LIST} BeamformerLiveImagingDirtyFlags;
-#undef X
-
-typedef struct {
- u32 version;
-
- /* NOTE(rnp): causes future library calls to fail.
- * see note in beamformer_invalidate_shared_memory() */
- b32 invalid;
-
- /* NOTE(rnp): not used for locking on w32 but we can use these to peek at the status of
- * the lock without leaving userspace. also this struct needs a bunch of padding */
- i32 locks[BeamformerSharedMemoryLockKind_Count];
-
- /* NOTE(rnp): used to coalesce uploads when they are not yet uploaded to the GPU */
- u32 dirty_regions;
- static_assert(BeamformerSharedMemoryLockKind_Count <= 32, "only 32 lock regions supported");
-
- /* NOTE(rnp): interleaved transmit angle, focal depth pairs */
- align_as(64) v2 focal_vectors[256];
-
- i16 channel_mapping[256];
- i16 sparse_elements[256];
-
- union {
- BeamformerParameters parameters;
- struct {
- BeamformerParametersHead parameters_head;
- BeamformerUIParameters parameters_ui;
- BeamformerParametersTail parameters_tail;
- };
- };
-
- //////////////////////////
- // Pipeline Configuration
- BeamformerShaderKind shaders[MAX_COMPUTE_SHADER_STAGES];
- BeamformerShaderParameters shader_parameters[MAX_COMPUTE_SHADER_STAGES];
- i32 shader_count;
- BeamformerDataKind data_kind;
-
- /* TODO(rnp): this is really sucky. we need a better way to communicate this */
- u32 scratch_rf_size;
-
- BeamformerLiveImagingParameters live_imaging_parameters;
- BeamformerLiveImagingDirtyFlags live_imaging_dirty_flags;
-
- BeamformWorkQueue external_work_queue;
-} BeamformerSharedMemory;
-
-#endif /* _BEAMFORMER_WORK_QUEUE_H_ */
diff --git a/helpers/ogl_beamformer_lib.c b/helpers/ogl_beamformer_lib.c
@@ -16,7 +16,7 @@ W32(iptr) OpenFileMappingA(u32, b32, c8 *);
#error Unsupported Platform
#endif
-#include "../beamformer_work_queue.c"
+#include "../beamformer_shared_memory.c"
global struct {
SharedMemoryRegion shared_memory;
@@ -27,6 +27,13 @@ global struct {
#if OS_LINUX
+function b32
+os_reserve_region_locks(iptr os_context, u32 count)
+{
+ b32 result = count <= BeamformerMaxParameterBlockSlots;
+ return result;
+}
+
function SharedMemoryRegion
os_open_shared_memory_area(char *name)
{
@@ -42,31 +49,52 @@ os_open_shared_memory_area(char *name)
#elif OS_WINDOWS
+function b32
+os_reserve_region_locks(iptr os_context, u32 count)
+{
+ local_persist iptr semaphores[BeamformerSharedMemoryLockKind_Count + BeamformerMaxParameterBlockSlots];
+ w32_shared_memory_context *ctx = (typeof(ctx))os_context;
+
+ b32 result = count <= BeamformerMaxParameterBlockSlots;
+ if (result) {
+ count += BeamformerSharedMemoryLockKind_Count;
+ if (count > ctx->reserved_count) {
+ u8 buffer[1024];
+ Stream sb = {.data = buffer, .cap = countof(buffer)};
+ stream_append_s8(&sb, s8(OS_SHARED_MEMORY_NAME "_lock_"));
+
+ for (u32 i = ctx->reserved_count; i < count; i++) {
+ Stream lb = sb;
+ stream_append_u64(&lb, i);
+ stream_append_byte(&lb, 0);
+ semaphores[i] = CreateSemaphoreA(0, 1, 1, (c8 *)lb.data);
+ result &= semaphores[i] != INVALID_FILE;
+ }
+
+ if (result) {
+ ctx->semaphores = semaphores;
+ ctx->reserved_count = count;
+ }
+ } else if (count < ctx->reserved_count) {
+			for (u32 i = ctx->reserved_count; i > count;) /* keep handles below count open */
+ CloseHandle(semaphores[--i]);
+ ctx->reserved_count = count;
+ }
+ }
+ return result;
+}
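+
+/* Note on the names produced above: each lock index gets one named Win32
+ * semaphore, OS_SHARED_MEMORY_NAME "_lock_<i>" for i in
+ * [BeamformerSharedMemoryLockKind_Count, count); handles for indices below
+ * the previously reserved count are kept and reused. */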
+
function SharedMemoryRegion
os_open_shared_memory_area(char *name)
{
+ local_persist w32_shared_memory_context ctx = {0};
SharedMemoryRegion result = {0};
iptr h = OpenFileMappingA(FILE_MAP_ALL_ACCESS, 0, name);
if (h != INVALID_FILE) {
void *new = MapViewOfFile(h, FILE_MAP_ALL_ACCESS, 0, 0, BEAMFORMER_SHARED_MEMORY_SIZE);
- if (new) {
- u8 buffer[1024];
- Stream sb = {.data = buffer, .cap = 1024};
- stream_append_s8s(&sb, c_str_to_s8(name), s8("_lock_"));
- local_persist iptr semaphores[BeamformerSharedMemoryLockKind_Count];
- local_persist w32_shared_memory_context ctx = {.semaphores = semaphores};
- b32 all_semaphores = 1;
- for (i32 i = 0; i < countof(semaphores); i++) {
- Stream lb = sb;
- stream_append_i64(&lb, i);
- stream_append_byte(&lb, 0);
- semaphores[i] = CreateSemaphoreA(0, 1, 1, (c8 *)lb.data);
- all_semaphores &= semaphores[i] != INVALID_FILE;
- }
- if (all_semaphores) {
- result.region = new;
- result.os_context = (iptr)&ctx;
- }
+ if (new && os_reserve_region_locks((iptr)&ctx, 1)) {
+ result.region = new;
+ result.os_context = (iptr)&ctx;
}
CloseHandle(h);
}
@@ -76,26 +104,39 @@ os_open_shared_memory_area(char *name)
#endif
function b32
+lib_error_check(b32 condition, BeamformerLibErrorKind error_kind)
+{
+ b32 result = condition;
+ if (!result) g_beamformer_library_context.last_error = error_kind;
+ return result;
+}
+
+function b32
check_shared_memory(void)
{
- b32 result = 1;
if (!g_beamformer_library_context.shared_memory.region) {
g_beamformer_library_context.shared_memory = os_open_shared_memory_area(OS_SHARED_MEMORY_NAME);
- if (!g_beamformer_library_context.shared_memory.region) {
- g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_SHARED_MEMORY;
- result = 0;
- } else if (((BeamformerSharedMemory *)g_beamformer_library_context.shared_memory.region)->version !=
- BEAMFORMER_SHARED_MEMORY_VERSION)
- {
- g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_VERSION_MISMATCH;
- result = 0;
+ if (lib_error_check(g_beamformer_library_context.shared_memory.region != 0, BF_LIB_ERR_KIND_SHARED_MEMORY)) {
+ u32 version = ((BeamformerSharedMemory *)g_beamformer_library_context.shared_memory.region)->version;
+ if (lib_error_check(version == BEAMFORMER_SHARED_MEMORY_VERSION, BF_LIB_ERR_KIND_VERSION_MISMATCH))
+ g_beamformer_library_context.bp = g_beamformer_library_context.shared_memory.region;
}
}
- if (result && ((BeamformerSharedMemory *)g_beamformer_library_context.shared_memory.region)->invalid) {
- g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_ACCESS;
- result = 0;
+
+ b32 result = 0;
+ if (g_beamformer_library_context.bp)
+ result = lib_error_check(!g_beamformer_library_context.bp->invalid, BF_LIB_ERR_KIND_INVALID_ACCESS);
+ return result;
+}
+
+function b32
+valid_parameter_block(u32 block)
+{
+ b32 result = check_shared_memory();
+ if (result) {
+ result = lib_error_check(block < g_beamformer_library_context.bp->reserved_parameter_blocks,
+ BF_LIB_ERR_KIND_PARAMETER_BLOCK_UNALLOCATED);
}
- if (result) g_beamformer_library_context.bp = g_beamformer_library_context.shared_memory.region;
return result;
}
@@ -103,22 +144,22 @@ function BeamformWork *
try_push_work_queue(void)
{
BeamformWork *result = beamform_work_queue_push(&g_beamformer_library_context.bp->external_work_queue);
- if (!result) g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_WORK_QUEUE_FULL;
+ lib_error_check(result != 0, BF_LIB_ERR_KIND_WORK_QUEUE_FULL);
return result;
}
function b32
-lib_try_lock(BeamformerSharedMemoryLockKind lock, i32 timeout_ms)
+lib_try_lock(i32 lock, i32 timeout_ms)
{
b32 result = os_shared_memory_region_lock(&g_beamformer_library_context.shared_memory,
g_beamformer_library_context.bp->locks,
- (i32)lock, (u32)timeout_ms);
- if (!result) g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_SYNC_VARIABLE;
+ lock, (u32)timeout_ms);
+ lib_error_check(result, BF_LIB_ERR_KIND_SYNC_VARIABLE);
return result;
}
function void
-lib_release_lock(BeamformerSharedMemoryLockKind lock)
+lib_release_lock(i32 lock)
{
os_shared_memory_region_unlock(&g_beamformer_library_context.shared_memory,
g_beamformer_library_context.bp->locks, (i32)lock);
@@ -154,21 +195,36 @@ beamformer_get_last_error_string(void)
b32
beamformer_set_global_timeout(i32 timeout_ms)
{
- b32 result = timeout_ms >= -1;
- if (result) {
- g_beamformer_library_context.timeout_ms = timeout_ms;
- } else {
- g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_TIMEOUT;
+ b32 result = lib_error_check(timeout_ms >= -1, BF_LIB_ERR_KIND_INVALID_TIMEOUT);
+ if (result) g_beamformer_library_context.timeout_ms = timeout_ms;
+ return result;
+}
+
+b32
+beamformer_reserve_parameter_blocks(uint32_t count)
+{
+ b32 result = 0;
+ if (check_shared_memory() &&
+ lib_error_check(os_reserve_region_locks(g_beamformer_library_context.shared_memory.os_context, count),
+ BF_LIB_ERR_KIND_PARAMETER_BLOCK_OVERFLOW))
+ {
+ u32 old_count = g_beamformer_library_context.bp->reserved_parameter_blocks;
+ g_beamformer_library_context.bp->reserved_parameter_blocks = count;
+ for (u32 i = old_count; i < count; i++)
+ zero_struct(beamformer_parameter_block(g_beamformer_library_context.bp, i));
+ result = 1;
}
return result;
}
function b32
-validate_pipeline(i32 *shaders, i32 shader_count, BeamformerDataKind data_kind)
+validate_pipeline(i32 *shaders, u32 shader_count, BeamformerDataKind data_kind)
{
- b32 result = shader_count <= countof(g_beamformer_library_context.bp->shaders);
- if (result) {
- for (i32 i = 0; i < shader_count; i++)
+ b32 result = 1;
+ if (lib_error_check(shader_count <= BeamformerMaxComputeShaderStages,
+ BF_LIB_ERR_KIND_COMPUTE_STAGE_OVERFLOW))
+ {
+ for (u32 i = 0; i < shader_count; i++)
result &= BETWEEN(shaders[i], 0, BeamformerShaderKind_ComputeCount);
if (!result) {
g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_COMPUTE_STAGE;
@@ -183,41 +239,69 @@ validate_pipeline(i32 *shaders, i32 shader_count, BeamformerDataKind data_kind)
g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_DEMOD_DATA_KIND;
result = 0;
}
- } else {
- g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_COMPUTE_STAGE_OVERFLOW;
}
return result;
}
-b32
-beamformer_set_pipeline_stage_parameters(i32 stage_index, i32 parameter)
+function b32
+parameter_block_region_upload_explicit(void *data, u32 size, u32 block, BeamformerParameterBlockRegions region_id,
+ u32 block_offset, i32 timeout_ms)
{
- b32 result = 0;
- BeamformerSharedMemoryLockKind lock = BeamformerSharedMemoryLockKind_ComputePipeline;
- if (check_shared_memory() && g_beamformer_library_context.bp->shader_count != 0 &&
- lib_try_lock(lock, g_beamformer_library_context.timeout_ms))
- {
- stage_index %= (i32)g_beamformer_library_context.bp->shader_count;
- g_beamformer_library_context.bp->shader_parameters[stage_index].filter_slot = (u8)parameter;
- atomic_or_u32(&g_beamformer_library_context.bp->dirty_regions, 1 << (lock - 1));
+ i32 lock = BeamformerSharedMemoryLockKind_Count + (i32)block;
+ b32 result = valid_parameter_block(block) && lib_try_lock(lock, timeout_ms);
+ if (result) {
+ mem_copy((u8 *)beamformer_parameter_block(g_beamformer_library_context.bp, block) + block_offset,
+ data, size);
+ mark_parameter_block_region_dirty(g_beamformer_library_context.bp, block, region_id);
lib_release_lock(lock);
- result = 1;
}
return result;
}
+
+function b32
+parameter_block_region_upload(void *data, u32 size, u32 block,
+ BeamformerParameterBlockRegions region_id, i32 timeout_ms)
+{
+ assert(region_id < BeamformerParameterBlockRegion_Count);
+ b32 result = parameter_block_region_upload_explicit(data, size, block, region_id,
+ BeamformerParameterBlockRegionOffsets[region_id],
+ timeout_ms);
+ return result;
+}
+
b32
-beamformer_push_pipeline(i32 *shaders, i32 shader_count, BeamformerDataKind data_kind)
+beamformer_set_pipeline_stage_parameters_at(u32 stage_index, i32 parameter, u32 block)
+{
+ u32 offset = BeamformerParameterBlockRegionOffsets[BeamformerParameterBlockRegion_ComputePipeline];
+ offset += offsetof(BeamformerComputePipeline, parameters);
+ offset += (stage_index % BeamformerMaxComputeShaderStages) * sizeof(BeamformerShaderParameters);
+ b32 result = parameter_block_region_upload_explicit(¶meter, sizeof(BeamformerShaderParameters), block,
+ BeamformerParameterBlockRegion_ComputePipeline, offset,
+ g_beamformer_library_context.timeout_ms);
+ return result;
+}
+
+b32
+beamformer_set_pipeline_stage_parameters(u32 stage_index, i32 parameter)
+{
+ b32 result = beamformer_set_pipeline_stage_parameters_at(stage_index, parameter, 0);
+ return result;
+}
+
+b32
+beamformer_push_pipeline_at(i32 *shaders, u32 shader_count, BeamformerDataKind data_kind, u32 block)
{
b32 result = 0;
- if (validate_pipeline(shaders, shader_count, data_kind) && check_shared_memory()) {
- BeamformerSharedMemoryLockKind lock = BeamformerSharedMemoryLockKind_ComputePipeline;
- if (lib_try_lock(lock, g_beamformer_library_context.timeout_ms)) {
- g_beamformer_library_context.bp->shader_count = shader_count;
- g_beamformer_library_context.bp->data_kind = data_kind;
- for (i32 i = 0; i < shader_count; i++)
- g_beamformer_library_context.bp->shaders[i] = (BeamformerShaderKind)shaders[i];
- atomic_or_u32(&g_beamformer_library_context.bp->dirty_regions, 1 << (lock - 1));
+ if (validate_pipeline(shaders, shader_count, data_kind)) {
+ i32 lock = BeamformerSharedMemoryLockKind_Count + (i32)block;
+ if (valid_parameter_block(block) && lib_try_lock(lock, g_beamformer_library_context.timeout_ms)) {
+ BeamformerParameterBlock *b = beamformer_parameter_block(g_beamformer_library_context.bp, block);
+ mem_copy(&b->pipeline.shaders, shaders, shader_count * sizeof(*shaders));
+ mark_parameter_block_region_dirty(g_beamformer_library_context.bp, block,
+ BeamformerParameterBlockRegion_ComputePipeline);
+ b->pipeline.shader_count = shader_count;
+ b->pipeline.data_kind = data_kind;
lib_release_lock(lock);
result = 1;
}
@@ -226,26 +310,46 @@ beamformer_push_pipeline(i32 *shaders, i32 shader_count, BeamformerDataKind data
}
b32
-beamformer_create_kaiser_low_pass_filter(f32 beta, f32 cutoff_frequency, i16 length, u8 slot)
+beamformer_push_pipeline(i32 *shaders, u32 shader_count, BeamformerDataKind data_kind)
+{
+ b32 result = beamformer_push_pipeline_at(shaders, shader_count, data_kind, 0);
+ return result;
+}
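+
+/* Hedged client sketch (illustration only; BeamformerDataKind_Int16 is
+ * assumed to name the raw i16 data kind): reserve two parameter blocks and
+ * give block 1 its own decode->DAS pipeline while block 0 keeps its
+ * existing configuration:
+ *
+ *	i32 stages[] = {BeamformerShaderKind_Decode, BeamformerShaderKind_DAS};
+ *	if (beamformer_reserve_parameter_blocks(2) &&
+ *	    beamformer_push_pipeline_at(stages, countof(stages), BeamformerDataKind_Int16, 1))
+ *	{
+ *		beamformer_compute_indirect(BeamformerViewPlaneTag_XZ, 1);
+ *	}
+ */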
+
+function b32
+beamformer_create_filter(BeamformerFilterKind kind, BeamformerFilterParameters params, u8 filter_slot, u8 parameter_block)
{
b32 result = 0;
if (check_shared_memory()) {
BeamformWork *work = try_push_work_queue();
- result = work != 0;
- if (result) {
+ if (work) {
BeamformerCreateFilterContext *ctx = &work->create_filter_context;
- work->kind = BeamformerWorkKind_CreateFilter;
- ctx->kind = BeamformerFilterKind_Kaiser;
- ctx->cutoff_frequency = cutoff_frequency;
- ctx->beta = beta;
- ctx->length = length;
- ctx->slot = slot % BEAMFORMER_FILTER_SLOTS;
+ work->kind = BeamformerWorkKind_CreateFilter;
+ ctx->kind = kind;
+ ctx->parameters = params;
+ ctx->filter_slot = filter_slot % BeamformerFilterSlots;
+ ctx->parameter_block = parameter_block % BeamformerMaxParameterBlockSlots;
beamform_work_queue_push_commit(&g_beamformer_library_context.bp->external_work_queue);
+ result = 1;
}
}
return result;
}
+b32
+beamformer_create_kaiser_low_pass_filter(f32 beta, f32 cutoff_frequency, f32 sampling_frequency,
+ i16 length, u8 filter_slot, u8 parameter_block)
+{
+ BeamformerFilterParameters params = {
+ .beta = beta,
+ .cutoff_frequency = cutoff_frequency,
+ .sampling_frequency = sampling_frequency,
+ .length = length,
+ };
+ b32 result = beamformer_create_filter(BeamformerFilterKind_Kaiser, params, filter_slot, parameter_block);
+ return result;
+}
+
function b32
beamformer_flush_commands(i32 timeout_ms)
{
@@ -254,22 +358,22 @@ beamformer_flush_commands(i32 timeout_ms)
}
function b32
-beamformer_compute_indirect(BeamformerViewPlaneTag tag)
+beamformer_compute_indirect(BeamformerViewPlaneTag tag, u32 block)
{
- b32 result = check_shared_memory();
- if (result) {
- result = tag < BeamformerViewPlaneTag_Count;
- if (result) {
- BeamformWork *work = try_push_work_queue();
- result = work != 0;
- if (result) {
- work->kind = BeamformerWorkKind_ComputeIndirect;
- work->compute_indirect_plane = tag;
- beamform_work_queue_push_commit(&g_beamformer_library_context.bp->external_work_queue);
- beamformer_flush_commands(0);
- }
- } else {
- g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_INVALID_IMAGE_PLANE;
+ b32 result = 0;
+ if (check_shared_memory() &&
+ lib_error_check(tag < BeamformerViewPlaneTag_Count, BF_LIB_ERR_KIND_INVALID_IMAGE_PLANE) &&
+ lib_error_check(block < g_beamformer_library_context.bp->reserved_parameter_blocks,
+ BF_LIB_ERR_KIND_PARAMETER_BLOCK_UNALLOCATED))
+ {
+ BeamformWork *work = try_push_work_queue();
+ if (work) {
+ work->kind = BeamformerWorkKind_ComputeIndirect;
+ work->compute_indirect_context.view_plane = tag;
+ work->compute_indirect_context.parameter_block = block;
+ beamform_work_queue_push_commit(&g_beamformer_library_context.bp->external_work_queue);
+ beamformer_flush_commands(0);
+ result = 1;
}
}
return result;
@@ -278,7 +382,7 @@ beamformer_compute_indirect(BeamformerViewPlaneTag tag)
b32
beamformer_start_compute(void)
{
- b32 result = beamformer_compute_indirect(0);
+ b32 result = beamformer_compute_indirect(0, 0);
return result;
}
@@ -292,78 +396,49 @@ beamformer_wait_for_compute_dispatch(i32 timeout_ms)
return result;
}
-function b32
-locked_region_upload(void *region, void *data, u32 size, BeamformerSharedMemoryLockKind lock,
- b32 *dirty, i32 timeout_ms)
-{
- b32 result = lib_try_lock(lock, timeout_ms);
- if (result) {
- if (dirty) *dirty = is_shared_memory_region_dirty(g_beamformer_library_context.bp, (i32)lock);
- mem_copy(region, data, size);
- mark_shared_memory_region_dirty(g_beamformer_library_context.bp, (i32)lock);
- lib_release_lock(lock);
- }
- return result;
-}
-
-function b32
-beamformer_upload_buffer(void *data, u32 size, i32 store_offset, BeamformerUploadKind kind,
- BeamformerSharedMemoryLockKind lock, i32 timeout_ms)
-{
- b32 result = 0;
- if (check_shared_memory()) {
- BeamformWork *work = try_push_work_queue();
- b32 dirty = 0;
- result = work && locked_region_upload((u8 *)g_beamformer_library_context.bp + store_offset,
- data, size, lock, &dirty, timeout_ms);
- if (result && !dirty) {
- work->upload_context.shared_memory_offset = store_offset;
- work->upload_context.kind = kind;
- work->upload_context.size = size;
- work->kind = BeamformerWorkKind_UploadBuffer;
- work->lock = lock;
- beamform_work_queue_push_commit(&g_beamformer_library_context.bp->external_work_queue);
- }
- }
- return result;
-}
-
#define BEAMFORMER_UPLOAD_FNS \
X(channel_mapping, i16, 1, ChannelMapping) \
X(sparse_elements, i16, 1, SparseElements) \
X(focal_vectors, f32, 2, FocalVectors)
-#define X(name, dtype, elements, lock_name) \
-b32 beamformer_push_##name (dtype *data, u32 count) { \
+#define X(name, dtype, elements, region_name) \
+b32 beamformer_push_##name ##_at(dtype *data, u32 count, u32 block) { \
b32 result = 0; \
- if (count <= countof(g_beamformer_library_context.bp->name)) { \
- result = beamformer_upload_buffer(data, count * elements * sizeof(dtype), \
- offsetof(BeamformerSharedMemory, name), \
- BeamformerUploadKind_##lock_name, \
- BeamformerSharedMemoryLockKind_##lock_name, \
- g_beamformer_library_context.timeout_ms); \
- } else { \
- g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_BUFFER_OVERFLOW; \
+ if (lib_error_check(count <= countof(((BeamformerParameterBlock *)0)->name), BF_LIB_ERR_KIND_BUFFER_OVERFLOW)) { \
+ result = parameter_block_region_upload(data, count * elements * sizeof(dtype), block, \
+ BeamformerParameterBlockRegion_##region_name, \
+ g_beamformer_library_context.timeout_ms); \
} \
return result; \
}
BEAMFORMER_UPLOAD_FNS
#undef X
+#define X(name, dtype, ...) \
+b32 beamformer_push_##name (dtype *data, u32 count) { \
+ b32 result = beamformer_push_##name ##_at(data, count, 0); \
+ return result; \
+}
+BEAMFORMER_UPLOAD_FNS
+#undef X
+
function b32
beamformer_push_data_base(void *data, u32 data_size, i32 timeout_ms)
{
b32 result = 0;
- if (data_size <= BEAMFORMER_MAX_RF_DATA_SIZE) {
- if (lib_try_lock(BeamformerSharedMemoryLockKind_UploadRF, timeout_ms)) {
- result = locked_region_upload((u8 *)g_beamformer_library_context.bp + BEAMFORMER_SCRATCH_OFF,
- data, data_size, BeamformerSharedMemoryLockKind_ScratchSpace,
- 0, 0);
- /* TODO(rnp): need a better way to communicate this */
- if (result) g_beamformer_library_context.bp->scratch_rf_size = data_size;
+ if (check_shared_memory()) {
+ Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
+ if (lib_error_check(data_size <= arena_capacity(&scratch, u8), BF_LIB_ERR_KIND_BUFFER_OVERFLOW)) {
+ if (lib_try_lock(BeamformerSharedMemoryLockKind_UploadRF, timeout_ms)) {
+ if (lib_try_lock(BeamformerSharedMemoryLockKind_ScratchSpace, 0)) {
+ mem_copy(scratch.beg, data, data_size);
+ /* TODO(rnp): need a better way to communicate this */
+ g_beamformer_library_context.bp->scratch_rf_size = data_size;
+ lib_release_lock(BeamformerSharedMemoryLockKind_ScratchSpace);
+ result = 1;
+ }
+ }
}
- } else {
- g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_BUFFER_OVERFLOW;
}
return result;
}
@@ -375,49 +450,44 @@ beamformer_push_data(void *data, u32 data_size)
}
b32
-beamformer_push_data_with_compute(void *data, u32 data_size, u32 image_plane_tag)
+beamformer_push_data_with_compute(void *data, u32 data_size, u32 image_plane_tag, u32 parameter_slot)
{
b32 result = beamformer_push_data_base(data, data_size, g_beamformer_library_context.timeout_ms);
- if (result) result = beamformer_compute_indirect(image_plane_tag);
+ if (result) result = beamformer_compute_indirect(image_plane_tag, parameter_slot);
+ return result;
+}
+
+b32
+beamformer_push_parameters_at(BeamformerParameters *bp, u32 block)
+{
+ b32 result = parameter_block_region_upload(bp, sizeof(*bp), block,
+ BeamformerParameterBlockRegion_Parameters,
+ g_beamformer_library_context.timeout_ms);
return result;
}
b32
beamformer_push_parameters(BeamformerParameters *bp)
{
- b32 result = 0;
- if (check_shared_memory()) {
- result = locked_region_upload((u8 *)g_beamformer_library_context.bp +
- offsetof(BeamformerSharedMemory, parameters),
- bp, sizeof(*bp), BeamformerSharedMemoryLockKind_Parameters, 0,
- g_beamformer_library_context.timeout_ms);
- }
+ b32 result = beamformer_push_parameters_at(bp, 0);
return result;
}
b32
beamformer_push_parameters_ui(BeamformerUIParameters *bp)
{
- b32 result = 0;
- if (check_shared_memory()) {
- result = locked_region_upload((u8 *)g_beamformer_library_context.bp +
- offsetof(BeamformerSharedMemory, parameters_ui),
- bp, sizeof(*bp), BeamformerSharedMemoryLockKind_Parameters, 0,
- g_beamformer_library_context.timeout_ms);
- }
+ b32 result = parameter_block_region_upload_explicit(bp, sizeof(*bp), 0, BeamformerParameterBlockRegion_Parameters,
+ offsetof(BeamformerParameterBlock, parameters_ui),
+ g_beamformer_library_context.timeout_ms);
return result;
}
b32
beamformer_push_parameters_head(BeamformerParametersHead *bp)
{
- b32 result = 0;
- if (check_shared_memory()) {
- result = locked_region_upload((u8 *)g_beamformer_library_context.bp +
- offsetof(BeamformerSharedMemory, parameters_head),
- bp, sizeof(*bp), BeamformerSharedMemoryLockKind_Parameters, 0,
- g_beamformer_library_context.timeout_ms);
- }
+ b32 result = parameter_block_region_upload_explicit(bp, sizeof(*bp), 0, BeamformerParameterBlockRegion_Parameters,
+ offsetof(BeamformerParameterBlock, parameters_head),
+ g_beamformer_library_context.timeout_ms);
return result;
}
@@ -436,16 +506,17 @@ beamformer_export_buffer(BeamformerExportContext export_context)
}
function b32
-beamformer_read_output(void *out, uz size, i32 timeout_ms)
+beamformer_read_output(void *out, iz size, i32 timeout_ms)
{
b32 result = 0;
if (lib_try_lock(BeamformerSharedMemoryLockKind_ExportSync, timeout_ms)) {
- lib_release_lock(BeamformerSharedMemoryLockKind_ExportSync);
if (lib_try_lock(BeamformerSharedMemoryLockKind_ScratchSpace, 0)) {
- mem_copy(out, (u8 *)g_beamformer_library_context.bp + BEAMFORMER_SCRATCH_OFF, size);
+ Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
+ mem_copy(out, scratch.beg, (uz)size);
lib_release_lock(BeamformerSharedMemoryLockKind_ScratchSpace);
result = 1;
}
+ lib_release_lock(BeamformerSharedMemoryLockKind_ExportSync);
}
return result;
}
@@ -459,12 +530,17 @@ beamform_data_synchronized(void *data, u32 data_size, i32 output_points[3], f32
output_points[1] = MAX(1, output_points[1]);
output_points[2] = MAX(1, output_points[2]);
- g_beamformer_library_context.bp->parameters.output_points[0] = output_points[0];
- g_beamformer_library_context.bp->parameters.output_points[1] = output_points[1];
- g_beamformer_library_context.bp->parameters.output_points[2] = output_points[2];
+ BeamformerParameterBlock *b = beamformer_parameter_block(g_beamformer_library_context.bp, 0);
+ b->parameters.output_points[0] = output_points[0];
+ b->parameters.output_points[1] = output_points[1];
+ b->parameters.output_points[2] = output_points[2];
-	uz output_size = (u32)output_points[0] * (u32)output_points[1] * (u32)output_points[2] * sizeof(f32) * 2;
-	if (output_size <= BEAMFORMER_SCRATCH_SIZE && beamformer_push_data_with_compute(data, data_size, 0)) {
+
+	iz output_size = output_points[0] * output_points[1] * output_points[2] * (i32)sizeof(f32) * 2;
+ Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
+ if (lib_error_check(output_size <= arena_capacity(&scratch, u8), BF_LIB_ERR_KIND_EXPORT_SPACE_OVERFLOW)
+ && beamformer_push_data_with_compute(data, data_size, 0, 0))
+ {
BeamformerExportContext export;
export.kind = BeamformerExportKind_BeamformedData;
export.size = (u32)output_size;
@@ -476,8 +552,6 @@ beamform_data_synchronized(void *data, u32 data_size, i32 output_points[3], f32
result = beamformer_read_output(out_data, output_size, timeout_ms);
}
- } else {
- g_beamformer_library_context.last_error = BF_LIB_ERR_KIND_EXPORT_SPACE_OVERFLOW;
}
}
return result;
@@ -486,14 +560,19 @@ beamform_data_synchronized(void *data, u32 data_size, i32 output_points[3], f32
b32
beamformer_compute_timings(BeamformerComputeStatsTable *output, i32 timeout_ms)
{
+ static_assert(sizeof(*output) <= BEAMFORMER_SHARED_MEMORY_MIN_SCRATCH_SIZE,
+ "timing table size exceeds scratch space");
+
b32 result = 0;
if (check_shared_memory()) {
- static_assert(sizeof(*output) <= BEAMFORMER_SCRATCH_SIZE, "timing table size exceeds scratch space");
- BeamformerExportContext export;
- export.kind = BeamformerExportKind_Stats;
- export.size = sizeof(*output);
- if (beamformer_export_buffer(export) && beamformer_flush_commands(0))
- result = beamformer_read_output(output, sizeof(*output), timeout_ms);
+ Arena scratch = beamformer_shared_memory_scratch_arena(g_beamformer_library_context.bp);
+		if (lib_error_check((iz)sizeof(*output) <= arena_capacity(&scratch, u8), BF_LIB_ERR_KIND_EXPORT_SPACE_OVERFLOW)) {
+ BeamformerExportContext export;
+ export.kind = BeamformerExportKind_Stats;
+ export.size = sizeof(*output);
+ if (beamformer_export_buffer(export) && beamformer_flush_commands(0))
+ result = beamformer_read_output(output, sizeof(*output), timeout_ms);
+ }
}
return result;
}
diff --git a/helpers/ogl_beamformer_lib_base.h b/helpers/ogl_beamformer_lib_base.h
@@ -8,20 +8,22 @@
#endif
#define BEAMFORMER_LIB_ERRORS \
- X(NONE, 0, "None") \
- X(VERSION_MISMATCH, 1, "host-library version mismatch") \
- X(INVALID_ACCESS, 2, "library in invalid state") \
- X(COMPUTE_STAGE_OVERFLOW, 3, "compute stage overflow: maximum stages: " str(MAX_COMPUTE_SHADER_STAGES)) \
- X(INVALID_COMPUTE_STAGE, 4, "invalid compute shader stage") \
- X(INVALID_START_SHADER, 5, "starting shader not Decode or Demodulate") \
- X(INVALID_DEMOD_DATA_KIND, 6, "data kind for demodulation not Int16 or Float") \
- X(INVALID_IMAGE_PLANE, 7, "invalid image plane") \
- X(BUFFER_OVERFLOW, 8, "passed buffer size exceeds available space") \
- X(WORK_QUEUE_FULL, 9, "work queue full") \
- X(EXPORT_SPACE_OVERFLOW, 10, "not enough space for data export") \
- X(SHARED_MEMORY, 11, "failed to open shared memory region") \
- X(SYNC_VARIABLE, 12, "failed to acquire lock within timeout period") \
- X(INVALID_TIMEOUT, 13, "invalid timeout value")
+ X(NONE, 0, "None") \
+ X(VERSION_MISMATCH, 1, "host-library version mismatch") \
+ X(INVALID_ACCESS, 2, "library in invalid state") \
+ X(PARAMETER_BLOCK_OVERFLOW, 3, "parameter block count overflow") \
+ X(PARAMETER_BLOCK_UNALLOCATED, 4, "push to unallocated parameter block") \
+ X(COMPUTE_STAGE_OVERFLOW, 5, "compute stage overflow") \
+ X(INVALID_COMPUTE_STAGE, 6, "invalid compute shader stage") \
+ X(INVALID_START_SHADER, 7, "starting shader not Decode or Demodulate") \
+ X(INVALID_DEMOD_DATA_KIND, 8, "data kind for demodulation not Int16 or Float") \
+ X(INVALID_IMAGE_PLANE, 9, "invalid image plane") \
+ X(BUFFER_OVERFLOW, 10, "passed buffer size exceeds available space") \
+ X(WORK_QUEUE_FULL, 11, "work queue full") \
+ X(EXPORT_SPACE_OVERFLOW, 12, "not enough space for data export") \
+ X(SHARED_MEMORY, 13, "failed to open shared memory region") \
+ X(SYNC_VARIABLE, 14, "failed to acquire lock within timeout period") \
+ X(INVALID_TIMEOUT, 15, "invalid timeout value")
#define X(type, num, string) BF_LIB_ERR_KIND_ ##type = num,
typedef enum {BEAMFORMER_LIB_ERRORS} BeamformerLibErrorKind;
@@ -54,22 +56,39 @@ LIB_FN uint32_t beamformer_compute_timings(BeamformerComputeStatsTable *output,
/* NOTE: tells the beamformer to start beamforming */
LIB_FN uint32_t beamformer_start_compute(void);
+LIB_FN uint32_t beamformer_push_data_with_compute(void *data, uint32_t size,
+ uint32_t image_plane_tag,
+ uint32_t parameter_slot);
+
/* NOTE: waits for previously queued beamform to start or for timeout_ms */
LIB_FN uint32_t beamformer_wait_for_compute_dispatch(int32_t timeout_ms);
-LIB_FN uint32_t beamformer_push_data_with_compute(void *data, uint32_t size, uint32_t image_plane_tag);
/* NOTE: these functions only queue an upload; you must flush (start_compute) */
LIB_FN uint32_t beamformer_push_data(void *data, uint32_t size);
LIB_FN uint32_t beamformer_push_channel_mapping(int16_t *mapping, uint32_t count);
LIB_FN uint32_t beamformer_push_sparse_elements(int16_t *elements, uint32_t count);
LIB_FN uint32_t beamformer_push_focal_vectors(float *vectors, uint32_t count);
-LIB_FN uint32_t beamformer_set_pipeline_stage_parameters(int32_t stage_index, int32_t parameter);
-LIB_FN uint32_t beamformer_push_pipeline(int32_t *shaders, int32_t shader_count, BeamformerDataKind data_kind);
+///////////////////////////
+// Parameter Configuration
+LIB_FN uint32_t beamformer_reserve_parameter_blocks(uint32_t count);
+LIB_FN uint32_t beamformer_set_pipeline_stage_parameters(uint32_t stage_index, int32_t parameter);
+LIB_FN uint32_t beamformer_push_pipeline(int32_t *shaders, uint32_t shader_count, BeamformerDataKind data_kind);
LIB_FN uint32_t beamformer_push_parameters(BeamformerParameters *);
LIB_FN uint32_t beamformer_push_parameters_ui(BeamformerUIParameters *);
LIB_FN uint32_t beamformer_push_parameters_head(BeamformerParametersHead *);
+LIB_FN uint32_t beamformer_set_pipeline_stage_parameters_at(uint32_t stage_index,
+ int32_t parameter,
+ uint32_t parameter_slot);
+LIB_FN uint32_t beamformer_push_pipeline_at(int32_t *shaders, uint32_t shader_count,
+ BeamformerDataKind data_kind, uint32_t parameter_slot);
+LIB_FN uint32_t beamformer_push_parameters_at(BeamformerParameters *, uint32_t parameter_slot);
+
+LIB_FN uint32_t beamformer_push_channel_mapping_at(int16_t *mapping, uint32_t count, uint32_t parameter_slot);
+LIB_FN uint32_t beamformer_push_sparse_elements_at(int16_t *elements, uint32_t count, uint32_t parameter_slot);
+LIB_FN uint32_t beamformer_push_focal_vectors_at(float *vectors, uint32_t count, uint32_t parameter_slot);
+
////////////////////
// Filter Creation
@@ -90,7 +109,8 @@ LIB_FN uint32_t beamformer_push_parameters_head(BeamformerParametersHead *);
* M = (A - 8) / (2.285 (ω_s - ω_p))
*/
LIB_FN uint32_t beamformer_create_kaiser_low_pass_filter(float beta, float cutoff_frequency,
- int16_t length, uint8_t slot);
+ float sampling_frequency, int16_t length,
+ uint8_t filter_slot, uint8_t parameter_block);
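+
+/* Hedged example (values illustrative): for ~60 dB of stopband attenuation
+ * the usual Kaiser design rule gives beta = 0.1102 * (60 - 8.7) ~= 5.65, so
+ * a 51 tap low-pass with 5 MHz cutoff at 20 MHz sampling, written to filter
+ * slot 0 of parameter block 0, would be:
+ *
+ *	beamformer_create_kaiser_low_pass_filter(5.65f, 5e6f, 20e6f, 51, 0, 0);
+ */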
//////////////////////////
// Live Imaging Controls
diff --git a/intrinsics.c b/intrinsics.c
@@ -19,7 +19,7 @@
#endif
#if COMPILER_MSVC
- #define align_as(n) __declspec(align(n))
+ #define alignas(n) __declspec(align(n))
#define pack_struct(s) __pragma(pack(push, 1)) s __pragma(pack(pop))
#define no_return __declspec(noreturn)
@@ -51,7 +51,7 @@
#define sqrt_f64(a) sqrt(a)
#else
- #define align_as(n) __attribute__((aligned(n)))
+ #define alignas(n) __attribute__((aligned(n)))
#define pack_struct(s) s __attribute__((packed))
#define no_return __attribute__((noreturn))
diff --git a/math.c b/math.c
@@ -308,23 +308,6 @@ v3_normalize(v3 a)
return result;
}
-function uv4
-uv4_from_u32_array(u32 v[4])
-{
- uv4 result;
- result.E[0] = v[0];
- result.E[1] = v[1];
- result.E[2] = v[2];
- result.E[3] = v[3];
- return result;
-}
-
-function b32
-uv4_equal(uv4 a, uv4 b)
-{
- return a.x == b.x && a.y == b.y && a.z == b.z && a.w == b.w;
-}
-
function v4
v4_from_f32_array(f32 v[4])
{
@@ -617,7 +600,7 @@ hsv_to_rgb(v4 hsv)
* k(n) = fmod((n + H * 6), 6)
* (R, G, B) = (f(n = 5), f(n = 3), f(n = 1))
*/
- align_as(16) f32 nval[4] = {5.0f, 3.0f, 1.0f, 0.0f};
+ alignas(16) f32 nval[4] = {5.0f, 3.0f, 1.0f, 0.0f};
f32x4 n = load_f32x4(nval);
f32x4 H = dup_f32x4(hsv.x);
f32x4 S = dup_f32x4(hsv.y);
diff --git a/os_linux.c b/os_linux.c
@@ -138,7 +138,7 @@ os_file_exists(char *path)
}
function SharedMemoryRegion
-os_create_shared_memory_area(Arena *arena, char *name, i32 lock_count, iz requested_capacity)
+os_create_shared_memory_area(Arena *arena, char *name, u32 lock_count, iz requested_capacity)
{
iz capacity = os_round_up_to_page_size(requested_capacity);
SharedMemoryRegion result = {0};
diff --git a/os_win32.c b/os_win32.c
@@ -83,6 +83,7 @@ typedef struct {
typedef struct {
iptr *semaphores;
+ u32 reserved_count;
} w32_shared_memory_context;
#define W32(r) __declspec(dllimport) r __stdcall
@@ -250,7 +251,7 @@ os_file_exists(char *path)
}
function SharedMemoryRegion
-os_create_shared_memory_area(Arena *arena, char *name, i32 lock_count, iz requested_capacity)
+os_create_shared_memory_area(Arena *arena, char *name, u32 lock_count, iz requested_capacity)
{
iz capacity = os_round_up_to_page_size(requested_capacity);
assert(capacity <= (iz)U32_MAX);
@@ -260,21 +261,20 @@ os_create_shared_memory_area(Arena *arena, char *name, i32 lock_count, iz reques
void *new = MapViewOfFile(h, FILE_MAP_ALL_ACCESS, 0, 0, (u32)capacity);
if (new) {
w32_shared_memory_context *ctx = push_struct(arena, typeof(*ctx));
- ctx->semaphores = push_array(arena, typeof(*ctx->semaphores), lock_count);
- result.os_context = (iptr)ctx;
- result.region = new;
+ ctx->semaphores = push_array(arena, typeof(*ctx->semaphores), lock_count);
+ ctx->reserved_count = lock_count;
+ result.os_context = (iptr)ctx;
+ result.region = new;
Stream sb = arena_stream(*arena);
stream_append_s8s(&sb, c_str_to_s8(name), s8("_lock_"));
- for (i32 i = 0; i < lock_count; i++) {
+ for (u32 i = 0; i < lock_count; i++) {
Stream lb = sb;
- stream_append_i64(&lb, i);
+ stream_append_u64(&lb, i);
stream_append_byte(&lb, 0);
ctx->semaphores[i] = CreateSemaphoreA(0, 1, 1, (c8 *)lb.data);
- if (ctx->semaphores[i] == INVALID_FILE) {
- os_fatal(s8("os_create_shared_memory_area: "
- "failed to create semaphore\n"));
- }
+ if (ctx->semaphores[i] == INVALID_FILE)
+ os_fatal(s8("os_create_shared_memory_area: failed to create semaphore\n"));
}
}
}
diff --git a/static.c b/static.c
@@ -9,14 +9,11 @@
global void *debug_lib;
#define DEBUG_ENTRY_POINTS \
- X(beamformer_debug_ui_deinit) \
- X(beamformer_complete_compute) \
- X(beamformer_compute_setup) \
- X(beamformer_frame_step) \
- X(beamformer_reload_shader) \
- X(beamformer_rf_upload) \
- X(beamform_work_queue_push) \
- X(beamform_work_queue_push_commit)
+ X(beamformer_debug_ui_deinit) \
+ X(beamformer_complete_compute) \
+ X(beamformer_frame_step) \
+ X(beamformer_reload_shader) \
+ X(beamformer_rf_upload)
#define X(name) global name ##_fn *name;
DEBUG_ENTRY_POINTS
@@ -273,12 +270,14 @@ function OS_THREAD_ENTRY_POINT_FN(compute_worker_thread_entry_point)
glfwMakeContextCurrent(ctx->window_handle);
ctx->gl_context = os_get_native_gl_context(ctx->window_handle);
- beamformer_compute_setup(ctx->user_context);
+ BeamformerCtx *beamformer = (BeamformerCtx *)ctx->user_context;
+ glCreateQueries(GL_TIME_ELAPSED, countof(beamformer->compute_context.shader_timer_ids),
+ beamformer->compute_context.shader_timer_ids);
for (;;) {
worker_thread_sleep(ctx);
asan_poison_region(ctx->arena.beg, ctx->arena.end - ctx->arena.beg);
- beamformer_complete_compute(ctx->user_context, ctx->arena, ctx->gl_context);
+ beamformer_complete_compute(ctx->user_context, &ctx->arena, ctx->gl_context);
}
unreachable();
@@ -353,21 +352,19 @@ setup_beamformer(Arena *memory, BeamformerCtx **o_ctx, BeamformerInput **o_input
ctx->compute_shader_stats = push_struct(memory, ComputeShaderStats);
ctx->compute_timing_table = push_struct(memory, ComputeTimingTable);
- ctx->shared_memory = os_create_shared_memory_area(memory, OS_SHARED_MEMORY_NAME,
- BeamformerSharedMemoryLockKind_Count,
+	/* TODO(rnp): I'm not sure if it's a good idea to pre-reserve a bunch of semaphores
+	 * on w32 but that's what we are doing for now */
+ u32 lock_count = BeamformerSharedMemoryLockKind_Count + BeamformerMaxParameterBlockSlots;
+ ctx->shared_memory = os_create_shared_memory_area(memory, OS_SHARED_MEMORY_NAME, lock_count,
BEAMFORMER_SHARED_MEMORY_SIZE);
BeamformerSharedMemory *sm = ctx->shared_memory.region;
if (!sm) os_fatal(s8("Get more ram lol\n"));
mem_clear(sm, 0, sizeof(*sm));
sm->version = BEAMFORMER_SHARED_MEMORY_VERSION;
+ sm->reserved_parameter_blocks = 1;
- /* NOTE: default compute shader pipeline */
- sm->shaders[0] = BeamformerShaderKind_Decode;
- sm->shaders[1] = BeamformerShaderKind_DAS;
- sm->shader_count = 2;
-
- ComputeShaderCtx *cs = &ctx->csctx;
+ BeamformerComputeContext *cs = &ctx->compute_context;
GLWorkerThreadContext *worker = &ctx->os.compute_worker;
/* TODO(rnp): we should lock this down after we have something working */
@@ -582,6 +579,9 @@ setup_beamformer(Arena *memory, BeamformerCtx **o_ctx, BeamformerInput **o_input
cs->unit_cube_model = render_model_from_arrays(unit_cube_vertices, unit_cube_normals,
sizeof(unit_cube_vertices),
unit_cube_indices, countof(unit_cube_indices));
+
+ /* stfu gcc this is used */
+ DEBUG_DECL((void)BeamformerParameterBlockRegionOffsets;)
}
function void
diff --git a/tests/throughput.c b/tests/throughput.c
@@ -291,7 +291,7 @@ function b32
send_frame(i16 *restrict i16_data, BeamformerParameters *restrict bp)
{
u32 data_size = bp->rf_raw_dim[0] * bp->rf_raw_dim[1] * sizeof(i16);
- b32 result = beamformer_push_data_with_compute(i16_data, data_size, BeamformerViewPlaneTag_XZ);
+ b32 result = beamformer_push_data_with_compute(i16_data, data_size, BeamformerViewPlaneTag_XZ, 0);
if (!result && !g_should_exit) printf("lib error: %s\n", beamformer_get_last_error_string());
return result;
@@ -339,7 +339,7 @@ execute_study(s8 study, Arena arena, Stream path, Options *options)
}
{
- align_as(64) v2 focal_vectors[countof(zbp->focal_depths)];
+ alignas(64) v2 focal_vectors[countof(zbp->focal_depths)];
for (u32 i = 0; i < countof(zbp->focal_depths); i++)
focal_vectors[i] = (v2){{zbp->transmit_angles[i], zbp->focal_depths[i]}};
beamformer_push_focal_vectors((f32 *)focal_vectors, countof(focal_vectors));
@@ -352,7 +352,7 @@ execute_study(s8 study, Arena arena, Stream path, Options *options)
free(zbp);
i32 shader_stages[16];
- i32 shader_stage_count = 0;
+ u32 shader_stage_count = 0;
if (options->cuda) shader_stages[shader_stage_count++] = BeamformerShaderKind_CudaDecode;
else shader_stages[shader_stage_count++] = BeamformerShaderKind_Decode;
shader_stages[shader_stage_count++] = BeamformerShaderKind_DAS;
diff --git a/ui.c b/ui.c
@@ -394,6 +394,7 @@ struct BeamformerUI {
BeamformerUIParameters params;
b32 flush_params;
+ u32 selected_parameter_block;
FrameViewRenderContext *frame_view_render_context;
@@ -656,7 +657,7 @@ push_custom_view_title(Stream *s, Variable *var)
}break;
case BeamformerFrameViewKind_Indexed:{
stream_append_s8(s, s8(": Index {"));
- stream_append_u64(s, *bv->cycler->cycler.state % MAX_BEAMFORMED_SAVED_FRAMES);
+ stream_append_u64(s, *bv->cycler->cycler.state % BeamformerMaxSavedFrames);
stream_append_s8(s, s8("} ["));
}break;
case BeamformerFrameViewKind_3DXPlane:{ stream_append_s8(s, s8(": 3D X-Plane")); }break;
@@ -1273,7 +1274,7 @@ ui_beamformer_frame_view_convert(BeamformerUI *ui, Arena *arena, Variable *view,
}break;
case BeamformerFrameViewKind_Indexed:{
bv->cycler = add_variable_cycler(ui, menu, arena, 0, ui->small_font, s8("Index:"),
- &bv->cycler_state, 0, MAX_BEAMFORMED_SAVED_FRAMES);
+ &bv->cycler_state, 0, BeamformerMaxSavedFrames);
}break;
default:{}break;
}
@@ -1316,8 +1317,8 @@ add_compute_progress_bar(Variable *parent, BeamformerCtx *ctx)
result->view.child = add_variable(ui, result, &ui->arena, s8(""), 0,
VT_COMPUTE_PROGRESS_BAR, ui->small_font);
ComputeProgressBar *bar = &result->view.child->compute_progress_bar;
- bar->progress = &ctx->csctx.processing_progress;
- bar->processing = &ctx->csctx.processing_compute;
+ bar->progress = &ctx->compute_context.processing_progress;
+ bar->processing = &ctx->compute_context.processing_compute;
return result;
}
@@ -2571,7 +2572,7 @@ push_compute_time(Arena *arena, s8 prefix, f32 time)
function v2
draw_compute_stats_bar_view(BeamformerUI *ui, Arena arena, ComputeShaderStats *stats,
- BeamformerShaderKind *stages, i32 stages_count, f32 compute_time_sum,
+ BeamformerShaderKind *stages, u32 stages_count, f32 compute_time_sum,
TextSpec ts, Rect r, v2 mouse)
{
read_only local_persist s8 frame_labels[] = {s8_comp("0:"), s8_comp("-1:"), s8_comp("-2:"), s8_comp("-3:")};
@@ -2582,7 +2583,7 @@ draw_compute_stats_bar_view(BeamformerUI *ui, Arena arena, ComputeShaderStats *s
cells[0].text = frame_labels[i];
u32 frame_index = (stats->latest_frame_index - i) % countof(stats->table.times);
u32 seen_shaders = 0;
- for (i32 j = 0; j < stages_count; j++) {
+ for (u32 j = 0; j < stages_count; j++) {
if ((seen_shaders & (1u << stages[j])) == 0)
total_times[i] += stats->table.times[frame_index][stages[j]];
seen_shaders |= (1u << stages[j]);
@@ -2617,7 +2618,7 @@ draw_compute_stats_bar_view(BeamformerUI *ui, Arena arena, ComputeShaderStats *s
Rect rect;
rect.pos = v2_add(cr.pos, (v2){{cr.size.w + table->cell_pad.w , cr.size.h * 0.15f}});
rect.size = (v2){.y = 0.7f * cr.size.h};
- for (i32 i = 0; i < stages_count; i++) {
+ for (u32 i = 0; i < stages_count; i++) {
rect.size.w = total_width * stats->table.times[frame_index][stages[i]] / total_times[row_index];
Color color = colour_from_normalized(g_colour_palette[i % countof(g_colour_palette)]);
DrawRectangleRec(rect.rl, color);
@@ -2690,17 +2691,21 @@ draw_compute_stats_view(BeamformerUI *ui, Arena arena, Variable *view, Rect r, v
{
assert(view->type == VT_COMPUTE_STATS_VIEW);
- ComputeStatsView *csv = &view->compute_stats_view;
- BeamformerComputePipeline *cp = &ui->beamformer_context->csctx.compute_pipeline;
- ComputeShaderStats *stats = csv->compute_shader_stats;
+ read_only local_persist BeamformerComputePlan dummy_plan = {0};
+ u32 selected_plan = ui->selected_parameter_block % BeamformerMaxParameterBlockSlots;
+ BeamformerComputePlan *cp = ui->beamformer_context->compute_context.compute_plans[selected_plan];
+ if (!cp) cp = &dummy_plan;
+
+ ComputeStatsView *csv = &view->compute_stats_view;
+ ComputeShaderStats *stats = csv->compute_shader_stats;
f32 compute_time_sum = 0;
- i32 stages = cp->shader_count;
+ u32 stages = cp->pipeline.shader_count;
TextSpec text_spec = {.font = &ui->font, .colour = FG_COLOUR, .flags = TF_LIMITED};
static_assert(BeamformerShaderKind_ComputeCount <= 32, "shader kind bitfield test");
u32 seen_shaders = 0;
- for (i32 i = 0; i < stages; i++) {
- BeamformerShaderKind index = cp->shaders[i];
+ for (u32 i = 0; i < stages; i++) {
+ BeamformerShaderKind index = cp->pipeline.shaders[i];
if ((seen_shaders & (1u << index)) == 0)
compute_time_sum += stats->average_times[index];
seen_shaders |= (1u << index);
@@ -2715,20 +2720,20 @@ draw_compute_stats_view(BeamformerUI *ui, Arena arena, Variable *view, Rect r, v
read_only local_persist s8 labels[BeamformerShaderKind_ComputeCount] = {COMPUTE_SHADERS_INTERNAL};
#undef X
da_reserve(&arena, table, stages);
- for (i32 i = 0; i < stages; i++) {
- push_table_time_row(table, &arena, labels[cp->shaders[i]],
- stats->average_times[cp->shaders[i]]);
+ for (u32 i = 0; i < stages; i++) {
+ push_table_time_row(table, &arena, labels[cp->pipeline.shaders[i]],
+ stats->average_times[cp->pipeline.shaders[i]]);
}
}break;
case ComputeStatsViewKind_Bar:{
- result = draw_compute_stats_bar_view(ui, arena, stats, cp->shaders, stages, compute_time_sum,
- text_spec, r, mouse);
+ result = draw_compute_stats_bar_view(ui, arena, stats, cp->pipeline.shaders, stages,
+ compute_time_sum, text_spec, r, mouse);
r.pos = v2_add(r.pos, (v2){.y = result.y});
}break;
InvalidDefaultCase;
}
- u32 rf_size = ui->beamformer_context->csctx.rf_buffer.rf_size;
+ u32 rf_size = ui->beamformer_context->compute_context.rf_buffer.size;
push_table_time_row_with_fps(table, &arena, s8("Compute Total:"), compute_time_sum);
push_table_time_row_with_fps(table, &arena, s8("RF Upload Delta:"), stats->rf_time_delta_average);
push_table_memory_size_row(table, &arena, s8("Input RF Size:"), rf_size);
@@ -3802,7 +3807,7 @@ ui_init(BeamformerCtx *ctx, Arena store)
ui = ctx->ui = push_struct(&store, typeof(*ui));
ui->arena = store;
ui->frame_view_render_context = &ctx->frame_view_render_context;
- ui->unit_cube_model = ctx->csctx.unit_cube_model;
+ ui->unit_cube_model = ctx->compute_context.unit_cube_model;
ui->shared_memory = ctx->shared_memory;
ui->beamformer_context = ctx;
@@ -3837,8 +3842,6 @@ ui_init(BeamformerCtx *ctx, Arena store)
split->region_split.left = add_compute_progress_bar(split, ctx);
split->region_split.right = add_compute_stats_view(ui, split, &ui->arena, ctx);
- ctx->ui_read_params = 1;
-
/* NOTE(rnp): shrink variable size once this fires */
assert((uz)(ui->arena.beg - (u8 *)ui) < KB(64));
}
@@ -3864,11 +3867,16 @@ draw_ui(BeamformerCtx *ctx, BeamformerInput *input, BeamformerFrame *frame_to_dr
asan_poison_region(ui->arena.beg, ui->arena.end - ui->arena.beg);
- /* TODO(rnp): there should be a better way of detecting this */
- if (ctx->ui_read_params) {
- mem_copy(&ui->params, &sm->parameters_ui, sizeof(ui->params));
- ui->flush_params = 0;
- ctx->ui_read_params = 0;
+ u32 selected_block = ui->selected_parameter_block % BeamformerMaxParameterBlockSlots;
+ u32 selected_mask = 1 << selected_block;
+ if (ctx->ui_dirty_parameter_blocks & selected_mask) {
+ BeamformerParameterBlock *pb = beamformer_parameter_block_lock(&ctx->shared_memory, selected_block, 0);
+ if (pb) {
+ mem_copy(&ui->params, &pb->parameters_ui, sizeof(ui->params));
+ ui->flush_params = 0;
+ atomic_and_u32(&ctx->ui_dirty_parameter_blocks, ~selected_mask);
+ beamformer_parameter_block_unlock(&ctx->shared_memory, selected_block);
+ }
}
/* NOTE: process interactions first because the user interacted with
@@ -3878,23 +3886,26 @@ draw_ui(BeamformerCtx *ctx, BeamformerInput *input, BeamformerFrame *frame_to_dr
if (ui->flush_params) {
validate_ui_parameters(&ui->params);
- i32 lock = BeamformerSharedMemoryLockKind_Parameters;
- if (ctx->latest_frame && os_shared_memory_region_lock(&ctx->shared_memory, sm->locks, lock, 0)) {
- mem_copy(&sm->parameters_ui, &ui->params, sizeof(ui->params));
- ui->flush_params = 0;
- mark_shared_memory_region_dirty(sm, lock);
- os_shared_memory_region_unlock(&ctx->shared_memory, sm->locks, lock);
-
- BeamformerSharedMemoryLockKind dispatch_lock = BeamformerSharedMemoryLockKind_DispatchCompute;
- if (!sm->live_imaging_parameters.active &&
- os_shared_memory_region_lock(&ctx->shared_memory, sm->locks, (i32)dispatch_lock, 0))
- {
- BeamformWork *work = beamform_work_queue_push(ctx->beamform_work_queue);
- BeamformerViewPlaneTag tag = frame_to_draw ? frame_to_draw->view_plane_tag : 0;
- if (fill_frame_compute_work(ctx, work, tag, 0))
- beamform_work_queue_push_commit(ctx->beamform_work_queue);
+ if (ctx->latest_frame) {
+ BeamformerParameterBlock *pb = beamformer_parameter_block_lock(&ctx->shared_memory, selected_block, 0);
+ if (pb) {
+ ui->flush_params = 0;
+ mem_copy(&pb->parameters_ui, &ui->params, sizeof(ui->params));
+ mark_parameter_block_region_dirty(ctx->shared_memory.region, selected_block,
+ BeamformerParameterBlockRegion_Parameters);
+ beamformer_parameter_block_unlock(&ctx->shared_memory, selected_block);
+
+ BeamformerSharedMemoryLockKind dispatch_lock = BeamformerSharedMemoryLockKind_DispatchCompute;
+ if (!sm->live_imaging_parameters.active &&
+ os_shared_memory_region_lock(&ctx->shared_memory, sm->locks, (i32)dispatch_lock, 0))
+ {
+ BeamformWork *work = beamform_work_queue_push(ctx->beamform_work_queue);
+ BeamformerViewPlaneTag tag = frame_to_draw ? frame_to_draw->view_plane_tag : 0;
+ if (fill_frame_compute_work(ctx, work, tag, selected_block, 0))
+ beamform_work_queue_push_commit(ctx->beamform_work_queue);
+ }
+ os_wake_waiters(&ctx->os.compute_worker.sync_variable);
}
- os_wake_waiters(&ctx->os.compute_worker.sync_variable);
}
}
diff --git a/util.c b/util.c
@@ -23,9 +23,26 @@ mem_move(u8 *dest, u8 *src, uz n)
}
function u8 *
+arena_aligned_start(Arena a, uz alignment)
+{
+ uz padding = -(uintptr_t)a.beg & (alignment - 1);
+ u8 *result = a.beg + padding;
+ return result;
+}
+
+#define arena_capacity(a, t) arena_capacity_(a, sizeof(t), alignof(t))
+function iz
+arena_capacity_(Arena *a, iz size, uz alignment)
+{
+ iz available = a->end - arena_aligned_start(*a, alignment);
+ iz result = available / size;
+ return result;
+}
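+
+/* Worked example (hypothetical address): with a.beg == 0x1003 and 16 byte
+ * alignment, padding = -(0x1003) & 0xF == 0xD, so the aligned start is
+ * 0x1010; arena_capacity(&a, f32) then divides the bytes remaining past
+ * that start by sizeof(f32). */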
+
+function u8 *
arena_commit(Arena *a, iz size)
{
- ASSERT(a->end - a->beg >= size);
+ assert(a->end - a->beg >= size);
u8 *result = a->beg;
a->beg += size;
return result;
@@ -42,27 +59,16 @@ arena_pop(Arena *a, iz length)
function void *
arena_alloc(Arena *a, iz len, uz align, iz count)
{
- /* NOTE: special case 0 arena */
- if (a->beg == 0)
- return 0;
-
- uz padding = -(uintptr_t)a->beg & (align - 1);
- iz available = a->end - a->beg - (iz)padding;
- assert((available >= 0 && count <= available / len));
- void *p = a->beg + padding;
- asan_unpoison_region(p, count * len);
- a->beg += (iz)padding + count * len;
- /* TODO: Performance? */
- return mem_clear(p, 0, count * len);
-}
-
-#define arena_capacity(a, t) arena_capacity_(a, sizeof(t), _Alignof(t))
-function iz
-arena_capacity_(Arena *a, iz size, uz alignment)
-{
- uz padding = -(uintptr_t)a->beg & (alignment - 1);
- iz available = a->end - a->beg - (iz)padding;
- iz result = available / size;
+ void *result = 0;
+ if (a->beg) {
+ u8 *start = arena_aligned_start(*a, align);
+ iz available = a->end - start;
+ assert((available >= 0 && count <= available / len));
+ asan_unpoison_region(start, count * len);
+ a->beg = start + count * len;
+ /* TODO: Performance? */
+ result = mem_clear(start, 0, count * len);
+ }
return result;
}
diff --git a/util.h b/util.h
@@ -62,6 +62,7 @@
#define global static
#define local_persist static
+#define alignof _Alignof
#define static_assert _Static_assert
/* NOTE: garbage to get the prepocessor to properly stringize the value of a macro */
@@ -337,7 +338,7 @@ typedef RENDERDOC_START_FRAME_CAPTURE_FN(renderdoc_start_frame_capture_fn);
#define RENDERDOC_END_FRAME_CAPTURE_FN(name) b32 name(iptr gl_context, iptr window_handle)
typedef RENDERDOC_END_FRAME_CAPTURE_FN(renderdoc_end_frame_capture_fn);
-typedef align_as(16) u8 RenderDocAPI[216];
+typedef alignas(16) u8 RenderDocAPI[216];
#define RENDERDOC_API_FN_ADDR(a, offset) (*(iptr *)((*a) + offset))
#define RENDERDOC_START_FRAME_CAPTURE(a) (renderdoc_start_frame_capture_fn *)RENDERDOC_API_FN_ADDR(a, 152)
#define RENDERDOC_END_FRAME_CAPTURE(a) (renderdoc_end_frame_capture_fn *) RENDERDOC_API_FN_ADDR(a, 168)