beamformer_core.c (59291B)
1 /* See LICENSE for license details. */ 2 /* TODO(rnp): 3 * [ ]: refactor: DecodeMode_None should use a different mapping and optional conversion shader 4 * for rf only mode with no filter and demod/filter should gain the OutputFloats flag for iq 5 * case and rf mode with filter; this can also be used instead of first pass uniform 6 * [ ]: refactor: replace UploadRF with just the scratch_rf_size variable, 7 * use below to spin wait in library 8 * [ ]: utilize umonitor/umwait (intel), monitorx/mwaitx (amd), and wfe/sev (aarch64) 9 * for power efficient low latency waiting 10 * [ ]: refactor: split decode into reshape and decode 11 * - the check for first pass reshaping is the last non constant check 12 * in the shader 13 * - this will also remove the need for the channel mapping in the decode shader 14 * [X]: refactor: ui: reload only shader which is affected by the interaction 15 * [ ]: BeamformWorkQueue -> BeamformerWorkQueue 16 * [ ]: need to keep track of gpu memory in some way 17 * - want to be able to store more than 16 2D frames but limit 3D frames 18 * - maybe keep track of how much gpu memory is committed for beamformed images 19 * and use that to determine when to loop back over existing textures 20 * - to do this maybe use a circular linked list instead of a flat array 21 * - then have a way of querying how many frames are available for a specific point count 22 * [ ]: bug: reinit cuda on hot-reload 23 */ 24 25 #include "compiler.h" 26 27 #if defined(BEAMFORMER_DEBUG) && !defined(BEAMFORMER_EXPORT) && OS_WINDOWS 28 #define BEAMFORMER_EXPORT __declspec(dllexport) 29 #endif 30 31 #include "beamformer_internal.h" 32 33 global f32 dt_for_frame; 34 35 #define DECODE_FIRST_PASS_UNIFORM_LOC 1 36 37 #define DAS_CYCLE_T_UNIFORM_LOC 2 38 #define DAS_FAST_CHANNEL_UNIFORM_LOC 3 39 40 #define MIN_MAX_MIPS_LEVEL_UNIFORM_LOC 1 41 #define SUM_PRESCALE_UNIFORM_LOC 1 42 43 #if !BEAMFORMER_RENDERDOC_HOOKS 44 #define start_renderdoc_capture(...) 45 #define end_renderdoc_capture(...) 46 #define renderdoc_attached(...) (0) 47 #else 48 global renderdoc_start_frame_capture_fn *start_frame_capture; 49 global renderdoc_end_frame_capture_fn *end_frame_capture; 50 #define start_renderdoc_capture(gl) if (start_frame_capture) start_frame_capture(gl, 0) 51 #define end_renderdoc_capture(gl) if (end_frame_capture) end_frame_capture(gl, 0) 52 #define renderdoc_attached(...) (start_frame_capture != 0) 53 #endif 54 55 typedef struct { 56 BeamformerFrame *frames; 57 u32 capacity; 58 u32 offset; 59 u32 cursor; 60 u32 needed_frames; 61 } ComputeFrameIterator; 62 63 function void 64 beamformer_compute_plan_release(BeamformerComputeContext *cc, u32 block) 65 { 66 assert(block < countof(cc->compute_plans)); 67 BeamformerComputePlan *cp = cc->compute_plans[block]; 68 if (cp) { 69 glDeleteBuffers(countof(cp->ubos), cp->ubos); 70 glDeleteTextures(countof(cp->textures), cp->textures); 71 for (u32 i = 0; i < countof(cp->filters); i++) 72 glDeleteBuffers(1, &cp->filters[i].ssbo); 73 cc->compute_plans[block] = 0; 74 SLLPushFreelist(cp, cc->compute_plan_freelist); 75 } 76 } 77 78 function BeamformerComputePlan * 79 beamformer_compute_plan_for_block(BeamformerComputeContext *cc, u32 block, Arena *arena) 80 { 81 assert(block < countof(cc->compute_plans)); 82 BeamformerComputePlan *result = cc->compute_plans[block]; 83 if (!result) { 84 result = SLLPopFreelist(cc->compute_plan_freelist); 85 if (!result) result = push_struct_no_zero(arena, BeamformerComputePlan); 86 zero_struct(result); 87 cc->compute_plans[block] = result; 88 89 glCreateBuffers(countof(result->ubos), result->ubos); 90 91 Stream label = arena_stream(*arena); 92 #define X(k, t, ...) \ 93 glNamedBufferStorage(result->ubos[BeamformerComputeUBOKind_##k], sizeof(t), \ 94 0, GL_DYNAMIC_STORAGE_BIT); \ 95 stream_append_s8(&label, s8(#t "[")); \ 96 stream_append_u64(&label, block); \ 97 stream_append_byte(&label, ']'); \ 98 glObjectLabel(GL_BUFFER, result->ubos[BeamformerComputeUBOKind_##k], \ 99 label.widx, (c8 *)label.data); \ 100 label.widx = 0; 101 BEAMFORMER_COMPUTE_UBO_LIST 102 #undef X 103 104 #define X(_k, t, ...) t, 105 GLenum gl_kind[] = {BEAMFORMER_COMPUTE_TEXTURE_LIST_FULL}; 106 #undef X 107 read_only local_persist s8 tex_prefix[] = { 108 #define X(k, ...) s8_comp(#k "["), 109 BEAMFORMER_COMPUTE_TEXTURE_LIST_FULL 110 #undef X 111 }; 112 glCreateTextures(GL_TEXTURE_1D, BeamformerComputeTextureKind_Count - 1, result->textures); 113 for (u32 i = 0; i < BeamformerComputeTextureKind_Count - 1; i++) { 114 /* TODO(rnp): this could be predicated on channel count for this compute plan */ 115 glTextureStorage1D(result->textures[i], 1, gl_kind[i], BeamformerMaxChannelCount); 116 stream_append_s8(&label, tex_prefix[i]); 117 stream_append_u64(&label, block); 118 stream_append_byte(&label, ']'); 119 glObjectLabel(GL_TEXTURE, result->textures[i], label.widx, (c8 *)label.data); 120 label.widx = 0; 121 } 122 } 123 return result; 124 } 125 126 function void 127 beamformer_filter_update(BeamformerFilter *f, BeamformerFilterParameters fp, u32 block, u32 slot, Arena arena) 128 { 129 Stream sb = arena_stream(arena); 130 stream_append_s8s(&sb, 131 beamformer_filter_kind_strings[fp.kind % countof(beamformer_filter_kind_strings)], 132 s8("Filter[")); 133 stream_append_u64(&sb, block); 134 stream_append_s8(&sb, s8("][")); 135 stream_append_u64(&sb, slot); 136 stream_append_byte(&sb, ']'); 137 s8 label = arena_stream_commit(&arena, &sb); 138 139 void *filter = 0; 140 switch (fp.kind) { 141 case BeamformerFilterKind_Kaiser:{ 142 /* TODO(rnp): this should also support complex */ 143 /* TODO(rnp): implement this as an IFIR filter instead to reduce computation */ 144 filter = kaiser_low_pass_filter(&arena, fp.kaiser.cutoff_frequency, fp.sampling_frequency, 145 fp.kaiser.beta, (i32)fp.kaiser.length); 146 f->length = (i32)fp.kaiser.length; 147 f->time_delay = (f32)f->length / 2.0f / fp.sampling_frequency; 148 }break; 149 case BeamformerFilterKind_MatchedChirp:{ 150 typeof(fp.matched_chirp) *mc = &fp.matched_chirp; 151 f32 fs = fp.sampling_frequency; 152 f->length = (i32)(mc->duration * fs); 153 if (fp.complex) { 154 filter = baseband_chirp(&arena, mc->min_frequency, mc->max_frequency, fs, f->length, 1, 0.5f); 155 f->time_delay = complex_filter_first_moment(filter, f->length, fs); 156 } else { 157 filter = rf_chirp(&arena, mc->min_frequency, mc->max_frequency, fs, f->length, 1); 158 f->time_delay = real_filter_first_moment(filter, f->length, fs); 159 } 160 }break; 161 InvalidDefaultCase; 162 } 163 164 f->parameters = fp; 165 166 glDeleteBuffers(1, &f->ssbo); 167 glCreateBuffers(1, &f->ssbo); 168 glNamedBufferStorage(f->ssbo, f->length * (i32)sizeof(f32) * (fp.complex? 2 : 1), filter, 0); 169 glObjectLabel(GL_BUFFER, f->ssbo, (i32)label.len, (c8 *)label.data); 170 } 171 172 function ComputeFrameIterator 173 compute_frame_iterator(BeamformerCtx *ctx, u32 start_index, u32 needed_frames) 174 { 175 start_index = start_index % ARRAY_COUNT(ctx->beamform_frames); 176 177 ComputeFrameIterator result; 178 result.frames = ctx->beamform_frames; 179 result.offset = start_index; 180 result.capacity = ARRAY_COUNT(ctx->beamform_frames); 181 result.cursor = 0; 182 result.needed_frames = needed_frames; 183 return result; 184 } 185 186 function BeamformerFrame * 187 frame_next(ComputeFrameIterator *bfi) 188 { 189 BeamformerFrame *result = 0; 190 if (bfi->cursor != bfi->needed_frames) { 191 u32 index = (bfi->offset + bfi->cursor++) % bfi->capacity; 192 result = bfi->frames + index; 193 } 194 return result; 195 } 196 197 function b32 198 beamformer_frame_compatible(BeamformerFrame *f, iv3 dim, GLenum gl_kind) 199 { 200 b32 result = gl_kind == f->gl_kind && iv3_equal(dim, f->dim); 201 return result; 202 } 203 204 function iv3 205 das_valid_points(iv3 points) 206 { 207 iv3 result; 208 result.x = Max(points.x, 1); 209 result.y = Max(points.y, 1); 210 result.z = Max(points.z, 1); 211 return result; 212 } 213 214 function void 215 alloc_beamform_frame(BeamformerFrame *out, iv3 out_dim, GLenum gl_kind, s8 name, Arena arena) 216 { 217 out->dim = das_valid_points(out_dim); 218 219 /* NOTE: allocate storage for beamformed output data; 220 * this is shared between compute and fragment shaders */ 221 u32 max_dim = (u32)Max(out->dim.x, Max(out->dim.y, out->dim.z)); 222 out->mips = (i32)ctz_u32(round_up_power_of_2(max_dim)) + 1; 223 224 out->gl_kind = gl_kind; 225 226 Stream label = arena_stream(arena); 227 stream_append_s8(&label, name); 228 stream_append_byte(&label, '['); 229 stream_append_hex_u64(&label, out->id); 230 stream_append_byte(&label, ']'); 231 232 glDeleteTextures(1, &out->texture); 233 glCreateTextures(GL_TEXTURE_3D, 1, &out->texture); 234 glTextureStorage3D(out->texture, out->mips, gl_kind, out->dim.x, out->dim.y, out->dim.z); 235 236 glTextureParameteri(out->texture, GL_TEXTURE_MIN_FILTER, GL_NEAREST); 237 glTextureParameteri(out->texture, GL_TEXTURE_MAG_FILTER, GL_NEAREST); 238 239 LABEL_GL_OBJECT(GL_TEXTURE, out->texture, stream_to_s8(&label)); 240 } 241 242 function void 243 update_hadamard_texture(BeamformerComputePlan *cp, i32 order, Arena arena) 244 { 245 f32 *hadamard = make_hadamard_transpose(&arena, order); 246 if (hadamard) { 247 cp->hadamard_order = order; 248 u32 *texture = cp->textures + BeamformerComputeTextureKind_Hadamard; 249 glDeleteTextures(1, texture); 250 glCreateTextures(GL_TEXTURE_2D, 1, texture); 251 glTextureStorage2D(*texture, 1, GL_R32F, order, order); 252 glTextureSubImage2D(*texture, 0, 0, 0, order, order, GL_RED, GL_FLOAT, hadamard); 253 254 Stream label = arena_stream(arena); 255 stream_append_s8(&label, s8("Hadamard")); 256 stream_append_i64(&label, order); 257 LABEL_GL_OBJECT(GL_TEXTURE, *texture, stream_to_s8(&label)); 258 } 259 } 260 261 function void 262 alloc_shader_storage(BeamformerCtx *ctx, u32 decoded_data_size, Arena arena) 263 { 264 BeamformerComputeContext *cc = &ctx->compute_context; 265 glDeleteBuffers(countof(cc->ping_pong_ssbos), cc->ping_pong_ssbos); 266 glCreateBuffers(countof(cc->ping_pong_ssbos), cc->ping_pong_ssbos); 267 268 cc->ping_pong_ssbo_size = decoded_data_size; 269 270 Stream label = arena_stream(arena); 271 stream_append_s8(&label, s8("PingPongSSBO[")); 272 i32 s_widx = label.widx; 273 for (i32 i = 0; i < countof(cc->ping_pong_ssbos); i++) { 274 glNamedBufferStorage(cc->ping_pong_ssbos[i], (iz)decoded_data_size, 0, 0); 275 stream_append_i64(&label, i); 276 stream_append_byte(&label, ']'); 277 LABEL_GL_OBJECT(GL_BUFFER, cc->ping_pong_ssbos[i], stream_to_s8(&label)); 278 stream_reset(&label, s_widx); 279 } 280 281 /* TODO(rnp): (25.08.04) cuda lib is heavily broken atm. First there are multiple RF 282 * buffers and cuda decode shouldn't assume that the data is coming from the rf_buffer 283 * ssbo. Second each parameter block may need a different hadamard matrix so ideally 284 * decode should just take the texture as a parameter. Third, none of these dimensions 285 * need to be pre-known by the library unless its allocating GPU memory which it shouldn't 286 * need to do. For now grab out of parameter block 0 but it is not correct */ 287 BeamformerParameterBlock *pb = beamformer_parameter_block(ctx->shared_memory, 0); 288 /* NOTE(rnp): these are stubs when CUDA isn't supported */ 289 cuda_register_buffers(cc->ping_pong_ssbos, countof(cc->ping_pong_ssbos), cc->rf_buffer.ssbo); 290 u32 decoded_data_dimension[3] = {pb->parameters.sample_count, pb->parameters.channel_count, pb->parameters.acquisition_count}; 291 cuda_init(pb->parameters.raw_data_dimensions.E, decoded_data_dimension); 292 } 293 294 function void 295 push_compute_timing_info(ComputeTimingTable *t, ComputeTimingInfo info) 296 { 297 u32 index = atomic_add_u32(&t->write_index, 1) % countof(t->buffer); 298 t->buffer[index] = info; 299 } 300 301 function b32 302 fill_frame_compute_work(BeamformerCtx *ctx, BeamformWork *work, BeamformerViewPlaneTag plane, 303 u32 parameter_block, b32 indirect) 304 { 305 b32 result = work != 0; 306 if (result) { 307 u32 frame_id = atomic_add_u32(&ctx->next_render_frame_index, 1); 308 u32 frame_index = frame_id % countof(ctx->beamform_frames); 309 work->kind = indirect? BeamformerWorkKind_ComputeIndirect : BeamformerWorkKind_Compute; 310 work->lock = BeamformerSharedMemoryLockKind_DispatchCompute; 311 work->compute_context.parameter_block = parameter_block; 312 work->compute_context.frame = ctx->beamform_frames + frame_index; 313 work->compute_context.frame->ready_to_present = 0; 314 work->compute_context.frame->view_plane_tag = plane; 315 work->compute_context.frame->id = frame_id; 316 } 317 return result; 318 } 319 320 function void 321 do_sum_shader(BeamformerComputeContext *cc, u32 *in_textures, u32 in_texture_count, 322 u32 out_texture, iv3 out_data_dim) 323 { 324 /* NOTE: zero output before summing */ 325 glClearTexImage(out_texture, 0, GL_RED, GL_FLOAT, 0); 326 glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT); 327 328 glBindImageTexture(0, out_texture, 0, GL_TRUE, 0, GL_READ_WRITE, GL_RG32F); 329 for (u32 i = 0; i < in_texture_count; i++) { 330 glBindImageTexture(1, in_textures[i], 0, GL_TRUE, 0, GL_READ_ONLY, GL_RG32F); 331 glDispatchCompute(ORONE((u32)out_data_dim.x / 32u), 332 ORONE((u32)out_data_dim.y), 333 ORONE((u32)out_data_dim.z / 32u)); 334 glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); 335 } 336 } 337 338 function void 339 plan_compute_pipeline(BeamformerComputePlan *cp, BeamformerParameterBlock *pb) 340 { 341 b32 run_cuda_hilbert = 0; 342 b32 demodulate = 0; 343 344 for (u32 i = 0; i < pb->pipeline.shader_count; i++) { 345 switch (pb->pipeline.shaders[i]) { 346 case BeamformerShaderKind_CudaHilbert:{ run_cuda_hilbert = 1; }break; 347 case BeamformerShaderKind_Demodulate:{ demodulate = 1; }break; 348 default:{}break; 349 } 350 } 351 352 if (demodulate) run_cuda_hilbert = 0; 353 354 cp->iq_pipeline = demodulate || run_cuda_hilbert; 355 356 f32 sampling_frequency = pb->parameters.sampling_frequency; 357 u32 decimation_rate = MAX(pb->parameters.decimation_rate, 1); 358 u32 sample_count = pb->parameters.sample_count; 359 if (demodulate) { 360 sample_count /= (2 * decimation_rate); 361 sampling_frequency /= 2 * (f32)decimation_rate; 362 } 363 364 cp->rf_size = sample_count * pb->parameters.channel_count * pb->parameters.acquisition_count; 365 if (cp->iq_pipeline) cp->rf_size *= 8; 366 else cp->rf_size *= 4; 367 368 u32 das_sample_stride = 1; 369 u32 das_transmit_stride = sample_count; 370 u32 das_channel_stride = sample_count * pb->parameters.acquisition_count; 371 372 f32 time_offset = pb->parameters.time_offset; 373 374 // TODO(rnp): subgroup size 375 u32 subgroup_size = gl_parameters.vendor_id == GLVendor_NVIDIA ? 32 : 64; 376 377 BeamformerDataKind data_kind = pb->pipeline.data_kind; 378 cp->pipeline.shader_count = 0; 379 for (u32 i = 0; i < pb->pipeline.shader_count; i++) { 380 BeamformerShaderParameters *sp = pb->pipeline.parameters + i; 381 u32 slot = cp->pipeline.shader_count; 382 u32 shader = pb->pipeline.shaders[i]; 383 b32 commit = 0; 384 385 BeamformerShaderDescriptor *ld = cp->shader_descriptors + slot - 1; 386 BeamformerShaderDescriptor *sd = cp->shader_descriptors + slot; 387 zero_struct(sd); 388 389 switch (shader) { 390 case BeamformerShaderKind_CudaHilbert:{ commit = run_cuda_hilbert; }break; 391 case BeamformerShaderKind_Decode:{ 392 /* TODO(rnp): rework decode first and demodulate after */ 393 b32 first = slot == 0; 394 395 sd->bake.data_kind = data_kind; 396 if (!first) { 397 if (data_kind == BeamformerDataKind_Int16) { 398 sd->bake.data_kind = BeamformerDataKind_Int16Complex; 399 } else { 400 sd->bake.data_kind = BeamformerDataKind_Float32Complex; 401 } 402 } 403 404 BeamformerShaderKind *last_shader = cp->pipeline.shaders + slot - 1; 405 assert(first || ((*last_shader == BeamformerShaderKind_Demodulate || 406 *last_shader == BeamformerShaderKind_Filter))); 407 408 BeamformerShaderDecodeBakeParameters *db = &sd->bake.Decode; 409 db->decode_mode = pb->parameters.decode_mode; 410 db->transmit_count = pb->parameters.acquisition_count; 411 412 u32 channel_stride = pb->parameters.acquisition_count * pb->parameters.sample_count; 413 db->input_sample_stride = first? 1 : ld->bake.Filter.output_sample_stride; 414 db->input_channel_stride = first? channel_stride : ld->bake.Filter.output_channel_stride; 415 db->input_transmit_stride = first? pb->parameters.sample_count : 1; 416 417 db->output_sample_stride = das_sample_stride; 418 db->output_channel_stride = das_channel_stride; 419 db->output_transmit_stride = das_transmit_stride; 420 if (first) { 421 db->output_channel_stride *= decimation_rate; 422 db->output_transmit_stride *= decimation_rate; 423 } 424 425 if (run_cuda_hilbert) sd->bake.flags |= BeamformerShaderDecodeFlags_DilateOutput; 426 427 if (db->decode_mode == BeamformerDecodeMode_None) { 428 sd->layout = (uv3){{subgroup_size, 1, 1}}; 429 430 sd->dispatch.x = (u32)ceil_f32((f32)sample_count / (f32)sd->layout.x); 431 sd->dispatch.y = (u32)ceil_f32((f32)pb->parameters.channel_count / (f32)sd->layout.y); 432 sd->dispatch.z = (u32)ceil_f32((f32)pb->parameters.acquisition_count / (f32)sd->layout.z); 433 } else if (db->transmit_count > 40) { 434 sd->bake.flags |= BeamformerShaderDecodeFlags_UseSharedMemory; 435 db->to_process = 2; 436 437 if (db->transmit_count == 48) 438 db->to_process = db->transmit_count / 16; 439 440 b32 use_16z = db->transmit_count == 48 || db->transmit_count == 80 || 441 db->transmit_count == 96 || db->transmit_count == 160; 442 sd->layout = (uv3){{4, 1, use_16z? 16 : 32}}; 443 444 sd->dispatch.x = (u32)ceil_f32((f32)sample_count / (f32)sd->layout.x); 445 sd->dispatch.y = (u32)ceil_f32((f32)pb->parameters.channel_count / (f32)sd->layout.y); 446 sd->dispatch.z = (u32)ceil_f32((f32)pb->parameters.acquisition_count / (f32)sd->layout.z / (f32)db->to_process); 447 } else { 448 db->to_process = 1; 449 450 /* NOTE(rnp): register caching. using more threads will cause the compiler to do 451 * contortions to avoid spilling registers. using less gives higher performance */ 452 sd->layout = (uv3){{subgroup_size / 2, 1, 1}}; 453 454 sd->dispatch.x = (u32)ceil_f32((f32)sample_count / (f32)sd->layout.x); 455 sd->dispatch.y = (u32)ceil_f32((f32)pb->parameters.channel_count / (f32)sd->layout.y); 456 sd->dispatch.z = 1; 457 } 458 459 if (first) sd->dispatch.x *= decimation_rate; 460 461 /* NOTE(rnp): decode 2 samples per dispatch when data is i16 */ 462 if (first && data_kind == BeamformerDataKind_Int16) 463 sd->dispatch.x = (u32)ceil_f32((f32)sd->dispatch.x / 2); 464 465 commit = first || db->decode_mode != BeamformerDecodeMode_None; 466 }break; 467 case BeamformerShaderKind_Demodulate: 468 case BeamformerShaderKind_Filter: 469 { 470 b32 first = slot == 0; 471 b32 demod = shader == BeamformerShaderKind_Demodulate; 472 BeamformerFilter *f = cp->filters + sp->filter_slot; 473 474 time_offset += f->time_delay; 475 476 BeamformerShaderFilterBakeParameters *fb = &sd->bake.Filter; 477 fb->filter_length = (u32)f->length; 478 if (demod) sd->bake.flags |= BeamformerShaderFilterFlags_Demodulate; 479 if (f->parameters.complex) sd->bake.flags |= BeamformerShaderFilterFlags_ComplexFilter; 480 481 sd->bake.data_kind = data_kind; 482 if (!first) sd->bake.data_kind = BeamformerDataKind_Float32; 483 484 /* NOTE(rnp): when we are demodulating we pretend that the sampler was alternating 485 * between sampling the I portion and the Q portion of an IQ signal. Therefore there 486 * is an implicit decimation factor of 2 which must always be included. All code here 487 * assumes that the signal was sampled in such a way that supports this operation. 488 * To recover IQ[n] from the sampled data (RF[n]) we do the following: 489 * I[n] = RF[n] 490 * Q[n] = RF[n + 1] 491 * IQ[n] = I[n] - j*Q[n] 492 */ 493 if (demod) { 494 fb->demodulation_frequency = pb->parameters.demodulation_frequency; 495 fb->sampling_frequency = pb->parameters.sampling_frequency / 2; 496 fb->decimation_rate = decimation_rate; 497 fb->sample_count = pb->parameters.sample_count; 498 499 fb->output_channel_stride = das_channel_stride; 500 fb->output_sample_stride = das_sample_stride; 501 fb->output_transmit_stride = das_transmit_stride; 502 503 if (first) { 504 fb->input_channel_stride = pb->parameters.sample_count * pb->parameters.acquisition_count / 2; 505 fb->input_sample_stride = 1; 506 fb->input_transmit_stride = pb->parameters.sample_count / 2; 507 508 if (pb->parameters.decode_mode == BeamformerDecodeMode_None) { 509 sd->bake.flags |= BeamformerShaderFilterFlags_OutputFloats; 510 } else { 511 /* NOTE(rnp): output optimized layout for decoding */ 512 fb->output_channel_stride = das_channel_stride; 513 fb->output_sample_stride = pb->parameters.acquisition_count; 514 fb->output_transmit_stride = 1; 515 } 516 } else { 517 assert(cp->pipeline.shaders[slot - 1] == BeamformerShaderKind_Decode); 518 fb->input_channel_stride = ld->bake.Decode.output_channel_stride; 519 fb->input_sample_stride = ld->bake.Decode.output_sample_stride; 520 fb->input_transmit_stride = ld->bake.Decode.output_transmit_stride; 521 } 522 } else { 523 fb->decimation_rate = 1; 524 fb->output_channel_stride = sample_count * pb->parameters.acquisition_count; 525 fb->output_sample_stride = 1; 526 fb->output_transmit_stride = sample_count; 527 fb->input_channel_stride = sample_count * pb->parameters.acquisition_count; 528 fb->input_sample_stride = 1; 529 fb->input_transmit_stride = sample_count; 530 fb->sample_count = sample_count; 531 } 532 533 /* TODO(rnp): filter may need a different dispatch layout */ 534 sd->layout = (uv3){{128, 1, 1}}; 535 sd->dispatch.x = (u32)ceil_f32((f32)sample_count / (f32)sd->layout.x); 536 sd->dispatch.y = (u32)ceil_f32((f32)pb->parameters.channel_count / (f32)sd->layout.y); 537 sd->dispatch.z = (u32)ceil_f32((f32)pb->parameters.acquisition_count / (f32)sd->layout.z); 538 539 commit = 1; 540 }break; 541 case BeamformerShaderKind_DAS:{ 542 sd->bake.data_kind = BeamformerDataKind_Float32; 543 if (cp->iq_pipeline) 544 sd->bake.data_kind = BeamformerDataKind_Float32Complex; 545 546 BeamformerShaderDASBakeParameters *db = &sd->bake.DAS; 547 BeamformerDASUBO *du = &cp->das_ubo_data; 548 du->xdc_element_pitch = pb->parameters.xdc_element_pitch; 549 db->sampling_frequency = sampling_frequency; 550 db->demodulation_frequency = pb->parameters.demodulation_frequency; 551 db->speed_of_sound = pb->parameters.speed_of_sound; 552 db->time_offset = time_offset; 553 db->f_number = pb->parameters.f_number; 554 db->acquisition_kind = pb->parameters.acquisition_kind; 555 db->sample_count = sample_count; 556 db->channel_count = pb->parameters.channel_count; 557 db->acquisition_count = pb->parameters.acquisition_count; 558 db->interpolation_mode = pb->parameters.interpolation_mode; 559 db->transmit_angle = pb->parameters.focal_vector.E[0]; 560 db->focus_depth = pb->parameters.focal_vector.E[1]; 561 db->transmit_receive_orientation = pb->parameters.transmit_receive_orientation; 562 563 // NOTE(rnp): old gcc will miscompile an assignment 564 mem_copy(du->voxel_transform.E, pb->parameters.das_voxel_transform.E, sizeof(du->voxel_transform)); 565 mem_copy(du->xdc_transform.E, pb->parameters.xdc_transform.E, sizeof(du->xdc_transform)); 566 567 u32 id = pb->parameters.acquisition_kind; 568 569 if (id == BeamformerAcquisitionKind_UFORCES || id == BeamformerAcquisitionKind_FORCES) 570 du->voxel_transform = m4_mul(du->xdc_transform, du->voxel_transform); 571 572 if (id == BeamformerAcquisitionKind_UFORCES || id == BeamformerAcquisitionKind_UHERCULES) 573 sd->bake.flags |= BeamformerShaderDASFlags_Sparse; 574 575 if (pb->parameters.single_focus) sd->bake.flags |= BeamformerShaderDASFlags_SingleFocus; 576 if (pb->parameters.single_orientation) sd->bake.flags |= BeamformerShaderDASFlags_SingleOrientation; 577 if (pb->parameters.coherency_weighting) sd->bake.flags |= BeamformerShaderDASFlags_CoherencyWeighting; 578 else sd->bake.flags |= BeamformerShaderDASFlags_Fast; 579 580 sd->layout = (uv3){{1, 1, 1}}; 581 582 b32 has_x = cp->output_points.x > 1; 583 b32 has_y = cp->output_points.y > 1; 584 b32 has_z = cp->output_points.z > 1; 585 586 u32 grid_3d_z_size = Max(1, subgroup_size / (4 * 4)); 587 u32 grid_2d_y_size = Max(1, subgroup_size / 8); 588 589 switch (iv3_dimension(cp->output_points)) { 590 591 case 1:{ 592 if (has_x) sd->layout.x = subgroup_size; 593 if (has_y) sd->layout.y = subgroup_size; 594 if (has_z) sd->layout.z = subgroup_size; 595 }break; 596 597 case 2:{ 598 if (has_x && has_y) {sd->layout.x = 8; sd->layout.y = grid_2d_y_size;} 599 if (has_x && has_z) {sd->layout.x = 8; sd->layout.z = grid_2d_y_size;} 600 if (has_y && has_z) {sd->layout.y = 8; sd->layout.z = grid_2d_y_size;} 601 }break; 602 603 case 3:{sd->layout = (uv3){{4, 4, grid_3d_z_size}};}break; 604 605 InvalidDefaultCase; 606 } 607 608 sd->dispatch.x = (u32)ceil_f32((f32)cp->output_points.x / sd->layout.x); 609 sd->dispatch.y = (u32)ceil_f32((f32)cp->output_points.y / sd->layout.y); 610 sd->dispatch.z = (u32)ceil_f32((f32)cp->output_points.z / sd->layout.z); 611 612 commit = 1; 613 }break; 614 default:{ commit = 1; }break; 615 } 616 617 if (commit) { 618 u32 index = cp->pipeline.shader_count++; 619 cp->pipeline.shaders[index] = shader; 620 cp->pipeline.parameters[index] = *sp; 621 } 622 } 623 cp->pipeline.data_kind = data_kind; 624 } 625 626 function void 627 stream_push_shader_header(Stream *s, BeamformerShaderKind shader_kind, s8 header) 628 { 629 stream_append_s8s(s, s8("#version 460 core\n\n"), header); 630 631 switch (shader_kind) { 632 case BeamformerShaderKind_DAS:{ 633 stream_append_s8(s, s8("" 634 "layout(location = " str(DAS_CYCLE_T_UNIFORM_LOC) ") uniform uint u_cycle_t;\n" 635 "layout(location = " str(DAS_FAST_CHANNEL_UNIFORM_LOC) ") uniform int u_channel;\n\n" 636 )); 637 }break; 638 case BeamformerShaderKind_Decode:{ 639 stream_append_s8s(s, s8("" 640 "layout(location = " str(DECODE_FIRST_PASS_UNIFORM_LOC) ") uniform bool u_first_pass;\n\n" 641 )); 642 }break; 643 case BeamformerShaderKind_MinMax:{ 644 stream_append_s8(s, s8("layout(location = " str(MIN_MAX_MIPS_LEVEL_UNIFORM_LOC) 645 ") uniform int u_mip_map;\n\n")); 646 }break; 647 case BeamformerShaderKind_Sum:{ 648 stream_append_s8(s, s8("layout(location = " str(SUM_PRESCALE_UNIFORM_LOC) 649 ") uniform float u_sum_prescale = 1.0;\n\n")); 650 }break; 651 default:{}break; 652 } 653 } 654 655 function void 656 load_compute_shader(BeamformerCtx *ctx, BeamformerComputePlan *cp, u32 shader_slot, Arena arena) 657 { 658 read_only local_persist s8 compute_headers[BeamformerShaderKind_ComputeCount] = { 659 /* X(name, type, gltype) */ 660 #define X(name, t, gltype) "\t" #gltype " " #name ";\n" 661 [BeamformerShaderKind_DAS] = s8_comp("layout(std140, binding = 0) uniform parameters {\n" 662 BEAMFORMER_DAS_UBO_PARAM_LIST 663 "};\n\n" 664 ), 665 #undef X 666 }; 667 668 BeamformerShaderKind shader = cp->pipeline.shaders[shader_slot]; 669 670 u32 program = 0; 671 i32 reloadable_index = beamformer_shader_reloadable_index_by_shader[shader]; 672 if (reloadable_index != -1) { 673 BeamformerShaderKind base_shader = beamformer_reloadable_shader_kinds[reloadable_index]; 674 s8 path; 675 if (!BakeShaders) 676 path = push_s8_from_parts(&arena, os_path_separator(), s8("shaders"), 677 beamformer_reloadable_shader_files[reloadable_index]); 678 679 Stream shader_stream = arena_stream(arena); 680 stream_push_shader_header(&shader_stream, base_shader, compute_headers[base_shader]); 681 682 i32 header_vector_length = beamformer_shader_header_vector_lengths[reloadable_index]; 683 i32 *header_vector = beamformer_shader_header_vectors[reloadable_index]; 684 for (i32 index = 0; index < header_vector_length; index++) 685 stream_append_s8(&shader_stream, beamformer_shader_global_header_strings[header_vector[index]]); 686 687 BeamformerShaderDescriptor *sd = cp->shader_descriptors + shader_slot; 688 689 if (sd->layout.x != 0) { 690 stream_append_s8(&shader_stream, s8("layout(local_size_x = ")); 691 stream_append_u64(&shader_stream, sd->layout.x); 692 stream_append_s8(&shader_stream, s8(", local_size_y = ")); 693 stream_append_u64(&shader_stream, sd->layout.y); 694 stream_append_s8(&shader_stream, s8(", local_size_z = ")); 695 stream_append_u64(&shader_stream, sd->layout.z); 696 stream_append_s8(&shader_stream, s8(") in;\n\n")); 697 } 698 699 u32 *parameters = (u32 *)&sd->bake; 700 s8 *names = beamformer_shader_bake_parameter_names[reloadable_index]; 701 u32 float_bits = beamformer_shader_bake_parameter_float_bits[reloadable_index]; 702 i32 count = beamformer_shader_bake_parameter_counts[reloadable_index]; 703 704 for (i32 index = 0; index < count; index++) { 705 stream_append_s8s(&shader_stream, s8("#define "), names[index], 706 (float_bits & (1 << index))? s8(" uintBitsToFloat") : s8(" "), s8("(0x")); 707 stream_append_hex_u64(&shader_stream, parameters[index]); 708 stream_append_s8(&shader_stream, s8(")\n")); 709 } 710 711 stream_append_s8(&shader_stream, s8("#define DataKind (0x")); 712 stream_append_hex_u64(&shader_stream, sd->bake.data_kind); 713 stream_append_s8(&shader_stream, s8(")\n\n")); 714 715 s8 *flag_names = beamformer_shader_flag_strings[reloadable_index]; 716 u32 flag_count = beamformer_shader_flag_strings_count[reloadable_index]; 717 u32 flags = sd->bake.flags; 718 for (u32 bit = 0; bit < flag_count; bit++) { 719 stream_append_s8s(&shader_stream, s8("#define "), flag_names[bit], 720 (flags & (1 << bit))? s8(" 1") : s8(" 0"), s8("\n")); 721 } 722 723 if (!renderdoc_attached()) 724 stream_append_s8(&shader_stream, s8("\n#line 1\n")); 725 726 s8 shader_text; 727 if (BakeShaders) { 728 stream_append_s8(&shader_stream, beamformer_shader_data[reloadable_index]); 729 shader_text = arena_stream_commit(&arena, &shader_stream); 730 } else { 731 shader_text = arena_stream_commit(&arena, &shader_stream); 732 i64 length = os_read_entire_file((c8 *)path.data, arena.beg, arena_capacity(&arena, u8)); 733 shader_text.len += length; 734 arena_commit(&arena, length); 735 } 736 737 /* TODO(rnp): instance name */ 738 s8 shader_name = beamformer_shader_names[shader]; 739 program = load_shader(arena, &shader_text, (u32 []){GL_COMPUTE_SHADER}, 1, shader_name); 740 } 741 742 glDeleteProgram(cp->programs[shader_slot]); 743 cp->programs[shader_slot] = program; 744 } 745 746 function void 747 beamformer_commit_parameter_block(BeamformerCtx *ctx, BeamformerComputePlan *cp, u32 block, Arena arena) 748 { 749 BeamformerParameterBlock *pb = beamformer_parameter_block_lock(ctx->shared_memory, block, -1); 750 for EachBit(pb->dirty_regions, region) { 751 switch (region) { 752 case BeamformerParameterBlockRegion_ComputePipeline: 753 case BeamformerParameterBlockRegion_Parameters: 754 { 755 cp->output_points = das_valid_points(pb->parameters.output_points.xyz); 756 cp->average_frames = pb->parameters.output_points.E[3]; 757 758 plan_compute_pipeline(cp, pb); 759 760 /* NOTE(rnp): these are both handled by plan_compute_pipeline() */ 761 u32 mask = 1 << BeamformerParameterBlockRegion_ComputePipeline | 762 1 << BeamformerParameterBlockRegion_Parameters; 763 pb->dirty_regions &= ~mask; 764 765 for (u32 shader_slot = 0; shader_slot < cp->pipeline.shader_count; shader_slot++) { 766 u128 hash = u128_hash_from_data(cp->shader_descriptors + shader_slot, sizeof(BeamformerShaderDescriptor)); 767 if (!u128_equal(hash, cp->shader_hashes[shader_slot])) 768 cp->dirty_programs |= 1 << shader_slot; 769 cp->shader_hashes[shader_slot] = hash; 770 } 771 772 #define X(k, t, v) glNamedBufferSubData(cp->ubos[BeamformerComputeUBOKind_##k], \ 773 0, sizeof(t), &cp->v ## _ubo_data); 774 BEAMFORMER_COMPUTE_UBO_LIST 775 #undef X 776 777 cp->acquisition_count = pb->parameters.acquisition_count; 778 cp->acquisition_kind = pb->parameters.acquisition_kind; 779 780 u32 decoded_data_size = cp->rf_size; 781 if (ctx->compute_context.ping_pong_ssbo_size < decoded_data_size) 782 alloc_shader_storage(ctx, decoded_data_size, arena); 783 784 if (cp->hadamard_order != (i32)cp->acquisition_count) 785 update_hadamard_texture(cp, (i32)cp->acquisition_count, arena); 786 787 mem_copy(cp->voxel_transform.E, pb->parameters.das_voxel_transform.E, sizeof(cp->voxel_transform)); 788 789 GLenum gl_kind = cp->iq_pipeline ? GL_RG32F : GL_R32F; 790 if (cp->average_frames > 1 && !beamformer_frame_compatible(ctx->averaged_frames + 0, cp->output_points, gl_kind)) { 791 alloc_beamform_frame(ctx->averaged_frames + 0, cp->output_points, gl_kind, s8("Averaged Frame"), arena); 792 alloc_beamform_frame(ctx->averaged_frames + 1, cp->output_points, gl_kind, s8("Averaged Frame"), arena); 793 } 794 }break; 795 case BeamformerParameterBlockRegion_ChannelMapping:{ 796 cuda_set_channel_mapping(pb->channel_mapping); 797 }break; 798 case BeamformerParameterBlockRegion_FocalVectors: 799 case BeamformerParameterBlockRegion_SparseElements: 800 case BeamformerParameterBlockRegion_TransmitReceiveOrientations: 801 { 802 BeamformerComputeTextureKind texture_kind = 0; 803 u32 pixel_type = 0, texture_format = 0; 804 switch (region) { 805 #define X(kind, _gl, tf, pt, ...) \ 806 case BeamformerParameterBlockRegion_## kind:{ \ 807 texture_kind = BeamformerComputeTextureKind_## kind; \ 808 texture_format = tf; \ 809 pixel_type = pt; \ 810 }break; 811 BEAMFORMER_COMPUTE_TEXTURE_LIST 812 #undef X 813 InvalidDefaultCase; 814 } 815 glTextureSubImage1D(cp->textures[texture_kind], 0, 0, BeamformerMaxChannelCount, 816 texture_format, pixel_type, 817 (u8 *)pb + BeamformerParameterBlockRegionOffsets[region]); 818 }break; 819 } 820 } 821 beamformer_parameter_block_unlock(ctx->shared_memory, block); 822 } 823 824 function void 825 do_compute_shader(BeamformerCtx *ctx, BeamformerComputePlan *cp, BeamformerFrame *frame, 826 BeamformerShaderKind shader, u32 shader_slot, BeamformerShaderParameters *sp, Arena arena) 827 { 828 BeamformerComputeContext *cc = &ctx->compute_context; 829 830 u32 program = cp->programs[shader_slot]; 831 glUseProgram(program); 832 833 u32 output_ssbo_idx = !cc->last_output_ssbo_index; 834 u32 input_ssbo_idx = cc->last_output_ssbo_index; 835 836 uv3 dispatch = cp->shader_descriptors[shader_slot].dispatch; 837 switch (shader) { 838 case BeamformerShaderKind_Decode:{ 839 glBindImageTexture(0, cp->textures[BeamformerComputeTextureKind_Hadamard], 0, 0, 0, GL_READ_ONLY, GL_R32F); 840 841 BeamformerDecodeMode mode = cp->shader_descriptors[shader_slot].bake.Decode.decode_mode; 842 if (shader_slot == 0) { 843 if (mode != BeamformerDecodeMode_None) { 844 glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, cc->ping_pong_ssbos[input_ssbo_idx]); 845 glProgramUniform1ui(program, DECODE_FIRST_PASS_UNIFORM_LOC, 1); 846 847 glDispatchCompute(dispatch.x, dispatch.y, dispatch.z); 848 glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT); 849 } 850 } 851 852 if (mode != BeamformerDecodeMode_None) 853 glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, cc->ping_pong_ssbos[input_ssbo_idx]); 854 855 glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, cc->ping_pong_ssbos[output_ssbo_idx]); 856 857 glProgramUniform1ui(program, DECODE_FIRST_PASS_UNIFORM_LOC, 0); 858 859 glDispatchCompute(dispatch.x, dispatch.y, dispatch.z); 860 glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT); 861 862 cc->last_output_ssbo_index = !cc->last_output_ssbo_index; 863 }break; 864 case BeamformerShaderKind_CudaDecode:{ 865 cuda_decode(0, output_ssbo_idx, 0); 866 cc->last_output_ssbo_index = !cc->last_output_ssbo_index; 867 }break; 868 case BeamformerShaderKind_CudaHilbert:{ 869 cuda_hilbert(input_ssbo_idx, output_ssbo_idx); 870 cc->last_output_ssbo_index = !cc->last_output_ssbo_index; 871 }break; 872 case BeamformerShaderKind_Filter: 873 case BeamformerShaderKind_Demodulate: 874 { 875 if (shader_slot != 0) 876 glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, cc->ping_pong_ssbos[input_ssbo_idx]); 877 glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 2, cc->ping_pong_ssbos[output_ssbo_idx]); 878 glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, cp->filters[sp->filter_slot].ssbo); 879 880 glDispatchCompute(dispatch.x, dispatch.y, dispatch.z); 881 glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT); 882 883 cc->last_output_ssbo_index = !cc->last_output_ssbo_index; 884 }break; 885 case BeamformerShaderKind_MinMax:{ 886 for (i32 i = 1; i < frame->mips; i++) { 887 glBindImageTexture(0, frame->texture, i - 1, GL_TRUE, 0, GL_READ_ONLY, GL_RG32F); 888 glBindImageTexture(1, frame->texture, i - 0, GL_TRUE, 0, GL_WRITE_ONLY, GL_RG32F); 889 glProgramUniform1i(program, MIN_MAX_MIPS_LEVEL_UNIFORM_LOC, i); 890 891 u32 width = (u32)frame->dim.x >> i; 892 u32 height = (u32)frame->dim.y >> i; 893 u32 depth = (u32)frame->dim.z >> i; 894 glDispatchCompute(ORONE(width / 32), ORONE(height), ORONE(depth / 32)); 895 glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); 896 } 897 }break; 898 case BeamformerShaderKind_DAS:{ 899 local_persist u32 das_cycle_t = 0; 900 901 BeamformerShaderBakeParameters *bp = &cp->shader_descriptors[shader_slot].bake; 902 b32 fast = (bp->flags & BeamformerShaderDASFlags_Fast) != 0; 903 b32 sparse = (bp->flags & BeamformerShaderDASFlags_Sparse) != 0; 904 905 if (fast) { 906 glClearTexImage(frame->texture, 0, GL_RED, GL_FLOAT, 0); 907 glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT); 908 glBindImageTexture(0, frame->texture, 0, GL_TRUE, 0, GL_READ_WRITE, cp->iq_pipeline ? GL_RG32F : GL_R32F); 909 } else { 910 glBindImageTexture(0, frame->texture, 0, GL_TRUE, 0, GL_WRITE_ONLY, cp->iq_pipeline ? GL_RG32F : GL_R32F); 911 } 912 913 u32 sparse_texture = cp->textures[BeamformerComputeTextureKind_SparseElements]; 914 if (!sparse) sparse_texture = 0; 915 916 glBindBufferBase(GL_UNIFORM_BUFFER, 0, cp->ubos[BeamformerComputeUBOKind_DAS]); 917 glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 1, cc->ping_pong_ssbos[input_ssbo_idx], 0, cp->rf_size); 918 glBindImageTexture(1, sparse_texture, 0, 0, 0, GL_READ_ONLY, GL_R16I); 919 glBindImageTexture(2, cp->textures[BeamformerComputeTextureKind_FocalVectors], 0, 0, 0, GL_READ_ONLY, GL_RG32F); 920 glBindImageTexture(3, cp->textures[BeamformerComputeTextureKind_TransmitReceiveOrientations], 0, 0, 0, GL_READ_ONLY, GL_R8I); 921 922 glProgramUniform1ui(program, DAS_CYCLE_T_UNIFORM_LOC, das_cycle_t++); 923 924 if (fast) { 925 i32 loop_end; 926 if (bp->DAS.acquisition_kind == BeamformerAcquisitionKind_RCA_VLS || 927 bp->DAS.acquisition_kind == BeamformerAcquisitionKind_RCA_TPW) 928 { 929 /* NOTE(rnp): to avoid repeatedly sampling the whole focal vectors 930 * texture we loop over transmits for VLS/TPW */ 931 loop_end = (i32)bp->DAS.acquisition_count; 932 } else { 933 loop_end = (i32)bp->DAS.channel_count; 934 } 935 f32 percent_per_step = 1.0f / (f32)loop_end; 936 cc->processing_progress = -percent_per_step; 937 for (i32 index = 0; index < loop_end; index++) { 938 cc->processing_progress += percent_per_step; 939 /* IMPORTANT(rnp): prevents OS from coalescing and killing our shader */ 940 glFinish(); 941 glProgramUniform1i(program, DAS_FAST_CHANNEL_UNIFORM_LOC, index); 942 glDispatchCompute(dispatch.x, dispatch.y, dispatch.z); 943 glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); 944 } 945 } else { 946 glDispatchCompute(dispatch.x, dispatch.y, dispatch.z); 947 } 948 glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT|GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); 949 }break; 950 case BeamformerShaderKind_Sum:{ 951 u32 aframe_index = ctx->averaged_frame_index % ARRAY_COUNT(ctx->averaged_frames); 952 BeamformerFrame *aframe = ctx->averaged_frames + aframe_index; 953 aframe->id = ctx->averaged_frame_index; 954 atomic_store_u32(&aframe->ready_to_present, 0); 955 /* TODO(rnp): hack we need a better way of specifying which frames to sum; 956 * this is fine for rolling averaging but what if we want to do something else */ 957 assert(frame >= ctx->beamform_frames); 958 assert(frame < ctx->beamform_frames + countof(ctx->beamform_frames)); 959 u32 base_index = (u32)(frame - ctx->beamform_frames); 960 u32 to_average = (u32)cp->average_frames; 961 u32 frame_count = 0; 962 u32 *in_textures = push_array(&arena, u32, BeamformerMaxSavedFrames); 963 ComputeFrameIterator cfi = compute_frame_iterator(ctx, 1 + base_index - to_average, to_average); 964 for (BeamformerFrame *it = frame_next(&cfi); it; it = frame_next(&cfi)) 965 in_textures[frame_count++] = it->texture; 966 967 assert(to_average == frame_count); 968 969 glProgramUniform1f(program, SUM_PRESCALE_UNIFORM_LOC, 1 / (f32)frame_count); 970 do_sum_shader(cc, in_textures, frame_count, aframe->texture, aframe->dim); 971 mem_copy(aframe->voxel_transform.E, frame->voxel_transform.E, sizeof(frame->voxel_transform)); 972 aframe->compound_count = frame->compound_count; 973 aframe->acquisition_kind = frame->acquisition_kind; 974 }break; 975 InvalidDefaultCase; 976 } 977 } 978 979 function s8 980 shader_text_with_header(s8 header, s8 filepath, b32 has_file, BeamformerShaderKind shader_kind, Arena *arena) 981 { 982 Stream sb = arena_stream(*arena); 983 stream_push_shader_header(&sb, shader_kind, header); 984 stream_append_s8(&sb, s8("\n#line 1\n")); 985 986 s8 result; 987 if (BakeShaders) { 988 /* TODO(rnp): better handling of shaders with no backing file */ 989 if (has_file) { 990 i32 reloadable_index = beamformer_shader_reloadable_index_by_shader[shader_kind]; 991 stream_append_s8(&sb, beamformer_shader_data[reloadable_index]); 992 } 993 result = arena_stream_commit(arena, &sb); 994 } else { 995 result = arena_stream_commit(arena, &sb); 996 if (has_file) { 997 i64 length = os_read_entire_file((c8 *)filepath.data, arena->beg, arena_capacity(arena, u8)); 998 result.len += length; 999 arena_commit(arena, length); 1000 } 1001 } 1002 1003 return result; 1004 } 1005 1006 /* NOTE(rnp): currently this function is only handling rendering shaders. 1007 * look at load_compute_shader for compute shaders */ 1008 function void 1009 beamformer_reload_shader(BeamformerCtx *ctx, BeamformerShaderReloadContext *src, Arena arena, s8 shader_name) 1010 { 1011 BeamformerShaderKind kind = beamformer_reloadable_shader_kinds[src->reloadable_info_index]; 1012 assert(kind == BeamformerShaderKind_Render3D); 1013 1014 s8 path = push_s8_from_parts(&arena, os_path_separator(), s8("shaders"), 1015 beamformer_reloadable_shader_files[src->reloadable_info_index]); 1016 1017 i32 shader_count = 1; 1018 BeamformerShaderReloadContext *link = src->link; 1019 while (link != src) { shader_count++; link = link->link; } 1020 1021 s8 *shader_texts = push_array(&arena, s8, shader_count); 1022 u32 *shader_types = push_array(&arena, u32, shader_count); 1023 1024 i32 index = 0; 1025 do { 1026 b32 has_file = link->reloadable_info_index >= 0; 1027 shader_texts[index] = shader_text_with_header(link->header, path, has_file, kind, &arena); 1028 shader_types[index] = link->gl_type; 1029 index++; 1030 link = link->link; 1031 } while (link != src); 1032 1033 u32 *shader = &ctx->frame_view_render_context.shader; 1034 glDeleteProgram(*shader); 1035 *shader = load_shader(arena, shader_texts, shader_types, shader_count, shader_name); 1036 ctx->frame_view_render_context.updated = 1; 1037 } 1038 1039 function void 1040 complete_queue(BeamformerCtx *ctx, BeamformWorkQueue *q, Arena *arena, iptr gl_context) 1041 { 1042 BeamformerComputeContext * cs = &ctx->compute_context; 1043 BeamformerSharedMemory * sm = ctx->shared_memory; 1044 1045 BeamformWork *work = beamform_work_queue_pop(q); 1046 while (work) { 1047 b32 can_commit = 1; 1048 switch (work->kind) { 1049 case BeamformerWorkKind_ExportBuffer:{ 1050 /* TODO(rnp): better way of handling DispatchCompute barrier */ 1051 post_sync_barrier(ctx->shared_memory, BeamformerSharedMemoryLockKind_DispatchCompute); 1052 beamformer_shared_memory_take_lock(ctx->shared_memory, (i32)work->lock, (u32)-1); 1053 BeamformerExportContext *ec = &work->export_context; 1054 switch (ec->kind) { 1055 case BeamformerExportKind_BeamformedData:{ 1056 BeamformerFrame *frame = ctx->latest_frame; 1057 if (frame) { 1058 assert(frame->ready_to_present); 1059 u32 texture = frame->texture; 1060 iv3 dim = frame->dim; 1061 u32 out_size = (u32)dim.x * (u32)dim.y * (u32)dim.z * 2 * sizeof(f32); 1062 if (out_size <= ec->size) { 1063 glGetTextureImage(texture, 0, GL_RG, GL_FLOAT, (i32)out_size, 1064 beamformer_shared_memory_scratch_arena(sm).beg); 1065 } 1066 } 1067 }break; 1068 case BeamformerExportKind_Stats:{ 1069 ComputeTimingTable *table = ctx->compute_timing_table; 1070 /* NOTE(rnp): do a little spin to let this finish updating */ 1071 spin_wait(table->write_index != atomic_load_u32(&table->read_index)); 1072 ComputeShaderStats *stats = ctx->compute_shader_stats; 1073 if (sizeof(stats->table) <= ec->size) 1074 mem_copy(beamformer_shared_memory_scratch_arena(sm).beg, &stats->table, sizeof(stats->table)); 1075 }break; 1076 InvalidDefaultCase; 1077 } 1078 beamformer_shared_memory_release_lock(ctx->shared_memory, work->lock); 1079 post_sync_barrier(ctx->shared_memory, BeamformerSharedMemoryLockKind_ExportSync); 1080 }break; 1081 case BeamformerWorkKind_CreateFilter:{ 1082 /* TODO(rnp): this should probably get deleted and moved to lazy loading */ 1083 BeamformerCreateFilterContext *fctx = &work->create_filter_context; 1084 u32 block = fctx->parameter_block; 1085 u32 slot = fctx->filter_slot; 1086 BeamformerComputePlan *cp = beamformer_compute_plan_for_block(cs, block, arena); 1087 beamformer_filter_update(cp->filters + slot, fctx->parameters, block, slot, *arena); 1088 }break; 1089 case BeamformerWorkKind_ComputeIndirect:{ 1090 fill_frame_compute_work(ctx, work, work->compute_indirect_context.view_plane, 1091 work->compute_indirect_context.parameter_block, 1); 1092 } /* FALLTHROUGH */ 1093 case BeamformerWorkKind_Compute:{ 1094 DEBUG_DECL(glClearNamedBufferData(cs->ping_pong_ssbos[0], GL_RG32F, GL_RG, GL_FLOAT, 0);) 1095 DEBUG_DECL(glClearNamedBufferData(cs->ping_pong_ssbos[1], GL_RG32F, GL_RG, GL_FLOAT, 0);) 1096 DEBUG_DECL(glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);) 1097 1098 push_compute_timing_info(ctx->compute_timing_table, 1099 (ComputeTimingInfo){.kind = ComputeTimingInfoKind_ComputeFrameBegin}); 1100 1101 BeamformerComputePlan *cp = beamformer_compute_plan_for_block(cs, work->compute_context.parameter_block, arena); 1102 if (beamformer_parameter_block_dirty(sm, work->compute_context.parameter_block)) { 1103 u32 block = work->compute_context.parameter_block; 1104 beamformer_commit_parameter_block(ctx, cp, block, *arena); 1105 atomic_store_u32(&ctx->ui_dirty_parameter_blocks, (u32)(ctx->beamform_work_queue != q) << block); 1106 } 1107 1108 post_sync_barrier(ctx->shared_memory, work->lock); 1109 1110 u32 dirty_programs = atomic_swap_u32(&cp->dirty_programs, 0); 1111 static_assert(ISPOWEROF2(BeamformerMaxComputeShaderStages), 1112 "max compute shader stages must be power of 2"); 1113 assert((dirty_programs & ~((u32)BeamformerMaxComputeShaderStages - 1)) == 0); 1114 for EachBit(dirty_programs, slot) 1115 load_compute_shader(ctx, cp, (u32)slot, *arena); 1116 1117 atomic_store_u32(&cs->processing_compute, 1); 1118 start_renderdoc_capture(gl_context); 1119 1120 BeamformerFrame *frame = work->compute_context.frame; 1121 1122 GLenum gl_kind = cp->iq_pipeline ? GL_RG32F : GL_R32F; 1123 if (!beamformer_frame_compatible(frame, cp->output_points, gl_kind)) 1124 alloc_beamform_frame(frame, cp->output_points, gl_kind, s8("Beamformed_Data"), *arena); 1125 1126 mem_copy(frame->voxel_transform.E, cp->voxel_transform.E, sizeof(cp->voxel_transform)); 1127 frame->acquisition_kind = cp->acquisition_kind; 1128 frame->compound_count = cp->acquisition_count; 1129 1130 BeamformerComputeContext *cc = &ctx->compute_context; 1131 BeamformerComputePipeline *pipeline = &cp->pipeline; 1132 /* NOTE(rnp): first stage requires access to raw data buffer directly so we break 1133 * it out into a separate step. This way data can get released as soon as possible */ 1134 if (pipeline->shader_count > 0) { 1135 BeamformerRFBuffer *rf = &cs->rf_buffer; 1136 u32 slot = rf->compute_index % countof(rf->compute_syncs); 1137 1138 if (work->kind == BeamformerWorkKind_ComputeIndirect) { 1139 /* NOTE(rnp): compute indirect is used when uploading data. if compute thread 1140 * preempts upload it must wait for the fence to exist. then it must tell the 1141 * GPU to wait for upload to complete before it can start compute */ 1142 spin_wait(!atomic_load_u64(rf->upload_syncs + slot)); 1143 1144 glWaitSync(rf->upload_syncs[slot], 0, GL_TIMEOUT_IGNORED); 1145 glDeleteSync(rf->upload_syncs[slot]); 1146 rf->compute_index++; 1147 } else { 1148 slot = (rf->compute_index - 1) % countof(rf->compute_syncs); 1149 } 1150 1151 glBindBufferRange(GL_SHADER_STORAGE_BUFFER, 1, rf->ssbo, slot * rf->active_rf_size, rf->active_rf_size); 1152 1153 glBeginQuery(GL_TIME_ELAPSED, cc->shader_timer_ids[0]); 1154 do_compute_shader(ctx, cp, frame, pipeline->shaders[0], 0, pipeline->parameters + 0, *arena); 1155 glEndQuery(GL_TIME_ELAPSED); 1156 1157 if (work->kind == BeamformerWorkKind_ComputeIndirect) { 1158 atomic_store_u64(rf->compute_syncs + slot, glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0)); 1159 atomic_store_u64(rf->upload_syncs + slot, 0); 1160 } 1161 } 1162 1163 b32 did_sum_shader = 0; 1164 for (u32 i = 1; i < pipeline->shader_count; i++) { 1165 did_sum_shader |= pipeline->shaders[i] == BeamformerShaderKind_Sum; 1166 glBeginQuery(GL_TIME_ELAPSED, cc->shader_timer_ids[i]); 1167 do_compute_shader(ctx, cp, frame, pipeline->shaders[i], i, pipeline->parameters + i, *arena); 1168 glEndQuery(GL_TIME_ELAPSED); 1169 } 1170 1171 /* NOTE(rnp): the first of these blocks until work completes */ 1172 for (u32 i = 0; i < pipeline->shader_count; i++) { 1173 ComputeTimingInfo info = {0}; 1174 info.kind = ComputeTimingInfoKind_Shader; 1175 info.shader = pipeline->shaders[i]; 1176 glGetQueryObjectui64v(cc->shader_timer_ids[i], GL_QUERY_RESULT, &info.timer_count); 1177 push_compute_timing_info(ctx->compute_timing_table, info); 1178 } 1179 cs->processing_progress = 1; 1180 1181 frame->ready_to_present = 1; 1182 if (did_sum_shader) { 1183 u32 aframe_index = ((ctx->averaged_frame_index++) % countof(ctx->averaged_frames)); 1184 ctx->averaged_frames[aframe_index].view_plane_tag = frame->view_plane_tag; 1185 ctx->averaged_frames[aframe_index].ready_to_present = 1; 1186 atomic_store_u64((u64 *)&ctx->latest_frame, (u64)(ctx->averaged_frames + aframe_index)); 1187 } else { 1188 atomic_store_u64((u64 *)&ctx->latest_frame, (u64)frame); 1189 } 1190 cs->processing_compute = 0; 1191 1192 push_compute_timing_info(ctx->compute_timing_table, 1193 (ComputeTimingInfo){.kind = ComputeTimingInfoKind_ComputeFrameEnd}); 1194 1195 end_renderdoc_capture(gl_context); 1196 }break; 1197 InvalidDefaultCase; 1198 } 1199 1200 if (can_commit) { 1201 beamform_work_queue_pop_commit(q); 1202 work = beamform_work_queue_pop(q); 1203 } 1204 } 1205 } 1206 1207 function void 1208 coalesce_timing_table(ComputeTimingTable *t, ComputeShaderStats *stats) 1209 { 1210 /* TODO(rnp): we do not currently do anything to handle the potential for a half written 1211 * info item. this could result in garbage entries but they shouldn't really matter */ 1212 1213 u32 target = atomic_load_u32(&t->write_index); 1214 u32 stats_index = (stats->latest_frame_index + 1) % countof(stats->table.times); 1215 1216 static_assert(BeamformerShaderKind_Count + 1 <= 32, "timing coalescence bitfield test"); 1217 u32 seen_info_test = 0; 1218 1219 while (t->read_index != target) { 1220 ComputeTimingInfo info = t->buffer[t->read_index % countof(t->buffer)]; 1221 switch (info.kind) { 1222 case ComputeTimingInfoKind_ComputeFrameBegin:{ 1223 assert(t->compute_frame_active == 0); 1224 t->compute_frame_active = 1; 1225 /* NOTE(rnp): allow multiple instances of same shader to accumulate */ 1226 mem_clear(stats->table.times[stats_index], 0, sizeof(stats->table.times[stats_index])); 1227 }break; 1228 case ComputeTimingInfoKind_ComputeFrameEnd:{ 1229 assert(t->compute_frame_active == 1); 1230 t->compute_frame_active = 0; 1231 stats->latest_frame_index = stats_index; 1232 stats_index = (stats_index + 1) % countof(stats->table.times); 1233 }break; 1234 case ComputeTimingInfoKind_Shader:{ 1235 stats->table.times[stats_index][info.shader] += (f32)info.timer_count / 1.0e9f; 1236 seen_info_test |= (1u << info.shader); 1237 }break; 1238 case ComputeTimingInfoKind_RF_Data:{ 1239 stats->latest_rf_index = (stats->latest_rf_index + 1) % countof(stats->table.rf_time_deltas); 1240 f32 delta = (f32)(info.timer_count - stats->last_rf_timer_count) / 1.0e9f; 1241 stats->table.rf_time_deltas[stats->latest_rf_index] = delta; 1242 stats->last_rf_timer_count = info.timer_count; 1243 seen_info_test |= (1 << BeamformerShaderKind_Count); 1244 }break; 1245 } 1246 /* NOTE(rnp): do this at the end so that stats table is always in a consistent state */ 1247 atomic_add_u32(&t->read_index, 1); 1248 } 1249 1250 if (seen_info_test) { 1251 for EachEnumValue(BeamformerShaderKind, shader) { 1252 if (seen_info_test & (1 << shader)) { 1253 f32 sum = 0; 1254 for EachElement(stats->table.times, i) 1255 sum += stats->table.times[i][shader]; 1256 stats->average_times[shader] = sum / countof(stats->table.times); 1257 } 1258 } 1259 1260 if (seen_info_test & (1 << BeamformerShaderKind_Count)) { 1261 f32 sum = 0; 1262 for EachElement(stats->table.rf_time_deltas, i) 1263 sum += stats->table.rf_time_deltas[i]; 1264 stats->rf_time_delta_average = sum / countof(stats->table.rf_time_deltas); 1265 } 1266 } 1267 } 1268 1269 DEBUG_EXPORT BEAMFORMER_COMPLETE_COMPUTE_FN(beamformer_complete_compute) 1270 { 1271 BeamformerCtx *ctx = (BeamformerCtx *)user_context; 1272 BeamformerSharedMemory *sm = ctx->shared_memory; 1273 complete_queue(ctx, &sm->external_work_queue, arena, gl_context); 1274 complete_queue(ctx, ctx->beamform_work_queue, arena, gl_context); 1275 } 1276 1277 function void 1278 beamformer_rf_buffer_allocate(BeamformerRFBuffer *rf, u32 rf_size, b32 nvidia) 1279 { 1280 assert((rf_size % 64) == 0); 1281 if (!nvidia) glUnmapNamedBuffer(rf->ssbo); 1282 glDeleteBuffers(1, &rf->ssbo); 1283 glCreateBuffers(1, &rf->ssbo); 1284 1285 u32 buffer_flags = GL_DYNAMIC_STORAGE_BIT; 1286 if (!nvidia) buffer_flags |= GL_MAP_PERSISTENT_BIT|GL_MAP_WRITE_BIT; 1287 1288 glNamedBufferStorage(rf->ssbo, countof(rf->compute_syncs) * rf_size, 0, buffer_flags); 1289 1290 if (!nvidia) { 1291 u32 access = GL_MAP_PERSISTENT_BIT|GL_MAP_WRITE_BIT|GL_MAP_FLUSH_EXPLICIT_BIT|GL_MAP_UNSYNCHRONIZED_BIT; 1292 rf->buffer = glMapNamedBufferRange(rf->ssbo, 0, (GLsizei)(countof(rf->compute_syncs) * rf_size), access); 1293 } 1294 1295 LABEL_GL_OBJECT(GL_BUFFER, rf->ssbo, s8("Raw_RF_SSBO")); 1296 rf->size = rf_size; 1297 } 1298 1299 DEBUG_EXPORT BEAMFORMER_RF_UPLOAD_FN(beamformer_rf_upload) 1300 { 1301 BeamformerSharedMemory *sm = ctx->shared_memory; 1302 BeamformerSharedMemoryLockKind scratch_lock = BeamformerSharedMemoryLockKind_ScratchSpace; 1303 BeamformerSharedMemoryLockKind upload_lock = BeamformerSharedMemoryLockKind_UploadRF; 1304 1305 u64 rf_block_rf_size; 1306 if (atomic_load_u32(sm->locks + upload_lock) && 1307 (rf_block_rf_size = atomic_swap_u64(&sm->rf_block_rf_size, 0))) 1308 { 1309 beamformer_shared_memory_take_lock(ctx->shared_memory, (i32)scratch_lock, (u32)-1); 1310 1311 BeamformerRFBuffer *rf = ctx->rf_buffer; 1312 BeamformerParameterBlock *b = beamformer_parameter_block(sm, (u32)(rf_block_rf_size >> 32ULL)); 1313 BeamformerParameters *bp = &b->parameters; 1314 BeamformerDataKind data_kind = b->pipeline.data_kind; 1315 1316 b32 nvidia = gl_parameters.vendor_id == GLVendor_NVIDIA; 1317 1318 rf->active_rf_size = (u32)round_up_to(rf_block_rf_size & 0xFFFFFFFFULL, 64); 1319 if unlikely(rf->size < rf->active_rf_size) 1320 beamformer_rf_buffer_allocate(rf, rf->active_rf_size, nvidia); 1321 1322 u32 slot = rf->insertion_index++ % countof(rf->compute_syncs); 1323 1324 /* NOTE(rnp): if the rest of the code is functioning then the first 1325 * time the compute thread processes an upload it must have gone 1326 * through this path. therefore it is safe to spin until it gets processed */ 1327 spin_wait(atomic_load_u64(rf->upload_syncs + slot)); 1328 1329 if (atomic_load_u64(rf->compute_syncs + slot)) { 1330 GLenum sync_result = glClientWaitSync(rf->compute_syncs[slot], 0, 1000000000); 1331 if (sync_result == GL_TIMEOUT_EXPIRED || sync_result == GL_WAIT_FAILED) { 1332 // TODO(rnp): what do? 1333 } 1334 glDeleteSync(rf->compute_syncs[slot]); 1335 } 1336 1337 u32 size = bp->channel_count * bp->acquisition_count * bp->sample_count * beamformer_data_kind_byte_size[data_kind]; 1338 u8 *data = beamformer_shared_memory_scratch_arena(sm).beg; 1339 1340 if (nvidia) glNamedBufferSubData(rf->ssbo, slot * rf->active_rf_size, (i32)size, data); 1341 else memory_copy_non_temporal(rf->buffer + slot * rf->active_rf_size, data, size); 1342 store_fence(); 1343 1344 beamformer_shared_memory_release_lock(ctx->shared_memory, (i32)scratch_lock); 1345 post_sync_barrier(ctx->shared_memory, upload_lock); 1346 1347 if (!nvidia) 1348 glFlushMappedNamedBufferRange(rf->ssbo, slot * rf->active_rf_size, (i32)rf->active_rf_size); 1349 1350 atomic_store_u64(rf->upload_syncs + slot, glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0)); 1351 atomic_store_u64(rf->compute_syncs + slot, 0); 1352 1353 os_wake_all_waiters(ctx->compute_worker_sync); 1354 1355 ComputeTimingInfo info = {.kind = ComputeTimingInfoKind_RF_Data}; 1356 glGetQueryObjectui64v(rf->data_timestamp_query, GL_QUERY_RESULT, &info.timer_count); 1357 glQueryCounter(rf->data_timestamp_query, GL_TIMESTAMP); 1358 push_compute_timing_info(ctx->compute_timing_table, info); 1359 } 1360 } 1361 1362 function void 1363 beamformer_queue_compute(BeamformerCtx *ctx, BeamformerFrame *frame, u32 parameter_block) 1364 { 1365 BeamformerSharedMemory *sm = ctx->shared_memory; 1366 BeamformerSharedMemoryLockKind dispatch_lock = BeamformerSharedMemoryLockKind_DispatchCompute; 1367 if (!sm->live_imaging_parameters.active && beamformer_shared_memory_take_lock(sm, (i32)dispatch_lock, 0)) 1368 { 1369 BeamformWork *work = beamform_work_queue_push(ctx->beamform_work_queue); 1370 BeamformerViewPlaneTag tag = frame ? frame->view_plane_tag : 0; 1371 if (fill_frame_compute_work(ctx, work, tag, parameter_block, 0)) 1372 beamform_work_queue_push_commit(ctx->beamform_work_queue); 1373 } 1374 os_wake_all_waiters(&ctx->compute_worker.sync_variable); 1375 } 1376 1377 #include "ui.c" 1378 1379 function void 1380 beamformer_process_input_events(BeamformerCtx *ctx, BeamformerInput *input, 1381 BeamformerInputEvent *events, u32 event_count) 1382 { 1383 for (u32 index = 0; index < event_count; index++) { 1384 BeamformerInputEvent *event = events + index; 1385 switch (event->kind) { 1386 1387 case BeamformerInputEventKind_ExecutableReload:{ 1388 ui_init(ctx, ctx->ui_backing_store); 1389 1390 #if BEAMFORMER_RENDERDOC_HOOKS 1391 start_frame_capture = input->renderdoc_start_frame_capture; 1392 end_frame_capture = input->renderdoc_end_frame_capture; 1393 #endif 1394 }break; 1395 1396 case BeamformerInputEventKind_FileEvent:{ 1397 BeamformerFileReloadContext *frc = event->file_watch_user_context; 1398 switch (frc->kind) { 1399 case BeamformerFileReloadKind_Shader:{ 1400 BeamformerShaderReloadContext *src = frc->shader_reload_context; 1401 BeamformerShaderKind kind = beamformer_reloadable_shader_kinds[src->reloadable_info_index]; 1402 beamformer_reload_shader(ctx, src, ctx->arena, beamformer_shader_names[kind]); 1403 }break; 1404 case BeamformerFileReloadKind_ComputeShader:{ 1405 for EachElement(ctx->compute_context.compute_plans, block) { 1406 BeamformerComputePlan *cp = ctx->compute_context.compute_plans[block]; 1407 for (u32 slot = 0; cp && slot < cp->pipeline.shader_count; slot++) { 1408 i32 shader_index = beamformer_shader_reloadable_index_by_shader[cp->pipeline.shaders[slot]]; 1409 if (beamformer_reloadable_shader_kinds[shader_index] == frc->compute_shader_kind) 1410 atomic_or_u32(&cp->dirty_programs, 1 << slot); 1411 } 1412 } 1413 1414 if (ctx->latest_frame) 1415 beamformer_queue_compute(ctx, ctx->latest_frame, ctx->latest_frame->parameter_block); 1416 }break; 1417 InvalidDefaultCase; 1418 } 1419 }break; 1420 1421 InvalidDefaultCase; 1422 } 1423 } 1424 } 1425 1426 BEAMFORMER_EXPORT void 1427 beamformer_frame_step(BeamformerInput *input) 1428 { 1429 BeamformerCtx *ctx = BeamformerContextMemory(input->memory); 1430 1431 u64 current_time = os_timer_count(); 1432 dt_for_frame = (f64)(current_time - ctx->frame_timestamp) / os_system_info()->timer_frequency; 1433 ctx->frame_timestamp = current_time; 1434 1435 if (IsWindowResized()) { 1436 ctx->window_size.h = GetScreenHeight(); 1437 ctx->window_size.w = GetScreenWidth(); 1438 } 1439 1440 coalesce_timing_table(ctx->compute_timing_table, ctx->compute_shader_stats); 1441 1442 beamformer_process_input_events(ctx, input, input->event_queue, input->event_count); 1443 1444 BeamformerSharedMemory *sm = ctx->shared_memory; 1445 if (atomic_load_u32(sm->locks + BeamformerSharedMemoryLockKind_UploadRF)) 1446 os_wake_all_waiters(&ctx->upload_worker.sync_variable); 1447 if (atomic_load_u32(sm->locks + BeamformerSharedMemoryLockKind_DispatchCompute)) 1448 os_wake_all_waiters(&ctx->compute_worker.sync_variable); 1449 1450 BeamformerFrame *frame = ctx->latest_frame; 1451 BeamformerViewPlaneTag tag = frame? frame->view_plane_tag : 0; 1452 draw_ui(ctx, input, frame, tag); 1453 1454 ctx->frame_view_render_context.updated = 0; 1455 }