static.c (21805B)
/* See LICENSE for license details. */
#ifndef _DEBUG

#include "beamformer.c"
#define debug_init(...)

#else

global void *debug_lib;

#define DEBUG_ENTRY_POINTS \
	X(beamformer_debug_ui_deinit)  \
	X(beamformer_complete_compute) \
	X(beamformer_frame_step)       \
	X(beamformer_reload_shader)    \
	X(beamformer_rf_upload)

#define X(name) global name ##_fn *name;
DEBUG_ENTRY_POINTS
#undef X

function FILE_WATCH_CALLBACK_FN(debug_reload)
{
	BeamformerInput *input = (BeamformerInput *)user_data;
	Stream err = arena_stream(arena);

	/* NOTE(rnp): spin until compute thread finishes its work (we will probably
	 * never reload while compute is in progress but just in case). */
	spin_wait(!atomic_load_u32(&os->compute_worker.asleep));
	spin_wait(!atomic_load_u32(&os->upload_worker.asleep));

	os_unload_library(debug_lib);
	debug_lib = os_load_library(OS_DEBUG_LIB_NAME, OS_DEBUG_LIB_TEMP_NAME, &err);

	#define X(name) name = os_lookup_dynamic_symbol(debug_lib, #name, &err);
	DEBUG_ENTRY_POINTS
	#undef X

	stream_append_s8(&err, s8("Reloaded Main Executable\n"));
	os_write_file(os->error_handle, stream_to_s8(&err));

	input->executable_reloaded = 1;

	return 1;
}

function void
debug_init(OS *os, iptr input, Arena *arena)
{
	os_add_file_watch(os, arena, s8(OS_DEBUG_LIB_NAME), debug_reload, input);
	debug_reload(os, s8(""), input, *arena);

	Stream err = arena_stream(*arena);
	void *rdoc = os_get_module(OS_RENDERDOC_SONAME, 0);
	if (rdoc) {
		renderdoc_get_api_fn *get_api = os_lookup_dynamic_symbol(rdoc, "RENDERDOC_GetAPI", &err);
		if (get_api) {
			RenderDocAPI *api = 0;
			if (get_api(10600, (void **)&api)) {
				os->start_frame_capture = RENDERDOC_START_FRAME_CAPTURE(api);
				os->end_frame_capture   = RENDERDOC_END_FRAME_CAPTURE(api);
				stream_append_s8(&err, s8("loaded: " OS_RENDERDOC_SONAME "\n"));
			}
		}
	}

	os_write_file(os->error_handle, stream_to_s8(&err));
}

#endif /* _DEBUG */

#define static_path_join(a, b) (a OS_PATH_SEPARATOR b)

struct gl_debug_ctx {
	Stream stream;
	iptr   os_error_handle;
};

function void
gl_debug_logger(u32 src, u32 type, u32 id, u32 lvl, i32 len, const char *msg, const void *userctx)
{
	struct gl_debug_ctx *ctx = (struct gl_debug_ctx *)userctx;
	Stream *e = &ctx->stream;
	stream_append_s8s(e, s8("[OpenGL] "), (s8){.len = len, .data = (u8 *)msg}, s8("\n"));
	os_write_file(ctx->os_error_handle, stream_to_s8(e));
	stream_reset(e, 0);
}

function void
get_gl_params(GLParams *gl, Stream *err)
{
	char *vendor = (char *)glGetString(GL_VENDOR);
	if (!vendor) {
		stream_append_s8(err, s8("Failed to determine GL Vendor\n"));
		os_fatal(stream_to_s8(err));
	}
	/* TODO(rnp): str prefix of */
	switch (vendor[0]) {
	case 'A': gl->vendor_id = GL_VENDOR_AMD;    break;
	case 'I': gl->vendor_id = GL_VENDOR_INTEL;  break;
	case 'N': gl->vendor_id = GL_VENDOR_NVIDIA; break;
	/* NOTE(rnp): freedreno */
	case 'f': gl->vendor_id = GL_VENDOR_ARM;    break;
	/* NOTE(rnp): Microsoft Corporation - weird win32 thing (microsoft is just using mesa for the driver) */
	case 'M': gl->vendor_id = GL_VENDOR_ARM;    break;
	default:
		stream_append_s8s(err, s8("Unknown GL Vendor: "), c_str_to_s8(vendor), s8("\n"));
		os_fatal(stream_to_s8(err));
	}

	#define X(glname, name, suffix) glGetIntegerv(GL_##glname, &gl->name);
	GL_PARAMETERS
	#undef X
}

function void
validate_gl_requirements(GLParams *gl, Arena a)
{
	Stream s = arena_stream(a);

	if (gl->max_ubo_size < (i32)sizeof(BeamformerParameters)) {
		stream_append_s8(&s, s8("GPU must support UBOs of at least "));
		stream_append_i64(&s, sizeof(BeamformerParameters));
		stream_append_s8(&s, s8(" bytes!\n"));
	}

	#define X(name, ret, params) if (!name) stream_append_s8s(&s, s8("missing required GL function: "), s8(#name), s8("\n"));
	OGLProcedureList
	#undef X

	if (s.widx) os_fatal(stream_to_s8(&s));
}

function void
dump_gl_params(GLParams *gl, Arena a, OS *os)
{
#ifdef _DEBUG
	s8 vendor = s8("vendor:");
	i32 max_width = (i32)vendor.len;
	#define X(glname, name, suffix) if (s8(#name).len > max_width) max_width = (i32)s8(#name ":").len;
	GL_PARAMETERS
	#undef X
	max_width++;

	Stream s = arena_stream(a);
	stream_append_s8s(&s, s8("---- GL Parameters ----\n"), vendor);
	stream_pad(&s, ' ', max_width - (i32)vendor.len);
	switch (gl->vendor_id) {
	case GL_VENDOR_AMD:    stream_append_s8(&s, s8("AMD\n"));    break;
	case GL_VENDOR_ARM:    stream_append_s8(&s, s8("ARM\n"));    break;
	case GL_VENDOR_INTEL:  stream_append_s8(&s, s8("Intel\n"));  break;
	case GL_VENDOR_NVIDIA: stream_append_s8(&s, s8("nVidia\n")); break;
	}

	#define X(glname, name, suffix) \
		stream_append_s8(&s, s8(#name ":"));                     \
		stream_pad(&s, ' ', max_width - (i32)s8(#name ":").len); \
		stream_append_i64(&s, gl->name);                         \
		stream_append_s8(&s, s8(suffix));                        \
		stream_append_byte(&s, '\n');
	GL_PARAMETERS
	#undef X
	stream_append_s8(&s, s8("-----------------------\n"));
	os_write_file(os->error_handle, stream_to_s8(&s));
#endif
}

function FILE_WATCH_CALLBACK_FN(reload_shader)
{
	ShaderReloadContext *ctx = (typeof(ctx))user_data;
	return beamformer_reload_shader(os, ctx->beamformer_context, ctx, arena, ctx->name);
}

function FILE_WATCH_CALLBACK_FN(reload_shader_indirect)
{
	ShaderReloadContext *src = (typeof(src))user_data;
	BeamformerCtx *ctx = src->beamformer_context;
	BeamformWork *work = beamform_work_queue_push(ctx->beamform_work_queue);
	if (work) {
		work->kind = BeamformerWorkKind_ReloadShader;
		work->shader_reload_context = src;
		beamform_work_queue_push_commit(ctx->beamform_work_queue);
		os_wake_waiters(&os->compute_worker.sync_variable);
	}
	return 1;
}

function FILE_WATCH_CALLBACK_FN(load_cuda_lib)
{
	CudaLib *cl = (CudaLib *)user_data;
	b32 result = os_file_exists((c8 *)path.data);
	if (result) {
		Stream err = arena_stream(arena);

		stream_append_s8(&err, s8("loading CUDA lib: " OS_CUDA_LIB_NAME "\n"));
		os_unload_library(cl->lib);
		cl->lib = os_load_library((c8 *)path.data, OS_CUDA_LIB_TEMP_NAME, &err);
		#define X(name, symname) cl->name = os_lookup_dynamic_symbol(cl->lib, symname, &err);
		CUDA_LIB_FNS
		#undef X

		os_write_file(os->error_handle, stream_to_s8(&err));
	}

	#define X(name, symname) if (!cl->name) cl->name = cuda_ ## name ## _stub;
	CUDA_LIB_FNS
	#undef X

	return result;
}
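/* NOTE: render_model_from_arrays packs everything into a single GL buffer:
 * vertex positions at offset 0, normals immediately after them (vertices_size
 * bytes each), and the u16 index data at the tail. The VAO below sources both
 * vertex attributes and the element buffer from that one allocation. */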
function BeamformerRenderModel
render_model_from_arrays(f32 *vertices, f32 *normals, i32 vertices_size, u16 *indices, i32 index_count)
{
	BeamformerRenderModel result = {0};

	i32 buffer_size    = vertices_size * 2 + index_count * (i32)sizeof(u16);
	i32 indices_offset = vertices_size * 2;
	i32 indices_size   = index_count * (i32)sizeof(u16);

	result.elements        = index_count;
	result.elements_offset = indices_offset;

	glCreateBuffers(1, &result.buffer);
	glNamedBufferStorage(result.buffer, buffer_size, 0, GL_DYNAMIC_STORAGE_BIT);
	glNamedBufferSubData(result.buffer, 0,              vertices_size, vertices);
	glNamedBufferSubData(result.buffer, vertices_size,  vertices_size, normals);
	glNamedBufferSubData(result.buffer, indices_offset, indices_size,  indices);

	glCreateVertexArrays(1, &result.vao);
	glVertexArrayVertexBuffer(result.vao, 0, result.buffer, 0,             3 * sizeof(f32));
	glVertexArrayVertexBuffer(result.vao, 1, result.buffer, vertices_size, 3 * sizeof(f32));
	glVertexArrayElementBuffer(result.vao, result.buffer);

	glEnableVertexArrayAttrib(result.vao, 0);
	glEnableVertexArrayAttrib(result.vao, 1);

	glVertexArrayAttribFormat(result.vao, 0, 3, GL_FLOAT, 0, 0);
	glVertexArrayAttribFormat(result.vao, 1, 3, GL_FLOAT, 0, (u32)vertices_size);

	glVertexArrayAttribBinding(result.vao, 0, 0);
	glVertexArrayAttribBinding(result.vao, 1, 0);

	return result;
}
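/* NOTE: raylib owns the main GLFW window; the declarations below are the
 * minimal slice of GLFW needed to create additional invisible 1x1 windows
 * whose GL contexts share objects with it, one per worker thread.
 * worker_thread_sleep() parks a worker on its sync_variable: the thread
 * blocks while the value is 1 and claims a wakeup by CASing it from 0 back
 * to 1; the asleep flag is published so that other code (e.g. debug_reload)
 * can wait for the workers to go idle. */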
#define GLFW_VISIBLE 0x00020004
void glfwWindowHint(i32, i32);
iptr glfwCreateWindow(i32, i32, char *, iptr, iptr);
void glfwMakeContextCurrent(iptr);

function void
worker_thread_sleep(GLWorkerThreadContext *ctx)
{
	for (;;) {
		i32 expected = 0;
		if (atomic_cas_u32(&ctx->sync_variable, &expected, 1))
			break;

		atomic_store_u32(&ctx->asleep, 1);
		os_wait_on_value(&ctx->sync_variable, 1, (u32)-1);
		atomic_store_u32(&ctx->asleep, 0);
	}
}

function OS_THREAD_ENTRY_POINT_FN(compute_worker_thread_entry_point)
{
	GLWorkerThreadContext *ctx = (GLWorkerThreadContext *)_ctx;

	glfwMakeContextCurrent(ctx->window_handle);
	ctx->gl_context = os_get_native_gl_context(ctx->window_handle);

	BeamformerCtx *beamformer = (BeamformerCtx *)ctx->user_context;
	glCreateQueries(GL_TIME_ELAPSED, countof(beamformer->compute_context.shader_timer_ids),
	                beamformer->compute_context.shader_timer_ids);

	for (;;) {
		worker_thread_sleep(ctx);
		asan_poison_region(ctx->arena.beg, ctx->arena.end - ctx->arena.beg);
		beamformer_complete_compute(ctx->user_context, &ctx->arena, ctx->gl_context);
	}

	unreachable();

	return 0;
}

function OS_THREAD_ENTRY_POINT_FN(upload_worker_thread_entry_point)
{
	GLWorkerThreadContext *ctx = (GLWorkerThreadContext *)_ctx;
	glfwMakeContextCurrent(ctx->window_handle);
	ctx->gl_context = os_get_native_gl_context(ctx->window_handle);

	BeamformerUploadThreadContext *up = (typeof(up))ctx->user_context;
	glCreateQueries(GL_TIMESTAMP, 1, &up->rf_buffer->data_timestamp_query);
	/* NOTE(rnp): start this here so we don't have to worry about it being started or not */
	glQueryCounter(up->rf_buffer->data_timestamp_query, GL_TIMESTAMP);

	for (;;) {
		worker_thread_sleep(ctx);
		asan_poison_region(ctx->arena.beg, ctx->arena.end - ctx->arena.beg);
		beamformer_rf_upload(up, ctx->arena);
	}

	unreachable();

	return 0;
}
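/* NOTE: one-time startup: carve out sub-arenas for the worker threads and UI,
 * create the raylib window and resolve the GL function pointers, map the shared
 * memory region used by external clients, spawn the compute and upload workers
 * (each with its own shared GL context), hook up CUDA when running on NVIDIA,
 * and register file watches so that shaders reload when edited on disk. */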
function void
setup_beamformer(Arena *memory, BeamformerCtx **o_ctx, BeamformerInput **o_input)
{
	Arena compute_arena = sub_arena(memory, MB(2), KB(4));
	Arena upload_arena  = sub_arena(memory, KB(64), KB(4));
	Stream error        = stream_alloc(memory, MB(1));
	Arena ui_arena      = sub_arena(memory, MB(2), KB(4));

	BeamformerCtx   *ctx   = *o_ctx   = push_struct(memory, typeof(*ctx));
	BeamformerInput *input = *o_input = push_struct(memory, typeof(*input));

	ctx->window_size  = (iv2){{1280, 840}};
	ctx->error_stream = error;
	ctx->ui_backing_store      = ui_arena;
	input->executable_reloaded = 1;

	os_init(&ctx->os, memory);
	ctx->os.compute_worker.arena  = compute_arena;
	ctx->os.compute_worker.asleep = 1;
	ctx->os.upload_worker.arena   = upload_arena;
	ctx->os.upload_worker.asleep  = 1;

	debug_init(&ctx->os, (iptr)input, memory);

	SetConfigFlags(FLAG_VSYNC_HINT|FLAG_WINDOW_ALWAYS_RUN);
	InitWindow(ctx->window_size.w, ctx->window_size.h, "OGL Beamformer");
	/* NOTE: do this after initing so that the window starts out floating in tiling wm */
	SetWindowState(FLAG_WINDOW_RESIZABLE);
	SetWindowMinSize(840, ctx->window_size.h);

	glfwWindowHint(GLFW_VISIBLE, 0);
	iptr raylib_window_handle = (iptr)GetPlatformWindowHandle();

	#define X(name, ret, params) name = (name##_fn *)os_gl_proc_address(#name);
	OGLProcedureList
	#undef X
	/* NOTE: Gather information about the GPU */
	get_gl_params(&ctx->gl, &ctx->error_stream);
	dump_gl_params(&ctx->gl, *memory, &ctx->os);
	validate_gl_requirements(&ctx->gl, *memory);

	ctx->beamform_work_queue  = push_struct(memory, BeamformWorkQueue);
	ctx->compute_shader_stats = push_struct(memory, ComputeShaderStats);
	ctx->compute_timing_table = push_struct(memory, ComputeTimingTable);

	/* TODO(rnp): I'm not sure if it's a good idea to pre-reserve a bunch of semaphores
	 * on w32 but that's what we are doing for now */
	u32 lock_count = BeamformerSharedMemoryLockKind_Count + BeamformerMaxParameterBlockSlots;
	ctx->shared_memory = os_create_shared_memory_area(memory, OS_SHARED_MEMORY_NAME, lock_count,
	                                                  BEAMFORMER_SHARED_MEMORY_SIZE);
	BeamformerSharedMemory *sm = ctx->shared_memory.region;
	if (!sm) os_fatal(s8("Get more ram lol\n"));
	mem_clear(sm, 0, sizeof(*sm));

	sm->version = BEAMFORMER_SHARED_MEMORY_VERSION;
	sm->reserved_parameter_blocks = 1;

	BeamformerComputeContext *cs = &ctx->compute_context;

	GLWorkerThreadContext *worker = &ctx->os.compute_worker;
	/* TODO(rnp): we should lock this down after we have something working */
	worker->user_context  = (iptr)ctx;
	worker->window_handle = glfwCreateWindow(1, 1, "", 0, raylib_window_handle);
	worker->handle        = os_create_thread(*memory, (iptr)worker, s8("[compute]"),
	                                         compute_worker_thread_entry_point);

	GLWorkerThreadContext *upload = &ctx->os.upload_worker;
	BeamformerUploadThreadContext *upctx = push_struct(memory, typeof(*upctx));
	upload->user_context = (iptr)upctx;
	upctx->rf_buffer            = &cs->rf_buffer;
	upctx->shared_memory        = &ctx->shared_memory;
	upctx->compute_timing_table = ctx->compute_timing_table;
	upctx->compute_worker_sync  = &ctx->os.compute_worker.sync_variable;
	upload->window_handle = glfwCreateWindow(1, 1, "", 0, raylib_window_handle);
	upload->handle        = os_create_thread(*memory, (iptr)upload, s8("[upload]"),
	                                         upload_worker_thread_entry_point);

	glfwMakeContextCurrent(raylib_window_handle);

	if (ctx->gl.vendor_id == GL_VENDOR_NVIDIA
	    && load_cuda_lib(&ctx->os, s8(OS_CUDA_LIB_NAME), (iptr)&cs->cuda_lib, *memory))
	{
		os_add_file_watch(&ctx->os, memory, s8(OS_CUDA_LIB_NAME), load_cuda_lib, (iptr)&cs->cuda_lib);
	} else {
		#define X(name, symname) if (!cs->cuda_lib.name) cs->cuda_lib.name = cuda_ ## name ## _stub;
		CUDA_LIB_FNS
		#undef X
	}

	/* NOTE: set up OpenGL debug logging */
	struct gl_debug_ctx *gl_debug_ctx = push_struct(memory, typeof(*gl_debug_ctx));
	gl_debug_ctx->stream          = stream_alloc(memory, 1024);
	gl_debug_ctx->os_error_handle = ctx->os.error_handle;
	glDebugMessageCallback(gl_debug_logger, gl_debug_ctx);
#ifdef _DEBUG
	glEnable(GL_DEBUG_OUTPUT);
#endif

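	/* NOTE: per-shader GLSL headers. Each entry is a std140 uniform block whose
	 * members are generated by expanding the same X-macro parameter lists that
	 * also describe the C-side parameter structs, so the GLSL layout stays in
	 * sync with the C layout; each X(...) entry becomes one "<gltype> <name>;"
	 * declaration line. The result is stashed in the header field of the
	 * matching ShaderReloadContext below and combined with the shader file's
	 * text when it is (re)compiled. */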
	read_only local_persist s8 compute_headers[BeamformerShaderKind_ComputeCount] = {
	#define X(name, type, size, __e, gltype, glsize, comment) "\t" #gltype " " #name #glsize "; " comment "\n"
		[BeamformerShaderKind_DAS] = s8_comp("layout(std140, binding = 0) uniform parameters {\n"
		                                     BEAMFORMER_PARAMS_HEAD
		                                     BEAMFORMER_UI_PARAMS
		                                     BEAMFORMER_PARAMS_TAIL
		                                     "};\n\n"
		),
	#undef X
	/* X(name, type, gltype) */
	#define X(name, t, gltype) "\t" #gltype " " #name ";\n"
		[BeamformerShaderKind_Decode] = s8_comp("layout(std140, binding = 0) uniform parameters {\n"
		                                        BEAMFORMER_DECODE_UBO_PARAM_LIST
		                                        "};\n\n"
		),
		[BeamformerShaderKind_Demodulate] = s8_comp("layout(std140, binding = 0) uniform parameters {\n"
		                                            BEAMFORMER_DEMOD_UBO_PARAM_LIST
		                                            "};\n\n"
		),
	#undef X
	};

	#define X(e, sn, f, pretty_name) do if (s8(f).len > 0) {                                   \
		ShaderReloadContext *src = push_struct(memory, typeof(*src));                      \
		src->beamformer_context = ctx;                                                     \
		src->header  = compute_headers[BeamformerShaderKind_##e];                          \
		src->path    = s8(static_path_join("shaders", f ".glsl"));                         \
		src->name    = src->path;                                                          \
		src->shader  = cs->programs + BeamformerShaderKind_##e;                            \
		src->gl_type = GL_COMPUTE_SHADER;                                                  \
		src->kind    = BeamformerShaderKind_##e;                                           \
		src->link    = src;                                                                \
		os_add_file_watch(&ctx->os, memory, src->path, reload_shader_indirect, (iptr)src); \
		reload_shader_indirect(&ctx->os, src->path, (iptr)src, *memory);                   \
	} while (0);
	COMPUTE_SHADERS_INTERNAL
	#undef X
	os_wake_waiters(&worker->sync_variable);

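	/* NOTE: frame views render offscreen into a multisampled target that is then
	 * resolved: framebuffers[0] is the MSAA render target, framebuffers[1] the
	 * resolving target, and the renderbuffers below supply the multisampled
	 * colour (RGBA8) and depth (24-bit) storage. The GL_VENDOR_ARM path (which
	 * also covers freedreno and the win32 Mesa driver, see get_gl_params) gets
	 * 4 samples, everything else 8. */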
	FrameViewRenderContext *fvr = &ctx->frame_view_render_context;
	glCreateFramebuffers(countof(fvr->framebuffers), fvr->framebuffers);
	LABEL_GL_OBJECT(GL_FRAMEBUFFER, fvr->framebuffers[0], s8("Frame View Framebuffer"));
	LABEL_GL_OBJECT(GL_FRAMEBUFFER, fvr->framebuffers[1], s8("Frame View Resolving Framebuffer"));

	glCreateRenderbuffers(countof(fvr->renderbuffers), fvr->renderbuffers);
	i32 msaa_samples = ctx->gl.vendor_id == GL_VENDOR_ARM? 4 : 8;
	glNamedRenderbufferStorageMultisample(fvr->renderbuffers[0], msaa_samples, GL_RGBA8,
	                                      FRAME_VIEW_RENDER_TARGET_SIZE);
	glNamedRenderbufferStorageMultisample(fvr->renderbuffers[1], msaa_samples, GL_DEPTH_COMPONENT24,
	                                      FRAME_VIEW_RENDER_TARGET_SIZE);

	ShaderReloadContext *render_3d = push_struct(memory, typeof(*render_3d));
	render_3d->beamformer_context = ctx;
	render_3d->path    = s8(static_path_join("shaders", "render_3d.frag.glsl"));
	render_3d->name    = s8("shaders/render_3d.glsl");
	render_3d->gl_type = GL_FRAGMENT_SHADER;
	render_3d->kind    = BeamformerShaderKind_Render3D;
	render_3d->shader  = &fvr->shader;
	render_3d->header  = s8(""
		"layout(location = 0) in vec3 normal;\n"
		"layout(location = 1) in vec3 texture_coordinate;\n\n"
		"layout(location = 2) in vec3 test_texture_coordinate;\n\n"
		"layout(location = 0) out vec4 out_colour;\n\n"
		"layout(location = " str(FRAME_VIEW_DYNAMIC_RANGE_LOC) ") uniform float u_db_cutoff = 60;\n"
		"layout(location = " str(FRAME_VIEW_THRESHOLD_LOC) ") uniform float u_threshold = 40;\n"
		"layout(location = " str(FRAME_VIEW_GAMMA_LOC) ") uniform float u_gamma = 1;\n"
		"layout(location = " str(FRAME_VIEW_LOG_SCALE_LOC) ") uniform bool u_log_scale;\n"
		"layout(location = " str(FRAME_VIEW_BB_COLOUR_LOC) ") uniform vec4 u_bb_colour = vec4(" str(FRAME_VIEW_BB_COLOUR) ");\n"
		"layout(location = " str(FRAME_VIEW_BB_FRACTION_LOC) ") uniform float u_bb_fraction = " str(FRAME_VIEW_BB_FRACTION) ";\n"
		"layout(location = " str(FRAME_VIEW_SOLID_BB_LOC) ") uniform bool u_solid_bb;\n"
		"\n"
		"layout(binding = 0) uniform sampler3D u_texture;\n");

	render_3d->link = push_struct(memory, typeof(*render_3d));
	render_3d->link->gl_type = GL_VERTEX_SHADER;
	render_3d->link->link    = render_3d;
	render_3d->link->header  = s8(""
		"layout(location = 0) in vec3 v_position;\n"
		"layout(location = 1) in vec3 v_normal;\n"
		"\n"
		"layout(location = 0) out vec3 f_normal;\n"
		"layout(location = 1) out vec3 f_texture_coordinate;\n"
		"layout(location = 2) out vec3 f_orig_texture_coordinate;\n"
		"\n"
		"layout(location = " str(FRAME_VIEW_MODEL_MATRIX_LOC) ") uniform mat4 u_model;\n"
		"layout(location = " str(FRAME_VIEW_VIEW_MATRIX_LOC) ") uniform mat4 u_view;\n"
		"layout(location = " str(FRAME_VIEW_PROJ_MATRIX_LOC) ") uniform mat4 u_projection;\n"
		"\n"
		"\n"
		"void main()\n"
		"{\n"
		"\tvec3 pos = v_position;\n"
		"\tf_orig_texture_coordinate = (2 * v_position + 1) / 2;\n"
		//"\tif (v_position.y == -1) pos.x = clamp(v_position.x, -u_clip_fraction, u_clip_fraction);\n"
		"\tvec3 tex_coord = (2 * pos + 1) / 2;\n"
		"\tf_texture_coordinate = tex_coord.xzy;\n"
		//"\tf_texture_coordinate = u_swizzle? tex_coord.xzy : tex_coord;\n"
		//"\tf_normal = normalize(mat3(u_model) * v_normal);\n"
		"\tf_normal = v_normal;\n"
		"\tgl_Position = u_projection * u_view * u_model * vec4(pos, 1);\n"
		"}\n");
	reload_shader(&ctx->os, render_3d->path, (iptr)render_3d, *memory);
	os_add_file_watch(&ctx->os, memory, render_3d->path, reload_shader, (iptr)render_3d);

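	/* NOTE: unit cube centred on the origin (corners at +/-0.5). Every corner is
	 * listed three times so that each of the three faces meeting there can carry
	 * its own flat normal; the index list therefore addresses 24 vertices instead
	 * of 8. */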
	f32 unit_cube_vertices[] = {
		 0.5f,  0.5f, -0.5f,
		 0.5f,  0.5f, -0.5f,
		 0.5f,  0.5f, -0.5f,
		 0.5f, -0.5f, -0.5f,
		 0.5f, -0.5f, -0.5f,
		 0.5f, -0.5f, -0.5f,
		 0.5f,  0.5f,  0.5f,
		 0.5f,  0.5f,  0.5f,
		 0.5f,  0.5f,  0.5f,
		 0.5f, -0.5f,  0.5f,
		 0.5f, -0.5f,  0.5f,
		 0.5f, -0.5f,  0.5f,
		-0.5f,  0.5f, -0.5f,
		-0.5f,  0.5f, -0.5f,
		-0.5f,  0.5f, -0.5f,
		-0.5f, -0.5f, -0.5f,
		-0.5f, -0.5f, -0.5f,
		-0.5f, -0.5f, -0.5f,
		-0.5f,  0.5f,  0.5f,
		-0.5f,  0.5f,  0.5f,
		-0.5f,  0.5f,  0.5f,
		-0.5f, -0.5f,  0.5f,
		-0.5f, -0.5f,  0.5f,
		-0.5f, -0.5f,  0.5f
	};
	f32 unit_cube_normals[] = {
		 0.0f,  0.0f, -1.0f,
		 0.0f,  1.0f,  0.0f,
		 1.0f,  0.0f,  0.0f,
		 0.0f,  0.0f, -1.0f,
		 0.0f, -1.0f,  0.0f,
		 1.0f,  0.0f,  0.0f,
		 0.0f,  0.0f,  1.0f,
		 0.0f,  1.0f,  0.0f,
		 1.0f,  0.0f,  0.0f,
		 0.0f,  0.0f,  1.0f,
		 0.0f, -1.0f,  0.0f,
		 1.0f,  0.0f,  0.0f,
		 0.0f,  0.0f, -1.0f,
		 0.0f,  1.0f,  0.0f,
		-1.0f,  0.0f,  0.0f,
		 0.0f,  0.0f, -1.0f,
		 0.0f, -1.0f,  0.0f,
		-1.0f,  0.0f,  0.0f,
		 0.0f,  0.0f,  1.0f,
		 0.0f,  1.0f,  0.0f,
		-1.0f,  0.0f,  0.0f,
		 0.0f,  0.0f,  1.0f,
		 0.0f, -1.0f,  0.0f,
		-1.0f,  0.0f,  0.0f
	};
	u16 unit_cube_indices[] = {
		 1, 13, 19,
		 1, 19,  7,
		 9,  6, 18,
		 9, 18, 21,
		23, 20, 14,
		23, 14, 17,
		16,  4, 10,
		16, 10, 22,
		 5,  2,  8,
		 5,  8, 11,
		15, 12,  0,
		15,  0,  3
	};

	cs->unit_cube_model = render_model_from_arrays(unit_cube_vertices, unit_cube_normals,
	                                               sizeof(unit_cube_vertices),
	                                               unit_cube_indices, countof(unit_cube_indices));

	/* stfu gcc this is used */
	DEBUG_DECL((void)BeamformerParameterBlockRegionOffsets;)
}

function void
beamformer_invalidate_shared_memory(BeamformerCtx *ctx)
{
	/* NOTE(rnp): work around pebkac when the beamformer is closed while we are doing live
	 * imaging. if the verasonics is blocked in an external function (calling the library
	 * to start compute) it is impossible for us to get it to properly shut down which
	 * will sometimes result in us needing to power cycle the system. set the shared memory
	 * into an error state and release dispatch lock so that future calls will error instead
	 * of blocking.
	 */
	BeamformerSharedMemory *sm = ctx->shared_memory.region;
	BeamformerSharedMemoryLockKind lock = BeamformerSharedMemoryLockKind_DispatchCompute;
	atomic_store_u32(&sm->invalid, 1);
	atomic_store_u32(&sm->external_work_queue.ridx, sm->external_work_queue.widx);
	DEBUG_DECL(if (sm->locks[lock])) {
		os_shared_memory_region_unlock(&ctx->shared_memory, sm->locks, (i32)lock);
	}

	atomic_or_u32(&sm->live_imaging_dirty_flags, BeamformerLiveImagingDirtyFlags_StopImaging);
}