static.c (19004B)
/* See LICENSE for license details. */
#ifndef _DEBUG

#include "beamformer.c"
#define debug_init(...)

#else

global void *debug_lib;

#define DEBUG_ENTRY_POINTS \
        X(beamformer_frame_step) \
        X(beamformer_complete_compute) \
        X(beamformer_compute_setup) \
        X(beamformer_reload_shader) \
        X(beamform_work_queue_push) \
        X(beamform_work_queue_push_commit)

#define X(name) global name ##_fn *name;
DEBUG_ENTRY_POINTS
#undef X

function FILE_WATCH_CALLBACK_FN(debug_reload)
{
        BeamformerInput *input = (BeamformerInput *)user_data;
        Stream err = arena_stream(arena);

        /* NOTE(rnp): spin until compute thread finishes its work (we will probably
         * never reload while compute is in progress but just in case). */
        while (!atomic_load_u32(&os->compute_worker.asleep));

        os_unload_library(debug_lib);
        debug_lib = os_load_library(OS_DEBUG_LIB_NAME, OS_DEBUG_LIB_TEMP_NAME, &err);

        #define X(name) name = os_lookup_dynamic_symbol(debug_lib, #name, &err);
        DEBUG_ENTRY_POINTS
        #undef X

        stream_append_s8(&err, s8("Reloaded Main Executable\n"));
        os_write_file(os->error_handle, stream_to_s8(&err));

        input->executable_reloaded = 1;

        return 1;
}

function void
debug_init(OS *os, iptr input, Arena *arena)
{
        os_add_file_watch(os, arena, s8(OS_DEBUG_LIB_NAME), debug_reload, input);
        debug_reload(os, s8(""), input, *arena);

        Stream err = arena_stream(*arena);
        void *rdoc = os_get_module(OS_RENDERDOC_SONAME, 0);
        if (rdoc) {
                renderdoc_get_api_fn *get_api = os_lookup_dynamic_symbol(rdoc, "RENDERDOC_GetAPI", &err);
                if (get_api) {
                        RenderDocAPI *api = 0;
                        if (get_api(10600, (void **)&api)) {
                                os->start_frame_capture = RENDERDOC_START_FRAME_CAPTURE(api);
                                os->end_frame_capture   = RENDERDOC_END_FRAME_CAPTURE(api);
                                stream_append_s8(&err, s8("loaded: " OS_RENDERDOC_SONAME "\n"));
                        }
                }
        }

        os_write_file(os->error_handle, stream_to_s8(&err));
}

#endif /* _DEBUG */

#define static_path_join(a, b) (a OS_PATH_SEPARATOR b)

struct gl_debug_ctx {
        Stream stream;
        iptr   os_error_handle;
};

function void
gl_debug_logger(u32 src, u32 type, u32 id, u32 lvl, i32 len, const char *msg, const void *userctx)
{
        (void)src; (void)type; (void)id;

        struct gl_debug_ctx *ctx = (struct gl_debug_ctx *)userctx;
        Stream *e = &ctx->stream;
        stream_append_s8s(e, s8("[OpenGL] "), (s8){.len = len, .data = (u8 *)msg}, s8("\n"));
        os_write_file(ctx->os_error_handle, stream_to_s8(e));
        stream_reset(e, 0);
}

function void
get_gl_params(GLParams *gl, Stream *err)
{
        char *vendor = (char *)glGetString(GL_VENDOR);
        if (!vendor) {
                stream_append_s8(err, s8("Failed to determine GL Vendor\n"));
                os_fatal(stream_to_s8(err));
        }
        /* TODO(rnp): str prefix of */
        switch (vendor[0]) {
        case 'A': gl->vendor_id = GL_VENDOR_AMD;    break;
        case 'I': gl->vendor_id = GL_VENDOR_INTEL;  break;
        case 'N': gl->vendor_id = GL_VENDOR_NVIDIA; break;
        /* NOTE(rnp): freedreno */
        case 'f': gl->vendor_id = GL_VENDOR_ARM;    break;
        /* NOTE(rnp): Microsoft Corporation - weird win32 thing (microsoft is just using mesa for the driver) */
        case 'M': gl->vendor_id = GL_VENDOR_ARM;    break;
        default:
                stream_append_s8s(err, s8("Unknown GL Vendor: "), c_str_to_s8(vendor), s8("\n"));
                os_fatal(stream_to_s8(err));
        }

        #define X(glname, name, suffix) glGetIntegerv(GL_##glname, &gl->name);
        GL_PARAMETERS
        #undef X
}

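/* NOTE: validate_gl_requirements accumulates every unmet requirement (UBO size, missing GL
 * procedures) into a single stream and only aborts at the end, so all problems are reported
 * in one fatal message rather than one at a time. */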
function void
validate_gl_requirements(GLParams *gl, Arena a)
{
        Stream s = arena_stream(a);

        if (gl->max_ubo_size < sizeof(BeamformerParameters)) {
                stream_append_s8(&s, s8("GPU must support UBOs of at least "));
                stream_append_i64(&s, sizeof(BeamformerParameters));
                stream_append_s8(&s, s8(" bytes!\n"));
        }

        #define X(name, ret, params) if (!name) stream_append_s8s(&s, s8("missing required GL function: "), s8(#name), s8("\n"));
        OGLProcedureList
        #undef X

        if (s.widx) os_fatal(stream_to_s8(&s));
}

function void
dump_gl_params(GLParams *gl, Arena a, OS *os)
{
        (void)gl; (void)a;
#ifdef _DEBUG
        s8 vendor = s8("vendor:");
        iz max_width = vendor.len;
        #define X(glname, name, suffix) if (s8(#name ":").len > max_width) max_width = s8(#name ":").len;
        GL_PARAMETERS
        #undef X
        max_width++;

        Stream s = arena_stream(a);
        stream_append_s8s(&s, s8("---- GL Parameters ----\n"), vendor);
        stream_pad(&s, ' ', max_width - vendor.len);
        switch (gl->vendor_id) {
        case GL_VENDOR_AMD:    stream_append_s8(&s, s8("AMD\n"));    break;
        case GL_VENDOR_ARM:    stream_append_s8(&s, s8("ARM\n"));    break;
        case GL_VENDOR_INTEL:  stream_append_s8(&s, s8("Intel\n"));  break;
        case GL_VENDOR_NVIDIA: stream_append_s8(&s, s8("nVidia\n")); break;
        }

        #define X(glname, name, suffix) \
                stream_append_s8(&s, s8(#name ":")); \
                stream_pad(&s, ' ', max_width - s8(#name ":").len); \
                stream_append_i64(&s, gl->name); \
                stream_append_s8(&s, s8(suffix)); \
                stream_append_byte(&s, '\n');
        GL_PARAMETERS
        #undef X
        stream_append_s8(&s, s8("-----------------------\n"));
        os_write_file(os->error_handle, stream_to_s8(&s));
#endif
}

function FILE_WATCH_CALLBACK_FN(reload_shader)
{
        ShaderReloadContext *ctx = (typeof(ctx))user_data;
        return beamformer_reload_shader(ctx->beamformer_context, ctx, arena, ctx->name);
}

function FILE_WATCH_CALLBACK_FN(reload_shader_indirect)
{
        ShaderReloadContext *src = (typeof(src))user_data;
        BeamformerCtx *ctx = src->beamformer_context;
        BeamformWork *work = beamform_work_queue_push(ctx->beamform_work_queue);
        if (work) {
                work->kind = BeamformerWorkKind_ReloadShader;
                work->shader_reload_context = src;
                beamform_work_queue_push_commit(ctx->beamform_work_queue);
                os_wake_waiters(&os->compute_worker.sync_variable);
        }
        return 1;
}

function FILE_WATCH_CALLBACK_FN(load_cuda_lib)
{
        CudaLib *cl = (CudaLib *)user_data;
        b32 result = os_file_exists((c8 *)path.data);
        if (result) {
                Stream err = arena_stream(arena);

                stream_append_s8(&err, s8("loading CUDA lib: " OS_CUDA_LIB_NAME "\n"));
                os_unload_library(cl->lib);
                cl->lib = os_load_library((c8 *)path.data, OS_CUDA_LIB_TEMP_NAME, &err);
                #define X(name, symname) cl->name = os_lookup_dynamic_symbol(cl->lib, symname, &err);
                CUDA_LIB_FNS
                #undef X

                os_write_file(os->error_handle, stream_to_s8(&err));
        }

        #define X(name, symname) if (!cl->name) cl->name = cuda_ ## name ## _stub;
        CUDA_LIB_FNS
        #undef X

        return result;
}

function BeamformerRenderModel
render_model_from_arrays(f32 *vertices, f32 *normals, u32 vertices_size, u16 *indices, u32 index_count)
{
        BeamformerRenderModel result = {0};

        i32 buffer_size    = vertices_size * 2 + index_count * sizeof(u16);
        i32 indices_offset = vertices_size * 2;
        i32 indices_size   = index_count * sizeof(u16);
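
        /* NOTE: the whole model lives in one buffer laid out as [ vertices | normals | indices ],
         * with normals starting at vertices_size and indices at 2 * vertices_size; the VAO vertex
         * bindings and the element buffer below all point back into this single allocation. */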

        result.elements        = index_count;
        result.elements_offset = indices_offset;

        glCreateBuffers(1, &result.buffer);
        glNamedBufferStorage(result.buffer, buffer_size, 0, GL_DYNAMIC_STORAGE_BIT);
        glNamedBufferSubData(result.buffer, 0,              vertices_size, vertices);
        glNamedBufferSubData(result.buffer, vertices_size,  vertices_size, normals);
        glNamedBufferSubData(result.buffer, indices_offset, indices_size,  indices);

        glCreateVertexArrays(1, &result.vao);
        glVertexArrayVertexBuffer(result.vao, 0, result.buffer, 0,             3 * sizeof(f32));
        glVertexArrayVertexBuffer(result.vao, 1, result.buffer, vertices_size, 3 * sizeof(f32));
        glVertexArrayElementBuffer(result.vao, result.buffer);

        glEnableVertexArrayAttrib(result.vao, 0);
        glEnableVertexArrayAttrib(result.vao, 1);

        glVertexArrayAttribFormat(result.vao, 0, 3, GL_FLOAT, 0, 0);
        glVertexArrayAttribFormat(result.vao, 1, 3, GL_FLOAT, 0, vertices_size);

        glVertexArrayAttribBinding(result.vao, 0, 0);
        glVertexArrayAttribBinding(result.vao, 1, 0);

        return result;
}

#define GLFW_VISIBLE 0x00020004
void glfwWindowHint(i32, i32);
iptr glfwCreateWindow(i32, i32, char *, iptr, iptr);
void glfwMakeContextCurrent(iptr);

function OS_THREAD_ENTRY_POINT_FN(compute_worker_thread_entry_point)
{
        GLWorkerThreadContext *ctx = (GLWorkerThreadContext *)_ctx;

        glfwMakeContextCurrent(ctx->window_handle);
        ctx->gl_context = os_get_native_gl_context(ctx->window_handle);

        beamformer_compute_setup(ctx->user_context, ctx->arena, ctx->gl_context);

        for (;;) {
                for (;;) {
                        i32 expected = 0;
                        if (atomic_cas_u32(&ctx->sync_variable, &expected, 1))
                                break;

                        atomic_store_u32(&ctx->asleep, 1);
                        os_wait_on_value(&ctx->sync_variable, 1, -1);
                        atomic_store_u32(&ctx->asleep, 0);
                }
                beamformer_complete_compute(ctx->user_context, ctx->arena, ctx->gl_context);
        }

        unreachable();

        return 0;
}

function void
setup_beamformer(BeamformerCtx *ctx, BeamformerInput *input, Arena *memory)
{
        debug_init(&ctx->os, (iptr)input, memory);

        ctx->window_size = (uv2){.w = 1280, .h = 840};

        SetConfigFlags(FLAG_VSYNC_HINT|FLAG_WINDOW_ALWAYS_RUN);
        InitWindow(ctx->window_size.w, ctx->window_size.h, "OGL Beamformer");
        /* NOTE: do this after initing so that the window starts out floating in tiling wm */
        SetWindowState(FLAG_WINDOW_RESIZABLE);
        SetWindowMinSize(840, ctx->window_size.h);

        glfwWindowHint(GLFW_VISIBLE, 0);
        iptr raylib_window_handle = (iptr)GetPlatformWindowHandle();

        #define X(name, ret, params) name = (name##_fn *)os_gl_proc_address(#name);
        OGLProcedureList
        #undef X
        /* NOTE: Gather information about the GPU */
        get_gl_params(&ctx->gl, &ctx->error_stream);
        dump_gl_params(&ctx->gl, *memory, &ctx->os);
        validate_gl_requirements(&ctx->gl, *memory);

        ctx->latest_frame         = ctx->beamform_frames;
        ctx->beamform_work_queue  = push_struct(memory, BeamformWorkQueue);
        ctx->compute_shader_stats = push_struct(memory, ComputeShaderStats);
        ctx->compute_timing_table = push_struct(memory, ComputeTimingTable);

        ctx->shared_memory = os_create_shared_memory_area(memory, OS_SHARED_MEMORY_NAME,
                                                          BeamformerSharedMemoryLockKind_Count,
                                                          BEAMFORMER_SHARED_MEMORY_SIZE);
        BeamformerSharedMemory *sm = ctx->shared_memory.region;
        if (!sm) os_fatal(s8("Get more ram lol\n"));
        mem_clear(sm, 0, sizeof(*sm));

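        /* NOTE: the region is zeroed before any defaults are written so a client attaching later
         * never observes stale state; the version field is presumably what the client library
         * checks to detect a mismatched shared memory layout. */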
        sm->version = BEAMFORMER_SHARED_MEMORY_VERSION;

        /* NOTE: default compute shader pipeline */
        sm->compute_stages[0]    = BeamformerShaderKind_Decode;
        sm->compute_stages[1]    = BeamformerShaderKind_DASCompute;
        sm->compute_stages_count = 2;

        GLWorkerThreadContext *worker = &ctx->os.compute_worker;
        /* TODO(rnp): we should lock this down after we have something working */
        worker->user_context  = (iptr)ctx;
        worker->window_handle = glfwCreateWindow(320, 240, "", 0, raylib_window_handle);
        worker->handle        = os_create_thread(*memory, (iptr)worker, s8("[compute]"),
                                                 compute_worker_thread_entry_point);

        glfwMakeContextCurrent(raylib_window_handle);

        if (ctx->gl.vendor_id == GL_VENDOR_NVIDIA
            && load_cuda_lib(&ctx->os, s8(OS_CUDA_LIB_NAME), (iptr)&ctx->cuda_lib, *memory))
        {
                os_add_file_watch(&ctx->os, memory, s8(OS_CUDA_LIB_NAME), load_cuda_lib,
                                  (iptr)&ctx->cuda_lib);
        } else {
                #define X(name, symname) if (!ctx->cuda_lib.name) ctx->cuda_lib.name = cuda_ ## name ## _stub;
                CUDA_LIB_FNS
                #undef X
        }

        /* NOTE: set up OpenGL debug logging */
        struct gl_debug_ctx *gl_debug_ctx = push_struct(memory, typeof(*gl_debug_ctx));
        gl_debug_ctx->stream          = stream_alloc(memory, 1024);
        gl_debug_ctx->os_error_handle = ctx->os.error_handle;
        glDebugMessageCallback(gl_debug_logger, gl_debug_ctx);
#ifdef _DEBUG
        glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
#endif

        #define X(name, type, size, gltype, glsize, comment) "\t" #gltype " " #name #glsize "; " comment "\n"
        read_only local_persist s8 compute_parameters_header = s8_comp(""
                "layout(std140, binding = 0) uniform parameters {\n"
                BEAMFORMER_PARAMS_HEAD
                BEAMFORMER_UI_PARAMS
                BEAMFORMER_PARAMS_TAIL
                "};\n\n"
        );
        #undef X

        ComputeShaderCtx *cs = &ctx->csctx;
        #define X(e, sn, f, nh, pretty_name) do if (s8(f).len > 0) { \
                ShaderReloadContext *src = push_struct(memory, typeof(*src)); \
                src->beamformer_context = ctx; \
                if (nh) src->header = compute_parameters_header; \
                src->path    = s8(static_path_join("shaders", f ".glsl")); \
                src->name    = src->path; \
                src->shader  = cs->programs + BeamformerShaderKind_##e; \
                src->gl_type = GL_COMPUTE_SHADER; \
                src->kind    = BeamformerShaderKind_##e; \
                src->link    = src; \
                os_add_file_watch(&ctx->os, memory, src->path, reload_shader_indirect, (iptr)src); \
                reload_shader_indirect(&ctx->os, src->path, (iptr)src, *memory); \
        } while (0);
        COMPUTE_SHADERS
        #undef X
        os_wake_waiters(&worker->sync_variable);

        FrameViewRenderContext *fvr = &ctx->frame_view_render_context;
        glCreateFramebuffers(countof(fvr->framebuffers), fvr->framebuffers);
        LABEL_GL_OBJECT(GL_FRAMEBUFFER, fvr->framebuffers[0], s8("Frame View Framebuffer"));
        LABEL_GL_OBJECT(GL_FRAMEBUFFER, fvr->framebuffers[1], s8("Frame View Resolving Framebuffer"));

        glCreateRenderbuffers(countof(fvr->renderbuffers), fvr->renderbuffers);
        i32 msaa_samples = ctx->gl.vendor_id == GL_VENDOR_ARM? 4 : 8;
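        /* NOTE: fewer samples are requested on ARM, presumably to keep the multisampled render
         * targets cheap on tiling GPUs; both renderbuffers below must use the same sample count
         * or the framebuffer would be incomplete. */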
        glNamedRenderbufferStorageMultisample(fvr->renderbuffers[0], msaa_samples, GL_RGBA8,
                                              FRAME_VIEW_RENDER_TARGET_SIZE);
        glNamedRenderbufferStorageMultisample(fvr->renderbuffers[1], msaa_samples, GL_DEPTH_COMPONENT24,
                                              FRAME_VIEW_RENDER_TARGET_SIZE);

        ShaderReloadContext *render_3d = push_struct(memory, typeof(*render_3d));
        render_3d->beamformer_context = ctx;
        render_3d->path    = s8(static_path_join("shaders", "render_3d.frag.glsl"));
        render_3d->name    = s8("shaders/render_3d.glsl");
        render_3d->gl_type = GL_FRAGMENT_SHADER;
        render_3d->kind    = BeamformerShaderKind_Render3D;
        render_3d->shader  = &fvr->shader;
        render_3d->header  = s8(""
        "layout(location = 0) in vec3 normal;\n"
        "layout(location = 1) in vec3 texture_coordinate;\n\n"
        "layout(location = 2) in vec3 test_texture_coordinate;\n\n"
        "layout(location = 0) out vec4 out_colour;\n\n"
        "layout(location = " str(FRAME_VIEW_DYNAMIC_RANGE_LOC) ") uniform float u_db_cutoff = 60;\n"
        "layout(location = " str(FRAME_VIEW_THRESHOLD_LOC)     ") uniform float u_threshold = 40;\n"
        "layout(location = " str(FRAME_VIEW_GAMMA_LOC)         ") uniform float u_gamma = 1;\n"
        "layout(location = " str(FRAME_VIEW_LOG_SCALE_LOC)     ") uniform bool u_log_scale;\n"
        "layout(location = " str(FRAME_VIEW_BB_COLOUR_LOC)     ") uniform vec4 u_bb_colour = vec4(" str(FRAME_VIEW_BB_COLOUR) ");\n"
        "layout(location = " str(FRAME_VIEW_BB_FRACTION_LOC)   ") uniform float u_bb_fraction = " str(FRAME_VIEW_BB_FRACTION) ";\n"
        "layout(location = " str(FRAME_VIEW_SOLID_BB_LOC)      ") uniform bool u_solid_bb;\n"
        "\n"
        "layout(binding = 0) uniform sampler3D u_texture;\n");

        render_3d->link = push_struct(memory, typeof(*render_3d));
        render_3d->link->gl_type = GL_VERTEX_SHADER;
        render_3d->link->link    = render_3d;
        render_3d->link->header  = s8(""
        "layout(location = 0) in vec3 v_position;\n"
        "layout(location = 1) in vec3 v_normal;\n"
        "\n"
        "layout(location = 0) out vec3 f_normal;\n"
        "layout(location = 1) out vec3 f_texture_coordinate;\n"
        "layout(location = 2) out vec3 f_orig_texture_coordinate;\n"
        "\n"
        "layout(location = " str(FRAME_VIEW_MODEL_MATRIX_LOC) ") uniform mat4 u_model;\n"
        "layout(location = " str(FRAME_VIEW_VIEW_MATRIX_LOC)  ") uniform mat4 u_view;\n"
        "layout(location = " str(FRAME_VIEW_PROJ_MATRIX_LOC)  ") uniform mat4 u_projection;\n"
        "\n"
        "\n"
        "void main()\n"
        "{\n"
        "\tvec3 pos = v_position;\n"
        "\tf_orig_texture_coordinate = (2 * v_position + 1) / 2;\n"
        //"\tif (v_position.y == -1) pos.x = clamp(v_position.x, -u_clip_fraction, u_clip_fraction);\n"
        "\tvec3 tex_coord = (2 * pos + 1) / 2;\n"
        "\tf_texture_coordinate = tex_coord.xzy;\n"
        //"\tf_texture_coordinate = u_swizzle? tex_coord.xzy : tex_coord;\n"
        //"\tf_normal = normalize(mat3(u_model) * v_normal);\n"
        "\tf_normal = v_normal;\n"
        "\tgl_Position = u_projection * u_view * u_model * vec4(pos, 1);\n"
        "}\n");
        reload_shader(&ctx->os, render_3d->path, (iptr)render_3d, *memory);
        os_add_file_watch(&ctx->os, memory, render_3d->path, reload_shader, (iptr)render_3d);

        f32 unit_cube_vertices[] = {
                 0.5f,  0.5f, -0.5f,
                 0.5f,  0.5f, -0.5f,
                 0.5f,  0.5f, -0.5f,
                 0.5f, -0.5f, -0.5f,
                 0.5f, -0.5f, -0.5f,
                 0.5f, -0.5f, -0.5f,
                 0.5f,  0.5f,  0.5f,
                 0.5f,  0.5f,  0.5f,
                 0.5f,  0.5f,  0.5f,
                 0.5f, -0.5f,  0.5f,
                 0.5f, -0.5f,  0.5f,
                 0.5f, -0.5f,  0.5f,
                -0.5f,  0.5f, -0.5f,
                -0.5f,  0.5f, -0.5f,
                -0.5f,  0.5f, -0.5f,
                -0.5f, -0.5f, -0.5f,
                -0.5f, -0.5f, -0.5f,
                -0.5f, -0.5f, -0.5f,
                -0.5f,  0.5f,  0.5f,
                -0.5f,  0.5f,  0.5f,
                -0.5f,  0.5f,  0.5f,
                -0.5f, -0.5f,  0.5f,
                -0.5f, -0.5f,  0.5f,
                -0.5f, -0.5f,  0.5f
        };
        f32 unit_cube_normals[] = {
                 0.0f,  0.0f, -1.0f,
                 0.0f,  1.0f,  0.0f,
                 1.0f,  0.0f,  0.0f,
                 0.0f,  0.0f, -1.0f,
                 0.0f, -1.0f,  0.0f,
                 1.0f,  0.0f,  0.0f,
                 0.0f,  0.0f,  1.0f,
                 0.0f,  1.0f,  0.0f,
                 1.0f,  0.0f,  0.0f,
                 0.0f,  0.0f,  1.0f,
                 0.0f, -1.0f,  0.0f,
                 1.0f,  0.0f,  0.0f,
                 0.0f,  0.0f, -1.0f,
                 0.0f,  1.0f,  0.0f,
                -1.0f,  0.0f,  0.0f,
                 0.0f,  0.0f, -1.0f,
                 0.0f, -1.0f,  0.0f,
                -1.0f,  0.0f,  0.0f,
                 0.0f,  0.0f,  1.0f,
                 0.0f,  1.0f,  0.0f,
                -1.0f,  0.0f,  0.0f,
                 0.0f,  0.0f,  1.0f,
                 0.0f, -1.0f,  0.0f,
                -1.0f,  0.0f,  0.0f
        };
        u16 unit_cube_indices[] = {
                 1, 13, 19,
                 1, 19,  7,
                 9,  6, 18,
                 9, 18, 21,
                23, 20, 14,
                23, 14, 17,
                16,  4, 10,
                16, 10, 22,
                 5,  2,  8,
                 5,  8, 11,
                15, 12,  0,
                15,  0,  3
        };

        cs->unit_cube_model = render_model_from_arrays(unit_cube_vertices, unit_cube_normals,
                                                       sizeof(unit_cube_vertices),
                                                       unit_cube_indices, countof(unit_cube_indices));
}

function void
beamformer_invalidate_shared_memory(BeamformerCtx *ctx)
{
        /* NOTE(rnp): work around pebkac when the beamformer is closed while we are doing live
         * imaging. if the verasonics is blocked in an external function (calling the library
         * to start compute) it is impossible for us to get it to properly shut down which
         * will sometimes result in us needing to power cycle the system. set the shared memory
         * into an error state and release dispatch lock so that future calls will error instead
         * of blocking.
         */
        BeamformerSharedMemory *sm = ctx->shared_memory.region;
        BeamformerSharedMemoryLockKind lock = BeamformerSharedMemoryLockKind_DispatchCompute;
        atomic_store_u32(&sm->invalid, 1);
        atomic_store_u32(&sm->external_work_queue.ridx, sm->external_work_queue.widx);
        DEBUG_DECL(if (sm->locks[lock])) {
                os_shared_memory_region_unlock(&ctx->shared_memory, sm->locks, lock);
        }

        atomic_or_u32(&sm->live_imaging_dirty_flags, BeamformerLiveImagingDirtyFlags_StopImaging);
}