Commit: 612c98af0e59cf66e7501c48eb2114fb55342282
Parent: 15ca2d03fb84519df736079d3686fc1cc8ddfda3
Author: Randy Palamar
Date: Tue, 6 Jan 2026 09:45:29 -0700
core: add non-temporal memory copy and AVX2 acceleration
When we bulk upload many MB of data to the GPU and do nothing
else with it on the CPU, we really don't want to flood the cache
with that data. The CPU's non-temporal instructions at least
hint that we would prefer the data not pass through the cache.
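
As an illustration only (not the committed code, which lives in util.c
below): with AVX, a streaming store writes around the cache through the
write-combining buffers, and the basic pattern looks roughly like the
sketch here. stream_copy_example is a hypothetical name.

    #include <immintrin.h>
    #include <stdint.h>

    /* hypothetical sketch: stream 32-byte blocks past the cache */
    static void
    stream_copy_example(uint8_t *restrict dst, const uint8_t *restrict src, uint64_t n)
    {
    	/* assumes dst is 32-byte aligned and n is a multiple of 32 */
    	for (; n >= 32; n -= 32, src += 32, dst += 32)
    		_mm256_stream_si256((__m256i *)dst, _mm256_loadu_si256((const __m256i *)src));
    	_mm_sfence(); /* make the streaming stores visible to other agents */
    }
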
Diffstat:
5 files changed, 60 insertions(+), 22 deletions(-)
diff --git a/.gitignore b/.gitignore
@@ -3,7 +3,7 @@
!assets
!external
!generated
-!helpers
+!lib
!shaders
!tests
!*.c
diff --git a/beamformer.c b/beamformer.c
@@ -1426,7 +1426,7 @@ DEBUG_EXPORT BEAMFORMER_RF_UPLOAD_FN(beamformer_rf_upload)
b32 nvidia = gl_parameters.vendor_id == GLVendor_NVIDIA;
rf->active_rf_size = (u32)round_up_to(rf_block_rf_size & 0xFFFFFFFFULL, 64);
- if (rf->size < rf->active_rf_size)
+ if unlikely(rf->size < rf->active_rf_size)
beamformer_rf_buffer_allocate(rf, rf->active_rf_size, nvidia);
u32 slot = rf->insertion_index++ % countof(rf->compute_syncs);
@@ -1448,7 +1448,8 @@ DEBUG_EXPORT BEAMFORMER_RF_UPLOAD_FN(beamformer_rf_upload)
u8 *data = beamformer_shared_memory_scratch_arena(sm).beg;
if (nvidia) glNamedBufferSubData(rf->ssbo, slot * rf->active_rf_size, (i32)size, data);
- else mem_copy(rf->buffer + slot * rf->active_rf_size, data, size);
+ else memory_copy_non_temporal(rf->buffer + slot * rf->active_rf_size, data, size);
+ store_fence();
os_shared_memory_region_unlock(ctx->shared_memory, sm->locks, (i32)scratch_lock);
post_sync_barrier(ctx->shared_memory, upload_lock, sm->locks);
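
The store_fence() added after the copy is load-bearing: non-temporal
stores are weakly ordered and may still sit in the write-combining
buffers when the region is unlocked. A hedged sketch of the required
ordering (publish_ready_flag is a hypothetical stand-in for the
unlock/barrier calls above):

    memory_copy_non_temporal(dst, src, size); /* weakly-ordered streaming stores       */
    store_fence();                            /* drain the write-combining buffers     */
    publish_ready_flag();                     /* hypothetical: only now signal readers */
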
diff --git a/intrinsics.c b/intrinsics.c
@@ -23,15 +23,18 @@
#define pack_struct(s) __pragma(pack(push, 1)) s __pragma(pack(pop))
#define no_return __declspec(noreturn)
- #define debugbreak() __debugbreak()
- #define unreachable() __assume(0)
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+
+ #define assume(x) __assume(x)
+ #define debugbreak() __debugbreak()
+ #define unreachable() __assume(0)
#if ARCH_ARM64
- #define cpu_yield() __yield()
+ #define cpu_yield() __yield()
+ #define store_fence() __dmb(0x0A) // 0x0A: ishst
#endif
- #define memory_write_barrier() _WriteBarrier()
-
#define atomic_add_u32(ptr, n) _InterlockedExchangeAdd((volatile u32 *)(ptr), (n))
#define atomic_add_u64(ptr, n) _InterlockedExchangeAdd64((volatile u64 *)(ptr), (n))
#define atomic_and_u32(ptr, n) _InterlockedAnd((volatile u32 *)(ptr), (n))
@@ -61,16 +64,23 @@
#define pack_struct(s) s __attribute__((packed))
#define no_return __attribute__((noreturn))
+ #define likely(x) (__builtin_expect(!!(x), 1))
+ #define unlikely(x) (__builtin_expect(!!(x), 0))
+
+ #if COMPILER_CLANG
+ #define assume(x) __builtin_assume(x)
+ #else
+ #define assume(x) __attribute__((assume(x)))
+ #endif
+ #define unreachable() __builtin_unreachable()
#if ARCH_ARM64
/* TODO? debuggers just loop here forever and need a manual PC increment (step over) */
- #define debugbreak() asm volatile ("brk 0xf000")
- #define cpu_yield() asm volatile ("yield")
+ #define debugbreak() asm volatile ("brk 0xf000")
+ #define cpu_yield() asm volatile ("yield")
+ #define store_fence() asm volatile ("dmb ishst" ::: "memory")
#else
- #define debugbreak() asm volatile ("int3; nop")
+ #define debugbreak() asm volatile ("int3; nop")
#endif
- #define unreachable() __builtin_unreachable()
-
- #define memory_write_barrier() asm volatile ("" ::: "memory")
#define atomic_add_u64(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_and_u64(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
@@ -212,6 +222,7 @@ typedef __m128i u32x4;
#define store_i32x4(o, a) _mm_storeu_si128((i32x4 *)o, a)
#define sub_f32x4(a, b) _mm_sub_ps(a, b)
-#define cpu_yield() _mm_pause()
+#define cpu_yield _mm_pause
+#define store_fence _mm_sfence
#endif
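
For context, a hedged sketch of how the new macros are meant to be used
(die and work_available are hypothetical; only the macros themselves
come from this commit):

    if unlikely(buffer == 0)   /* prediction hint; expands to plain (x) on MSVC */
    	die("allocation failed");
    assume((count & 63) == 0); /* lets the optimizer drop any scalar tail       */
    while (!work_available())
    	cpu_yield();           /* spin politely                                 */
    store_fence();             /* order prior stores before publishing them     */
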
diff --git a/lib/ogl_beamformer_lib.c b/lib/ogl_beamformer_lib.c
@@ -658,7 +658,7 @@ beamformer_set_live_parameters(BeamformerLiveImagingParameters *new)
b32 result = 0;
if (check_shared_memory()) {
mem_copy(&g_beamformer_library_context.bp->live_imaging_parameters, new, sizeof(*new));
- memory_write_barrier();
+ store_fence();
result = 1;
}
return result;
diff --git a/util.c b/util.c
@@ -19,14 +19,40 @@ mem_copy(void *restrict dest, void *restrict src, uz n)
{
u8 *s = src, *d = dest;
#ifdef __AVX512BW__
- for (; n >= 64; n -= 64, s += 64, d += 64)
- _mm512_storeu_epi8(d, _mm512_loadu_epi8(s));
- if (n > 0) {
- __mmask64 k = _cvtu64_mask64(_bzhi_u64(-1, n));
- _mm512_mask_storeu_epi8(d, k, _mm512_maskz_loadu_epi8(k, s));
+ {
+ for (; n >= 64; n -= 64, s += 64, d += 64)
+ _mm512_storeu_epi8(d, _mm512_loadu_epi8(s));
+ if (n > 0) {
+ __mmask64 k = _cvtu64_mask64(_bzhi_u64(-1, n));
+ _mm512_mask_storeu_epi8(d, k, _mm512_maskz_loadu_epi8(k, s));
+ }
+ }
+ #else
+ for (; n; n--) *d++ = *s++;
+ #endif
+}
+
+/* IMPORTANT: this function may fault if dest and src are not 64-byte aligned or n is not a multiple of 64 */
+function void
+memory_copy_non_temporal(void *restrict dest, void *restrict src, uz n)
+{
+ assume(((u64)dest & 63) == 0);
+ assume(((u64)src & 63) == 0);
+ assume(((u64)n & 63) == 0);
+	u8 *s = src, *d = dest;
+
+ #if defined(__AVX512BW__)
+ {
+ for (; n >= 64; n -= 64, s += 64, d += 64)
+ _mm512_stream_si512((__m512i *)d, _mm512_stream_load_si512((__m512i *)s));
+ }
+ #elif defined(__AVX2__)
+ {
+ for (; n >= 32; n -= 32, s += 32, d += 32)
+ _mm256_stream_si256((__m256i *)d, _mm256_stream_load_si256((__m256i *)s));
}
#else
- for (; n; n--) *d++ = *s++;
+ mem_copy(d, s, n);
#endif
}
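
A hedged usage sketch of the new copy (not from the commit): the caller
owns the 64-byte contract, e.g. by using an aligned destination and
rounding the copy size up, much like the round_up_to(..., 64) call in
beamformer.c:

    uz  padded = (size + 63) & ~(uz)63;         /* round the copy size up to 64     */
    u8 *dst    = aligned_alloc(64, padded);     /* 64-byte aligned destination      */
    memory_copy_non_temporal(dst, src, padded); /* src must also be 64-byte aligned */
    store_fence();                              /* before another agent reads dst   */
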