ogl_beamforming

Ultrasound Beamforming Implemented with OpenGL
git clone anongit@rnpnr.xyz:ogl_beamforming.git
Log | Files | Refs | Feed | Submodules | README | LICENSE

util.c (15547B)


      1 /* See LICENSE for license details. */
      2 #define zero_struct(s) mem_clear(s, 0, sizeof(*s))
      3 function void *
      4 mem_clear(void *restrict p_, u8 c, iz size)
      5 {
      6 	u8 *p = p_;
      7 	while (size > 0) p[--size] = c;
      8 	return p;
      9 }
     10 
     11 function void
     12 mem_copy(void *restrict dest, void *restrict src, uz n)
     13 {
     14 	u8 *s = src, *d = dest;
     15 	for (; n; n--) *d++ = *s++;
     16 }
     17 
     18 function void
     19 mem_move(u8 *dest, u8 *src, uz n)
     20 {
     21 	if (dest < src) mem_copy(dest, src, n);
     22 	else            while (n) { n--; dest[n] = src[n]; }
     23 }
     24 
     25 function void *
     26 arena_aligned_start(Arena a, uz alignment)
     27 {
     28 	uz padding = -(uintptr_t)a.beg & (alignment - 1);
     29 	u8 *result = a.beg + padding;
     30 	return result;
     31 }
     32 
     33 #define arena_capacity(a, t) arena_capacity_(a, sizeof(t), alignof(t))
     34 function iz
     35 arena_capacity_(Arena *a, iz size, uz alignment)
     36 {
     37 	iz available = a->end - (u8 *)arena_aligned_start(*a, alignment);
     38 	iz result    = available / size;
     39 	return result;
     40 }
     41 
     42 function u8 *
     43 arena_commit(Arena *a, iz size)
     44 {
     45 	assert(a->end - a->beg >= size);
     46 	u8 *result = a->beg;
     47 	a->beg += size;
     48 	return result;
     49 }
     50 
     51 function void
     52 arena_pop(Arena *a, iz length)
     53 {
     54 	a->beg -= length;
     55 }
     56 
     57 #define push_array(a, t, n) (t *)arena_alloc(a, sizeof(t), _Alignof(t), n)
     58 #define push_struct(a, t)   (t *)arena_alloc(a, sizeof(t), _Alignof(t), 1)
     59 function void *
     60 arena_alloc(Arena *a, iz len, uz align, iz count)
     61 {
     62 	void *result = 0;
     63 	if (a->beg) {
     64 		u8 *start = arena_aligned_start(*a, align);
     65 		iz available = a->end - start;
     66 		assert((available >= 0 && count <= available / len));
     67 		asan_unpoison_region(start, count * len);
     68 		a->beg = start + count * len;
     69 		/* TODO: Performance? */
     70 		result = mem_clear(start, 0, count * len);
     71 	}
     72 	return result;
     73 }
     74 
enum { DA_INITIAL_CAP = 16 };

/* Index of element pointer `it` within dynamic array `s`. */
#define da_index(it, s) ((it) - (s)->data)
/* Ensure `s` has room for n more elements, growing its buffer from arena `a`. */
#define da_reserve(a, s, n) \
  (s)->data = da_reserve_((a), (s)->data, &(s)->capacity, (s)->count + n, \
                          _Alignof(typeof(*(s)->data)), sizeof(*(s)->data))

/* Append item_count items copied from `items` onto dynamic array `s`. */
#define da_append_count(a, s, items, item_count) do { \
	da_reserve((a), (s), (item_count));                                             \
	mem_copy((s)->data + (s)->count, (items), sizeof(*(items)) * (uz)(item_count)); \
	(s)->count += (item_count);                                                     \
} while (0)

/* Append one uninitialized slot to `s` (growing when full); evaluates to its address. */
#define da_push(a, s) \
  ((s)->count == (s)->capacity  \
    ? da_reserve(a, s, 1),      \
      (s)->data + (s)->count++  \
    : (s)->data + (s)->count++)
     93 
/* Back-end for da_reserve: ensure the dynamic-array buffer `data` (element
 * size `size`, alignment `align`, current capacity *capacity elements) can
 * hold `needed` elements, growing from arena `a`. Returns the (possibly
 * relocated) buffer and updates *capacity. */
function void *
da_reserve_(Arena *a, void *data, iz *capacity, iz needed, uz align, iz size)
{
	iz cap = *capacity;

	/* NOTE(rnp): handle both 0 initialized DAs and DAs that need to be moved (they started
	 * on the stack or someone allocated something in the middle of the arena during usage) */
	if (!data || a->beg != (u8 *)data + cap * size) {
		void *copy = arena_alloc(a, size, align, cap);
		if (data) mem_copy(copy, data, (uz)(cap * size));
		data = copy;
	}

	/* grow geometrically until `needed` fits */
	if (!cap) cap = DA_INITIAL_CAP;
	while (cap < needed) cap *= 2;
	/* the buffer now ends at the arena head, so extending it in place is just
	 * allocating the additional elements */
	arena_alloc(a, size, align, cap - *capacity);
	*capacity = cap;
	return data;
}
    113 
    114 function Arena
    115 sub_arena(Arena *a, iz len, uz align)
    116 {
    117 	Arena result = {0};
    118 
    119 	uz padding = -(uintptr_t)a->beg & (align - 1);
    120 	result.beg   = a->beg + padding;
    121 	result.end   = result.beg + len;
    122 	arena_commit(a, len + (iz)padding);
    123 
    124 	return result;
    125 }
    126 
    127 function TempArena
    128 begin_temp_arena(Arena *a)
    129 {
    130 	TempArena result = {.arena = a, .old_beg = a->beg};
    131 	return result;
    132 }
    133 
    134 function void
    135 end_temp_arena(TempArena ta)
    136 {
    137 	Arena *a = ta.arena;
    138 	if (a) {
    139 		assert(a->beg >= ta.old_beg);
    140 		a->beg = ta.old_beg;
    141 	}
    142 }
    143 
    144 function u32
    145 utf8_encode(u8 *out, u32 cp)
    146 {
    147 	u32 result = 1;
    148 	if (cp <= 0x7F) {
    149 		out[0] = cp & 0x7F;
    150 	} else if (cp <= 0x7FF) {
    151 		result = 2;
    152 		out[0] = ((cp >>  6) & 0x1F) | 0xC0;
    153 		out[1] = ((cp >>  0) & 0x3F) | 0x80;
    154 	} else if (cp <= 0xFFFF) {
    155 		result = 3;
    156 		out[0] = ((cp >> 12) & 0x0F) | 0xE0;
    157 		out[1] = ((cp >>  6) & 0x3F) | 0x80;
    158 		out[2] = ((cp >>  0) & 0x3F) | 0x80;
    159 	} else if (cp <= 0x10FFFF) {
    160 		result = 4;
    161 		out[0] = ((cp >> 18) & 0x07) | 0xF0;
    162 		out[1] = ((cp >> 12) & 0x3F) | 0x80;
    163 		out[2] = ((cp >>  6) & 0x3F) | 0x80;
    164 		out[3] = ((cp >>  0) & 0x3F) | 0x80;
    165 	} else {
    166 		out[0] = '?';
    167 	}
    168 	return result;
    169 }
    170 
    171 function UnicodeDecode
    172 utf16_decode(u16 *data, iz length)
    173 {
    174 	UnicodeDecode result = {.cp = U32_MAX};
    175 	if (length) {
    176 		result.consumed = 1;
    177 		result.cp = data[0];
    178 		if (length > 1 && BETWEEN(data[0], 0xD800u, 0xDBFFu)
    179 		               && BETWEEN(data[1], 0xDC00u, 0xDFFFu))
    180 		{
    181 			result.consumed = 2;
    182 			result.cp = ((data[0] - 0xD800u) << 10u) | ((data[1] - 0xDC00u) + 0x10000u);
    183 		}
    184 	}
    185 	return result;
    186 }
    187 
    188 function u32
    189 utf16_encode(u16 *out, u32 cp)
    190 {
    191 	u32 result = 1;
    192 	if (cp == U32_MAX) {
    193 		out[0] = '?';
    194 	} else if (cp < 0x10000u) {
    195 		out[0] = (u16)cp;
    196 	} else {
    197 		u32 value = cp - 0x10000u;
    198 		out[0] = (u16)(0xD800u + (value >> 10u));
    199 		out[1] = (u16)(0xDC00u + (value & 0x3FFu));
    200 		result = 2;
    201 	}
    202 	return result;
    203 }
    204 
    205 function Stream
    206 stream_from_buffer(u8 *buffer, u32 capacity)
    207 {
    208 	Stream result = {.data = buffer, .cap = (i32)capacity};
    209 	return result;
    210 }
    211 
    212 function Stream
    213 stream_alloc(Arena *a, i32 cap)
    214 {
    215 	Stream result = stream_from_buffer(arena_commit(a, cap), (u32)cap);
    216 	return result;
    217 }
    218 
    219 function s8
    220 stream_to_s8(Stream *s)
    221 {
    222 	s8 result = s8("");
    223 	if (!s->errors) result = (s8){.len = s->widx, .data = s->data};
    224 	return result;
    225 }
    226 
    227 function void
    228 stream_reset(Stream *s, i32 index)
    229 {
    230 	s->errors = s->cap <= index;
    231 	if (!s->errors)
    232 		s->widx = index;
    233 }
    234 
    235 function void
    236 stream_commit(Stream *s, i32 count)
    237 {
    238 	s->errors |= !BETWEEN(s->widx + count, 0, s->cap);
    239 	if (!s->errors)
    240 		s->widx += count;
    241 }
    242 
    243 function void
    244 stream_append(Stream *s, void *data, iz count)
    245 {
    246 	s->errors |= (s->cap - s->widx) < count;
    247 	if (!s->errors) {
    248 		mem_copy(s->data + s->widx, data, (uz)count);
    249 		s->widx += (i32)count;
    250 	}
    251 }
    252 
    253 function void
    254 stream_append_byte(Stream *s, u8 b)
    255 {
    256 	stream_append(s, &b, 1);
    257 }
    258 
    259 function void
    260 stream_pad(Stream *s, u8 b, i32 n)
    261 {
    262 	while (n > 0) stream_append_byte(s, b), n--;
    263 }
    264 
    265 function void
    266 stream_append_s8(Stream *s, s8 str)
    267 {
    268 	stream_append(s, str.data, str.len);
    269 }
    270 
    271 #define stream_append_s8s(s, ...) stream_append_s8s_(s, arg_list(s8, ##__VA_ARGS__))
    272 function void
    273 stream_append_s8s_(Stream *s, s8 *strs, iz count)
    274 {
    275 	for (iz i = 0; i < count; i++)
    276 		stream_append(s, strs[i].data, strs[i].len);
    277 }
    278 
    279 function void
    280 stream_append_u64_width(Stream *s, u64 n, u64 min_width)
    281 {
    282 	u8 tmp[64];
    283 	u8 *end = tmp + sizeof(tmp);
    284 	u8 *beg = end;
    285 	min_width = MIN(sizeof(tmp), min_width);
    286 
    287 	do { *--beg = (u8)('0' + (n % 10)); } while (n /= 10);
    288 	while (end - beg > 0 && (uz)(end - beg) < min_width)
    289 		*--beg = '0';
    290 
    291 	stream_append(s, beg, end - beg);
    292 }
    293 
    294 function void
    295 stream_append_u64(Stream *s, u64 n)
    296 {
    297 	stream_append_u64_width(s, n, 0);
    298 }
    299 
    300 function void
    301 stream_append_hex_u64_width(Stream *s, u64 n, iz width)
    302 {
    303 	assert(width <= 16);
    304 	if (!s->errors) {
    305 		u8  buf[16];
    306 		u8 *end = buf + sizeof(buf);
    307 		u8 *beg = end;
    308 		while (n) {
    309 			*--beg = (u8)"0123456789abcdef"[n & 0x0F];
    310 			n >>= 4;
    311 		}
    312 		while (end - beg < width)
    313 			*--beg = '0';
    314 		stream_append(s, beg, end - beg);
    315 	}
    316 }
    317 
    318 function void
    319 stream_append_hex_u64(Stream *s, u64 n)
    320 {
    321 	stream_append_hex_u64_width(s, n, 2);
    322 }
    323 
    324 function void
    325 stream_append_i64(Stream *s, i64 n)
    326 {
    327 	if (n < 0) {
    328 		stream_append_byte(s, '-');
    329 		n *= -1;
    330 	}
    331 	stream_append_u64(s, (u64)n);
    332 }
    333 
/* Append f in fixed-point decimal. `prec` is a power of ten selecting the
 * fractional precision (e.g. 100 -> 2 fractional digits) — TODO confirm all
 * callers pass powers of ten. Values whose integral part exceeds u64 range
 * print "inf". */
function void
stream_append_f64(Stream *s, f64 f, u64 prec)
{
	if (f < 0) {
		stream_append_byte(s, '-');
		f *= -1;
	}

	/* NOTE: round last digit */
	f += 0.5f / (f64)prec;

	if (f >= (f64)(-1UL >> 1)) {
		stream_append_s8(s, s8("inf"));
	} else {
		u64 integral = (u64)f;
		u64 fraction = (u64)((f - (f64)integral) * (f64)prec);
		stream_append_u64(s, integral);
		stream_append_byte(s, '.');
		/* zero-pad the fractional part out to the requested precision */
		for (u64 i = prec / 10; i > 1; i /= 10) {
			if (i > fraction)
				stream_append_byte(s, '0');
		}
		stream_append_u64(s, fraction);
	}
}
    359 
/* Append f in scientific notation with 2 fractional digits (e.g. "1.50e+3").
 * The mantissa is normalized into (0.1, 1]-ish range by repeated
 * multiplication, so the result is approximate for extreme exponents. */
function void
stream_append_f64_e(Stream *s, f64 f)
{
	/* TODO: there should be a better way of doing this */
	#if 0
	/* NOTE: we ignore subnormal numbers for now */
	union { f64 f; u64 u; } u = {.f = f};
	i32 exponent = ((u.u >> 52) & 0x7ff) - 1023;
	f32 log_10_of_2 = 0.301f;
	i32 scale       = (exponent * log_10_of_2);
	/* NOTE: normalize f */
	for (i32 i = ABS(scale); i > 0; i--)
		f *= (scale > 0)? 0.1f : 10.0f;
	#else
	/* count decimal shifts needed to bring f near 1; scale is the exponent */
	i32 scale = 0;
	if (f != 0) {
		while (f > 1) {
			f *= 0.1f;
			scale++;
		}
		while (f < 1) {
			f *= 10.0f;
			scale--;
		}
	}
	#endif

	u32 prec = 100;
	stream_append_f64(s, f, prec);
	stream_append_byte(s, 'e');
	stream_append_byte(s, scale >= 0? '+' : '-');
	/* zero-pad the exponent to match the precision width */
	for (u32 i = prec / 10; i > 1; i /= 10)
		stream_append_byte(s, '0');
	stream_append_u64(s, (u64)ABS(scale));
}
    395 
    396 function void
    397 stream_append_v2(Stream *s, v2 v)
    398 {
    399 	stream_append_byte(s, '{');
    400 	stream_append_f64(s, v.x, 100);
    401 	stream_append_s8(s, s8(", "));
    402 	stream_append_f64(s, v.y, 100);
    403 	stream_append_byte(s, '}');
    404 }
    405 
    406 function Stream
    407 arena_stream(Arena a)
    408 {
    409 	Stream result = {0};
    410 	result.data   = a.beg;
    411 	result.cap    = (i32)(a.end - a.beg);
    412 
    413 	/* TODO(rnp): no idea what to do here if we want to maintain the ergonomics */
    414 	asan_unpoison_region(result.data, result.cap);
    415 
    416 	return result;
    417 }
    418 
    419 function s8
    420 arena_stream_commit(Arena *a, Stream *s)
    421 {
    422 	ASSERT(s->data == a->beg);
    423 	s8 result = stream_to_s8(s);
    424 	arena_commit(a, result.len);
    425 	return result;
    426 }
    427 
    428 function s8
    429 arena_stream_commit_zero(Arena *a, Stream *s)
    430 {
    431 	b32 error = s->errors || s->widx == s->cap;
    432 	if (!error)
    433 		s->data[s->widx] = 0;
    434 	s8 result = stream_to_s8(s);
    435 	arena_commit(a, result.len + 1);
    436 	return result;
    437 }
    438 
    439 function s8
    440 arena_stream_commit_and_reset(Arena *arena, Stream *s)
    441 {
    442 	s8 result = arena_stream_commit_zero(arena, s);
    443 	*s = arena_stream(*arena);
    444 	return result;
    445 }
    446 
    447 #if !defined(XXH_IMPLEMENTATION)
    448 # define XXH_INLINE_ALL
    449 # define XXH_IMPLEMENTATION
    450 # define XXH_STATIC_LINKING_ONLY
    451 # include "external/xxhash.h"
    452 #endif
    453 
    454 function u128
    455 u128_hash_from_data(void *data, uz size)
    456 {
    457 	u128 result = {0};
    458 	XXH128_hash_t hash = XXH3_128bits_withSeed(data, size, 4969);
    459 	mem_copy(&result, &hash, sizeof(result));
    460 	return result;
    461 }
    462 
    463 function u64
    464 u64_hash_from_s8(s8 v)
    465 {
    466 	u64 result = XXH3_64bits_withSeed(v.data, (uz)v.len, 4969);
    467 	return result;
    468 }
    469 
    470 function s8
    471 c_str_to_s8(char *cstr)
    472 {
    473 	s8 result = {.data = (u8 *)cstr};
    474 	if (cstr) { while (*cstr) { result.len++; cstr++; } }
    475 	return result;
    476 }
    477 
    478 /* NOTE(rnp): returns < 0 if byte is not found */
    479 function iz
    480 s8_scan_backwards(s8 s, u8 byte)
    481 {
    482 	iz result = s.len;
    483 	while (result && s.data[result - 1] != byte) result--;
    484 	result--;
    485 	return result;
    486 }
    487 
    488 function s8
    489 s8_cut_head(s8 s, iz cut)
    490 {
    491 	s8 result = s;
    492 	if (cut > 0) {
    493 		result.data += cut;
    494 		result.len  -= cut;
    495 	}
    496 	return result;
    497 }
    498 
    499 function s8
    500 s8_alloc(Arena *a, iz len)
    501 {
    502 	s8 result = {.data = push_array(a, u8, len), .len = len};
    503 	return result;
    504 }
    505 
/* Convert UTF-16 `in` to a NUL terminated UTF-8 s8 allocated from `a`.
 * Worst case is 4 output bytes per input u16 (a surrogate pair consumes 2
 * units for 4 bytes; a lone BMP unit needs at most 3), so in.len * 4 always
 * suffices; the unused tail is returned to the arena. */
function s8
s16_to_s8(Arena *a, s16 in)
{
	s8 result = s8("");
	if (in.len) {
		iz commit = in.len * 4;
		iz length = 0;
		u8 *data = arena_commit(a, commit + 1); /* +1 for the NUL */
		u16 *beg = in.data;
		u16 *end = in.data + in.len;
		while (beg < end) {
			UnicodeDecode decode = utf16_decode(beg, end - beg);
			length += utf8_encode(data + length, decode.cp);
			beg    += decode.consumed;
		}
		data[length] = 0;
		result = (s8){.len = length, .data = data};
		/* return the over-committed bytes (NUL byte stays committed) */
		arena_pop(a, commit - length);
	}
	return result;
}
    527 
    528 function s16
    529 s8_to_s16(Arena *a, s8 in)
    530 {
    531 	s16 result = {0};
    532 	if (in.len) {
    533 		iz required = 2 * in.len + 1;
    534 		u16 *data   = push_array(a, u16, required);
    535 		iz length   = 0;
    536 		/* TODO(rnp): utf8_decode */
    537 		for (iz i = 0; i < in.len; i++) {
    538 			u32 cp  = in.data[i];
    539 			length += utf16_encode(data + length, cp);
    540 		}
    541 		result = (s16){.len = length, .data = data};
    542 		arena_pop(a, required - length);
    543 	}
    544 	return result;
    545 }
    546 
    547 #define push_s8_from_parts(a, j, ...) push_s8_from_parts_((a), (j), arg_list(s8, __VA_ARGS__))
    548 function s8
    549 push_s8_from_parts_(Arena *arena, s8 joiner, s8 *parts, iz count)
    550 {
    551 	iz length = joiner.len * (count - 1);
    552 	for (iz i = 0; i < count; i++)
    553 		length += parts[i].len;
    554 
    555 	s8 result = {.len = length, .data = arena_commit(arena, length + 1)};
    556 
    557 	iz offset = 0;
    558 	for (iz i = 0; i < count; i++) {
    559 		if (i != 0) {
    560 			mem_copy(result.data + offset, joiner.data, (uz)joiner.len);
    561 			offset += joiner.len;
    562 		}
    563 		mem_copy(result.data + offset, parts[i].data, (uz)parts[i].len);
    564 		offset += parts[i].len;
    565 	}
    566 	result.data[result.len] = 0;
    567 
    568 	return result;
    569 }
    570 
    571 function s8
    572 push_s8(Arena *a, s8 str)
    573 {
    574 	s8 result   = s8_alloc(a, str.len + 1);
    575 	result.len -= 1;
    576 	mem_copy(result.data, str.data, (uz)result.len);
    577 	return result;
    578 }
    579 
/* Largest power of two <= a.
 * NOTE(review): assumes a != 0 — clz_u32(0) is typically undefined/width-
 * dependent; confirm callers never pass 0. */
function force_inline u32
round_down_power_of_2(u32 a)
{
	u32 result = 0x80000000UL >> clz_u32(a);
	return result;
}
    586 
/* Smallest power of two >= a.
 * NOTE(review): assumes a > 1 — for a == 1 this calls clz_u32(0), which is
 * typically undefined; confirm callers. */
function force_inline u32
round_up_power_of_2(u32 a)
{
	u32 result = 0x80000000UL >> (clz_u32(a - 1) - 1);
	return result;
}
    593 
    594 function force_inline iz
    595 round_up_to(iz value, iz multiple)
    596 {
    597 	iz result = value;
    598 	if (value % multiple != 0)
    599 		result += multiple - value % multiple;
    600 	return result;
    601 }
    602 
    603 function void
    604 split_rect_horizontal(Rect rect, f32 fraction, Rect *left, Rect *right)
    605 {
    606 	if (left) {
    607 		left->pos    = rect.pos;
    608 		left->size.h = rect.size.h;
    609 		left->size.w = rect.size.w * fraction;
    610 	}
    611 	if (right) {
    612 		right->pos    = rect.pos;
    613 		right->pos.x += rect.size.w * fraction;
    614 		right->size.h = rect.size.h;
    615 		right->size.w = rect.size.w * (1.0f - fraction);
    616 	}
    617 }
    618 
    619 function void
    620 split_rect_vertical(Rect rect, f32 fraction, Rect *top, Rect *bot)
    621 {
    622 	if (top) {
    623 		top->pos    = rect.pos;
    624 		top->size.w = rect.size.w;
    625 		top->size.h = rect.size.h * fraction;
    626 	}
    627 	if (bot) {
    628 		bot->pos    = rect.pos;
    629 		bot->pos.y += rect.size.h * fraction;
    630 		bot->size.w = rect.size.w;
    631 		bot->size.h = rect.size.h * (1.0f - fraction);
    632 	}
    633 }
    634 
    635 function void
    636 cut_rect_horizontal(Rect rect, f32 at, Rect *left, Rect *right)
    637 {
    638 	at = MIN(at, rect.size.w);
    639 	if (left) {
    640 		*left = rect;
    641 		left->size.w = at;
    642 	}
    643 	if (right) {
    644 		*right = rect;
    645 		right->pos.x  += at;
    646 		right->size.w -= at;
    647 	}
    648 }
    649 
    650 function void
    651 cut_rect_vertical(Rect rect, f32 at, Rect *top, Rect *bot)
    652 {
    653 	at = MIN(at, rect.size.h);
    654 	if (top) {
    655 		*top = rect;
    656 		top->size.h = at;
    657 	}
    658 	if (bot) {
    659 		*bot = rect;
    660 		bot->pos.y  += at;
    661 		bot->size.h -= at;
    662 	}
    663 }
    664 
    665 function IntegerConversion
    666 integer_from_s8(s8 raw)
    667 {
    668 	IntegerConversion result = {0};
    669 
    670 	iz  i     = 0;
    671 	i64 scale = 1;
    672 	if (raw.len && raw.data[0] == '-') {
    673 		scale = -1;
    674 		i     =  1;
    675 	}
    676 
    677 	for (; i < raw.len; i++) {
    678 		i64 digit = (i64)raw.data[i] - '0';
    679 		if (BETWEEN(digit, 0, 9)) {
    680 			if (result.U64 > (U64_MAX - (u64)digit) / 10) {
    681 				result.result = IntegerConversionResult_OutOfRange;
    682 				result.U64    = U64_MAX;
    683 			} else {
    684 				result.U64 = 10 * result.U64 + (u64)digit;
    685 			}
    686 		} else {
    687 			break;
    688 		}
    689 	}
    690 	result.unparsed = (s8){.len = raw.len - i, .data = raw.data + i};
    691 	result.result   = IntegerConversionResult_Success;
    692 	result.S64      = (i64)result.U64 * scale;
    693 
    694 	return result;
    695 }
    696 
    697 function f64
    698 parse_f64(s8 s)
    699 {
    700 	IntegerConversion integral = integer_from_s8(s);
    701 
    702 	s = integral.unparsed;
    703 	if (*s.data == '.') { s.data++; s.len--; }
    704 	while (s.len > 0 && s.data[s.len - 1] == '0') s.len--;
    705 
    706 	IntegerConversion fractional = integer_from_s8(s);
    707 
    708 	u64 power = (u64)(fractional.unparsed.data - s.data);
    709 	f64 frac  = (f64)fractional.U64;
    710 	while (power > 0) { frac /= 10.0; power--; }
    711 
    712 	f64 result = (f64)integral.S64 + frac;
    713 	return result;
    714 }
    715 
    716 function FileWatchDirectory *
    717 lookup_file_watch_directory(FileWatchDirectoryList *ctx, u64 hash)
    718 {
    719 	FileWatchDirectory *result = 0;
    720 	for (u32 i = 0; i < ctx->count; i++) {
    721 		FileWatchDirectory *test = ctx->data + i;
    722 		if (test->hash == hash) {
    723 			result = test;
    724 			break;
    725 		}
    726 	}
    727 	return result;
    728 }