ogl_beamforming

Ultrasound Beamforming Implemented with OpenGL
git clone anongit@rnpnr.xyz:ogl_beamforming.git

util.c (16692B)


/* See LICENSE for license details. */
#if   COMPILER_CLANG
  #pragma GCC diagnostic ignored "-Winitializer-overrides"
#elif COMPILER_GCC
  #pragma GCC diagnostic ignored "-Woverride-init"
#endif

#define zero_struct(s) mem_clear(s, 0, sizeof(*s))
function void *
mem_clear(void *restrict p_, u8 c, iz size)
{
	u8 *p = p_;
	while (size > 0) p[--size] = c;
	return p;
}

function void
mem_copy(void *restrict dest, void *restrict src, uz n)
{
	u8 *s = src, *d = dest;
	#ifdef __AVX512BW__
	for (; n >= 64; n -= 64, s += 64, d += 64)
		_mm512_storeu_epi8(d, _mm512_loadu_epi8(s));
	if (n > 0) {
		__mmask64 k = _cvtu64_mask64(_bzhi_u64(-1, n));
		_mm512_mask_storeu_epi8(d, k, _mm512_maskz_loadu_epi8(k, s));
	}
	#else
	for (; n; n--) *d++ = *s++;
	#endif
}

function void
mem_move(u8 *dest, u8 *src, uz n)
{
	if (dest < src) mem_copy(dest, src, n);
	else            while (n) { n--; dest[n] = src[n]; }
}

function void *
memory_scan_backwards(void *memory, u8 byte, iz n)
{
	void *result = 0;
	u8   *s      = memory;
	while (n > 0) if (s[--n] == byte) { result = s + n; break; }
	return result;
}

function void *
arena_aligned_start(Arena a, uz alignment)
{
	uz padding = -(uintptr_t)a.beg & (alignment - 1);
	u8 *result = a.beg + padding;
	return result;
}

#define arena_capacity(a, t) arena_capacity_(a, sizeof(t), alignof(t))
function iz
arena_capacity_(Arena *a, iz size, uz alignment)
{
	iz available = a->end - (u8 *)arena_aligned_start(*a, alignment);
	iz result    = available / size;
	return result;
}

function u8 *
arena_commit(Arena *a, iz size)
{
	assert(a->end - a->beg >= size);
	u8 *result = a->beg;
	a->beg += size;
	return result;
}

function void
arena_pop(Arena *a, iz length)
{
	a->beg -= length;
}

typedef enum {
	ArenaAllocateFlags_NoZero = 1 << 0,
} ArenaAllocateFlags;

typedef struct {
	iz size;
	uz align;
	iz count;
	ArenaAllocateFlags flags;
} ArenaAllocateInfo;

#define arena_alloc(a, ...)         arena_alloc_(a, (ArenaAllocateInfo){.align = 8, .count = 1, ##__VA_ARGS__})
#define push_array(a, t, n)         (t *)arena_alloc(a, .size = sizeof(t), .align = alignof(t), .count = n)
#define push_array_no_zero(a, t, n) (t *)arena_alloc(a, .size = sizeof(t), .align = alignof(t), .count = n, .flags = ArenaAllocateFlags_NoZero)
#define push_struct(a, t)           push_array(a, t, 1)
#define push_struct_no_zero(a, t)   push_array_no_zero(a, t, 1)

function void *
arena_alloc_(Arena *a, ArenaAllocateInfo info)
{
	void *result = 0;
	if (a->beg) {
		u8 *start = arena_aligned_start(*a, info.align);
		iz available = a->end - start;
		assert((available >= 0 && info.count <= available / info.size));
		asan_unpoison_region(start, info.count * info.size);
		a->beg = start + info.count * info.size;
		result = start;
		if ((info.flags & ArenaAllocateFlags_NoZero) == 0)
			result = mem_clear(start, 0, info.count * info.size);
	}
	return result;
}
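
/* NOTE(editor): illustrative usage sketch; `arena` and `Point` are hypothetical and not
 * from this codebase. It shows the allocation macros above against an Arena whose
 * beg/end already point at a backing buffer:
 *
 *   typedef struct { f32 x, y, z; } Point;
 *   Point *one     = push_struct(&arena, Point);            // zeroed
 *   Point *many    = push_array(&arena, Point, 128);        // zeroed, aligned to alignof(Point)
 *   Point *scratch = push_array_no_zero(&arena, Point, 8);  // caller initializes
 *   iz     fits    = arena_capacity(&arena, Point);         // how many more Points would fit
 */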

function Arena
sub_arena(Arena *a, iz size, uz align)
{
	Arena result = {.beg = arena_alloc(a, .size = size, .align = align, .flags = ArenaAllocateFlags_NoZero)};
	result.end   = result.beg + size;
	return result;
}

function Arena
sub_arena_end(Arena *a, iz len, uz align)
{
	Arena result;
	result.beg = (u8 *)((uintptr_t)(a->end - len) & ~(align - 1));
	result.end = a->end;

	a->end = result.beg;
	assert(a->end >= a->beg);

	return result;
}

function TempArena
begin_temp_arena(Arena *a)
{
	TempArena result = {.arena = a, .old_beg = a->beg};
	return result;
}

function void
end_temp_arena(TempArena ta)
{
	Arena *a = ta.arena;
	if (a) {
		assert(a->beg >= ta.old_beg);
		a->beg = ta.old_beg;
	}
}
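
/* NOTE(editor): illustrative scratch-allocation sketch; `arena` is hypothetical.
 * begin/end_temp_arena bracket temporary allocations so the arena watermark is
 * restored on exit:
 *
 *   TempArena scratch = begin_temp_arena(&arena);
 *   u8 *tmp = push_array(&arena, u8, 4096);
 *   // ... use tmp ...
 *   end_temp_arena(scratch);   // rewinds arena.beg to where it was
 */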


enum { DA_INITIAL_CAP = 16 };

#define da_index(it, s) ((it) - (s)->data)
#define da_reserve(a, s, n) \
  (s)->data = da_reserve_((a), (s)->data, &(s)->capacity, (s)->count + n, \
                          _Alignof(typeof(*(s)->data)), sizeof(*(s)->data))

#define da_append_count(a, s, items, item_count) do { \
	da_reserve((a), (s), (item_count));                                             \
	mem_copy((s)->data + (s)->count, (items), sizeof(*(items)) * (uz)(item_count)); \
	(s)->count += (item_count);                                                     \
} while (0)

#define da_push(a, s) \
  ((s)->count == (s)->capacity  \
    ? da_reserve(a, s, 1),      \
      (s)->data + (s)->count++  \
    : (s)->data + (s)->count++)

function void *
da_reserve_(Arena *a, void *data, iz *capacity, iz needed, uz align, iz size)
{
	iz cap = *capacity;

	/* NOTE(rnp): handle both 0 initialized DAs and DAs that need to be moved (they started
	 * on the stack or someone allocated something in the middle of the arena during usage) */
	if (!data || a->beg != (u8 *)data + cap * size) {
		void *copy = arena_alloc(a, .size = size, .align = align, .count = cap);
		if (data) mem_copy(copy, data, (uz)(cap * size));
		data = copy;
	}

	if (!cap) cap = DA_INITIAL_CAP;
	while (cap < needed) cap *= 2;
	arena_alloc(a, .size = size, .align = align, .count = cap - *capacity);
	*capacity = cap;
	return data;
}
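
/* NOTE(editor): illustrative dynamic-array sketch; `F32List`, `arena`, `values` and
 * `value_count` are hypothetical. The da_* macros work on any struct with
 * data/count/capacity members:
 *
 *   typedef struct { f32 *data; iz count, capacity; } F32List;
 *   F32List samples = {0};
 *   *da_push(&arena, &samples) = 1.0f;                       // grows capacity on demand
 *   da_append_count(&arena, &samples, values, value_count);
 *   iz last = da_index(da_push(&arena, &samples), &samples); // index of the new slot
 */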

function u32
utf8_encode(u8 *out, u32 cp)
{
	u32 result = 1;
	if (cp <= 0x7F) {
		out[0] = cp & 0x7F;
	} else if (cp <= 0x7FF) {
		result = 2;
		out[0] = ((cp >>  6) & 0x1F) | 0xC0;
		out[1] = ((cp >>  0) & 0x3F) | 0x80;
	} else if (cp <= 0xFFFF) {
		result = 3;
		out[0] = ((cp >> 12) & 0x0F) | 0xE0;
		out[1] = ((cp >>  6) & 0x3F) | 0x80;
		out[2] = ((cp >>  0) & 0x3F) | 0x80;
	} else if (cp <= 0x10FFFF) {
		result = 4;
		out[0] = ((cp >> 18) & 0x07) | 0xF0;
		out[1] = ((cp >> 12) & 0x3F) | 0x80;
		out[2] = ((cp >>  6) & 0x3F) | 0x80;
		out[3] = ((cp >>  0) & 0x3F) | 0x80;
	} else {
		out[0] = '?';
	}
	return result;
}
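
/* NOTE(editor): worked encoding examples for the function above:
 *   utf8_encode(out, 0x24)    -> 1 byte:  24          ('$')
 *   utf8_encode(out, 0xE9)    -> 2 bytes: C3 A9       ('é')
 *   utf8_encode(out, 0x1F600) -> 4 bytes: F0 9F 98 80 (outside the BMP)
 * Code points above 0x10FFFF fall through to the '?' fallback with a reported length of 1.
 */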

function UnicodeDecode
utf16_decode(u16 *data, iz length)
{
	UnicodeDecode result = {.cp = U32_MAX};
	if (length) {
		result.consumed = 1;
		result.cp = data[0];
		if (length > 1 && BETWEEN(data[0], 0xD800u, 0xDBFFu)
		               && BETWEEN(data[1], 0xDC00u, 0xDFFFu))
		{
			result.consumed = 2;
			/* NOTE: cp = 0x10000 + (high surrogate offset << 10) + low surrogate offset */
			result.cp = 0x10000u + (((data[0] - 0xD800u) << 10u) | (data[1] - 0xDC00u));
		}
	}
	return result;
}

function u32
utf16_encode(u16 *out, u32 cp)
{
	u32 result = 1;
	if (cp == U32_MAX) {
		out[0] = '?';
	} else if (cp < 0x10000u) {
		out[0] = (u16)cp;
	} else {
		u32 value = cp - 0x10000u;
		out[0] = (u16)(0xD800u + (value >> 10u));
		out[1] = (u16)(0xDC00u + (value & 0x3FFu));
		result = 2;
	}
	return result;
}
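
/* NOTE(editor): worked surrogate-pair example for the two functions above:
 *   U+1F600 is the pair D83D DE00; utf16_decode on {0xD83D, 0xDE00} consumes 2 units and
 *   yields cp = 0x1F600, and utf16_encode(out, 0x1F600) reproduces the pair.
 *   Check: 0x1F600 - 0x10000 = 0xF600; 0xD800 + (0xF600 >> 10)   = 0xD83D;
 *                                      0xDC00 + (0xF600 & 0x3FF) = 0xDE00.
 */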

function Stream
stream_from_buffer(u8 *buffer, u32 capacity)
{
	Stream result = {.data = buffer, .cap = (i32)capacity};
	return result;
}

function Stream
stream_alloc(Arena *a, i32 cap)
{
	Stream result = stream_from_buffer(arena_commit(a, cap), (u32)cap);
	return result;
}

function s8
stream_to_s8(Stream *s)
{
	s8 result = s8("");
	if (!s->errors) result = (s8){.len = s->widx, .data = s->data};
	return result;
}

function void
stream_reset(Stream *s, i32 index)
{
	s->errors = s->cap <= index;
	if (!s->errors)
		s->widx = index;
}

function void
stream_commit(Stream *s, i32 count)
{
	s->errors |= !BETWEEN(s->widx + count, 0, s->cap);
	if (!s->errors)
		s->widx += count;
}

function void
stream_append(Stream *s, void *data, iz count)
{
	s->errors |= (s->cap - s->widx) < count;
	if (!s->errors) {
		mem_copy(s->data + s->widx, data, (uz)count);
		s->widx += (i32)count;
	}
}

function void
stream_append_byte(Stream *s, u8 b)
{
	stream_append(s, &b, 1);
}

function void
stream_pad(Stream *s, u8 b, i32 n)
{
	while (n > 0) stream_append_byte(s, b), n--;
}

function void
stream_append_s8(Stream *s, s8 str)
{
	stream_append(s, str.data, str.len);
}

#define stream_append_s8s(s, ...) stream_append_s8s_(s, arg_list(s8, ##__VA_ARGS__))
function void
stream_append_s8s_(Stream *s, s8 *strs, iz count)
{
	for (iz i = 0; i < count; i++)
		stream_append(s, strs[i].data, strs[i].len);
}

function void
stream_append_u64_width(Stream *s, u64 n, u64 min_width)
{
	u8 tmp[64];
	u8 *end = tmp + sizeof(tmp);
	u8 *beg = end;
	min_width = MIN(sizeof(tmp), min_width);

	do { *--beg = (u8)('0' + (n % 10)); } while (n /= 10);
	while (end - beg > 0 && (uz)(end - beg) < min_width)
		*--beg = '0';

	stream_append(s, beg, end - beg);
}
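
/* NOTE(editor): illustrative output of the function above:
 *   stream_append_u64_width(s, 42, 4) appends "0042";
 *   stream_append_u64_width(s, 42, 0) appends "42" (no padding).
 */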

function void
stream_append_u64(Stream *s, u64 n)
{
	stream_append_u64_width(s, n, 0);
}

function void
stream_append_hex_u64_width(Stream *s, u64 n, iz width)
{
	assert(width <= 16);
	if (!s->errors) {
		u8  buf[16];
		u8 *end = buf + sizeof(buf);
		u8 *beg = end;
		while (n) {
			*--beg = (u8)"0123456789abcdef"[n & 0x0F];
			n >>= 4;
		}
		while (end - beg < width)
			*--beg = '0';
		stream_append(s, beg, end - beg);
	}
}

function void
stream_append_hex_u64(Stream *s, u64 n)
{
	stream_append_hex_u64_width(s, n, 2);
}

function void
stream_append_i64(Stream *s, i64 n)
{
	if (n < 0) {
		stream_append_byte(s, '-');
		n *= -1;
	}
	stream_append_u64(s, (u64)n);
}

function void
stream_append_f64(Stream *s, f64 f, u64 prec)
{
	if (f < 0) {
		stream_append_byte(s, '-');
		f *= -1;
	}

	/* NOTE: round last digit */
	f += 0.5f / (f64)prec;

	if (f >= (f64)(-1UL >> 1)) {
		stream_append_s8(s, s8("inf"));
	} else {
		u64 integral = (u64)f;
		u64 fraction = (u64)((f - (f64)integral) * (f64)prec);
		stream_append_u64(s, integral);
		stream_append_byte(s, '.');
		for (u64 i = prec / 10; i > 1; i /= 10) {
			if (i > fraction)
				stream_append_byte(s, '0');
		}
		stream_append_u64(s, fraction);
	}
}
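
/* NOTE(editor): illustrative usage of the function above. prec is the power of ten of
 * the fractional resolution, not a digit count:
 *   stream_append_f64(s, 3.14159, 100)  appends "3.14"  (two fractional digits)
 *   stream_append_f64(s, 0.05,    1000) appends "0.050" (three fractional digits)
 */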

function void
stream_append_f64_e(Stream *s, f64 f)
{
	/* TODO: there should be a better way of doing this */
	#if 0
	/* NOTE: we ignore subnormal numbers for now */
	union { f64 f; u64 u; } u = {.f = f};
	i32 exponent = ((u.u >> 52) & 0x7ff) - 1023;
	f32 log_10_of_2 = 0.301f;
	i32 scale       = (exponent * log_10_of_2);
	/* NOTE: normalize f */
	for (i32 i = ABS(scale); i > 0; i--)
		f *= (scale > 0)? 0.1f : 10.0f;
	#else
	i32 scale = 0;
	if (f != 0) {
		while (f > 1) {
			f *= 0.1f;
			scale++;
		}
		while (f < 1) {
			f *= 10.0f;
			scale--;
		}
	}
	#endif

	u32 prec = 100;
	stream_append_f64(s, f, prec);
	stream_append_byte(s, 'e');
	stream_append_byte(s, scale >= 0? '+' : '-');
	for (u32 i = prec / 10; i > 1; i /= 10)
		stream_append_byte(s, '0');
	stream_append_u64(s, (u64)ABS(scale));
}

function void
stream_append_v2(Stream *s, v2 v)
{
	stream_append_byte(s, '{');
	stream_append_f64(s, v.x, 100);
	stream_append_s8(s, s8(", "));
	stream_append_f64(s, v.y, 100);
	stream_append_byte(s, '}');
}

function Stream
arena_stream(Arena a)
{
	Stream result = {0};
	result.data   = a.beg;
	result.cap    = (i32)(a.end - a.beg);

	/* TODO(rnp): no idea what to do here if we want to maintain the ergonomics */
	asan_unpoison_region(result.data, result.cap);

	return result;
}

function s8
arena_stream_commit(Arena *a, Stream *s)
{
	ASSERT(s->data == a->beg);
	s8 result = stream_to_s8(s);
	arena_commit(a, result.len);
	return result;
}

function s8
arena_stream_commit_zero(Arena *a, Stream *s)
{
	b32 error = s->errors || s->widx == s->cap;
	if (!error)
		s->data[s->widx] = 0;
	s8 result = stream_to_s8(s);
	arena_commit(a, result.len + 1);
	return result;
}

function s8
arena_stream_commit_and_reset(Arena *arena, Stream *s)
{
	s8 result = arena_stream_commit_zero(arena, s);
	*s = arena_stream(*arena);
	return result;
}
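
/* NOTE(editor): illustrative string-building sketch; `arena` and `frame_index` are
 * hypothetical. arena_stream writes into the arena's free space and committing turns
 * the bytes written so far into an arena-owned, nul-terminated s8:
 *
 *   Stream sb = arena_stream(arena);                  // arena passed by value; nothing committed yet
 *   stream_append_s8(&sb, s8("frame "));
 *   stream_append_u64(&sb, frame_index);
 *   s8 label = arena_stream_commit_zero(&arena, &sb); // now owned by the arena
 */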

#if !defined(XXH_IMPLEMENTATION)
# define XXH_INLINE_ALL
# define XXH_IMPLEMENTATION
# define XXH_STATIC_LINKING_ONLY
# include "external/xxhash.h"
#endif

function u128
u128_hash_from_data(void *data, uz size)
{
	u128 result = {0};
	XXH128_hash_t hash = XXH3_128bits_withSeed(data, size, 4969);
	mem_copy(&result, &hash, sizeof(result));
	return result;
}

function u64
u64_hash_from_s8(s8 v)
{
	u64 result = XXH3_64bits_withSeed(v.data, (uz)v.len, 4969);
	return result;
}

function s8
c_str_to_s8(char *cstr)
{
	s8 result = {.data = (u8 *)cstr};
	if (cstr) { while (*cstr) { result.len++; cstr++; } }
	return result;
}

/* NOTE(rnp): returns < 0 if byte is not found */
function iz
s8_scan_backwards(s8 s, u8 byte)
{
	iz result = (u8 *)memory_scan_backwards(s.data, byte, s.len) - s.data;
	return result;
}

function s8
s8_cut_head(s8 s, iz cut)
{
	s8 result = s;
	if (cut > 0) {
		result.data += cut;
		result.len  -= cut;
	}
	return result;
}

function s8
s8_alloc(Arena *a, iz len)
{
	s8 result = {.data = push_array(a, u8, len), .len = len};
	return result;
}

function s8
s16_to_s8(Arena *a, s16 in)
{
	s8 result = s8("");
	if (in.len) {
		iz commit = in.len * 4;
		iz length = 0;
		u8 *data = arena_commit(a, commit + 1);
		u16 *beg = in.data;
		u16 *end = in.data + in.len;
		while (beg < end) {
			UnicodeDecode decode = utf16_decode(beg, end - beg);
			length += utf8_encode(data + length, decode.cp);
			beg    += decode.consumed;
		}
		data[length] = 0;
		result = (s8){.len = length, .data = data};
		arena_pop(a, commit - length);
	}
	return result;
}
function s16
s8_to_s16(Arena *a, s8 in)
{
	s16 result = {0};
	if (in.len) {
		iz required = 2 * in.len + 1;
		u16 *data   = push_array(a, u16, required);
		iz length   = 0;
		/* TODO(rnp): utf8_decode */
		for (iz i = 0; i < in.len; i++) {
			u32 cp  = in.data[i];
			length += utf16_encode(data + length, cp);
		}
		result = (s16){.len = length, .data = data};
		/* NOTE: arena_pop takes bytes; each unused u16 slot is 2 bytes */
		arena_pop(a, 2 * (required - length));
	}
	return result;
}

#define push_s8_from_parts(a, j, ...) push_s8_from_parts_((a), (j), arg_list(s8, __VA_ARGS__))
function s8
push_s8_from_parts_(Arena *arena, s8 joiner, s8 *parts, iz count)
{
	iz length = joiner.len * (count - 1);
	for (iz i = 0; i < count; i++)
		length += parts[i].len;

	s8 result = {.len = length, .data = arena_commit(arena, length + 1)};

	iz offset = 0;
	for (iz i = 0; i < count; i++) {
		if (i != 0) {
			mem_copy(result.data + offset, joiner.data, (uz)joiner.len);
			offset += joiner.len;
		}
		mem_copy(result.data + offset, parts[i].data, (uz)parts[i].len);
		offset += parts[i].len;
	}
	result.data[result.len] = 0;

	return result;
}
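
/* NOTE(editor): illustrative usage of the function above; the joined string is
 * nul terminated and lives in the arena (`arena` and the path parts are hypothetical):
 *
 *   s8 path = push_s8_from_parts(&arena, s8("/"), s8("shaders"), s8("render.glsl"));
 *   // path.len == 19, path.data == "shaders/render.glsl"
 */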

function s8
push_s8(Arena *a, s8 str)
{
	s8 result   = s8_alloc(a, str.len + 1);
	result.len -= 1;
	mem_copy(result.data, str.data, (uz)result.len);
	return result;
}

function force_inline u32
round_down_power_of_2(u32 a)
{
	u32 result = 0x80000000UL >> clz_u32(a);
	return result;
}

function force_inline u32
round_up_power_of_2(u32 a)
{
	u32 result = 0x80000000UL >> (clz_u32(a - 1) - 1);
	return result;
}

function force_inline iz
round_up_to(iz value, iz multiple)
{
	iz result = value;
	if (value % multiple != 0)
		result += multiple - value % multiple;
	return result;
}
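
/* NOTE(editor): illustrative values for the helpers above:
 *   round_down_power_of_2(40) == 32, round_up_power_of_2(40) == 64,
 *   round_up_power_of_2(64)   == 64, round_up_to(100, 64)    == 128.
 * clz_u32(0) is undefined, so round_down needs a > 0 and round_up needs a > 1.
 */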

function void
split_rect_horizontal(Rect rect, f32 fraction, Rect *left, Rect *right)
{
	if (left) {
		left->pos    = rect.pos;
		left->size.h = rect.size.h;
		left->size.w = rect.size.w * fraction;
	}
	if (right) {
		right->pos    = rect.pos;
		right->pos.x += rect.size.w * fraction;
		right->size.h = rect.size.h;
		right->size.w = rect.size.w * (1.0f - fraction);
	}
}

function void
split_rect_vertical(Rect rect, f32 fraction, Rect *top, Rect *bot)
{
	if (top) {
		top->pos    = rect.pos;
		top->size.w = rect.size.w;
		top->size.h = rect.size.h * fraction;
	}
	if (bot) {
		bot->pos    = rect.pos;
		bot->pos.y += rect.size.h * fraction;
		bot->size.w = rect.size.w;
		bot->size.h = rect.size.h * (1.0f - fraction);
	}
}

function void
cut_rect_horizontal(Rect rect, f32 at, Rect *left, Rect *right)
{
	at = MIN(at, rect.size.w);
	if (left) {
		*left = rect;
		left->size.w = at;
	}
	if (right) {
		*right = rect;
		right->pos.x  += at;
		right->size.w -= at;
	}
}

function void
cut_rect_vertical(Rect rect, f32 at, Rect *top, Rect *bot)
{
	at = MIN(at, rect.size.h);
	if (top) {
		*top = rect;
		top->size.h = at;
	}
	if (bot) {
		*bot = rect;
		bot->pos.y  += at;
		bot->size.h -= at;
	}
}
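
/* NOTE(editor): illustrative layout example for the helpers above. For a rect at {0, 0}
 * sized 100 x 80:
 *   split_rect_horizontal(r, 0.25f, &l, &rr): l is 25 wide, rr starts at x = 25 and is 75 wide;
 *   cut_rect_vertical(r, 30.0f, &t, &b):      t is 30 tall, b starts at y = 30 and is 50 tall.
 * Either output pointer may be NULL when only one side is needed.
 */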

function IntegerConversion
integer_from_s8(s8 raw)
{
	IntegerConversion result = {0};

	iz  i     = 0;
	i64 scale = 1;
	if (raw.len && raw.data[0] == '-') {
		scale = -1;
		i     =  1;
	}

	/* NOTE: overflow below downgrades this to OutOfRange */
	result.result = IntegerConversionResult_Success;
	for (; i < raw.len; i++) {
		i64 digit = (i64)raw.data[i] - '0';
		if (BETWEEN(digit, 0, 9)) {
			if (result.U64 > (U64_MAX - (u64)digit) / 10) {
				result.result = IntegerConversionResult_OutOfRange;
				result.U64    = U64_MAX;
			} else {
				result.U64 = 10 * result.U64 + (u64)digit;
			}
		} else {
			break;
		}
	}
	result.unparsed = (s8){.len = raw.len - i, .data = raw.data + i};
	result.S64      = (i64)result.U64 * scale;

	return result;
}

function f64
parse_f64(s8 s)
{
	b32 negative = s.len && s.data[0] == '-';
	IntegerConversion integral = integer_from_s8(s);

	s = integral.unparsed;
	if (s.len && s.data[0] == '.') { s.data++; s.len--; }
	while (s.len > 0 && s.data[s.len - 1] == '0') s.len--;

	IntegerConversion fractional = integer_from_s8(s);

	u64 power = (u64)(fractional.unparsed.data - s.data);
	f64 frac  = (f64)fractional.U64;
	while (power > 0) { frac /= 10.0; power--; }

	/* NOTE: negate after combining so the fraction keeps the sign of the input */
	f64 result = (f64)integral.U64 + frac;
	if (negative) result = -result;
	return result;
}
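
/* NOTE(editor): illustrative conversions for the two functions above:
 *   integer_from_s8(s8("-42px")) -> S64 == -42, unparsed == "px", result == Success;
 *   integer_from_s8(s8("99999999999999999999")) clamps U64 to U64_MAX and reports OutOfRange;
 *   parse_f64(s8("-1.25")) == -1.25, parse_f64(s8("3.000")) == 3.0.
 */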