/* See LICENSE for license details. */
#define zero_struct(s) mem_clear(s, 0, sizeof(*s))
function void *
mem_clear(void *restrict p_, u8 c, iz size)
{
	u8 *p = p_;
	while (size > 0) p[--size] = c;
	return p;
}

function void
mem_copy(void *restrict dest, void *restrict src, uz n)
{
	u8 *s = src, *d = dest;
	for (; n; n--) *d++ = *s++;
}

function void
mem_move(u8 *dest, u8 *src, uz n)
{
	if (dest < src) mem_copy(dest, src, n);
	else while (n) { n--; dest[n] = src[n]; }
}

function void *
memory_scan_backwards(void *memory, u8 byte, iz n)
{
	void *result = 0;
	u8 *s = memory;
	while (n > 0) if (s[--n] == byte) { result = s + n; break; }
	return result;
}

function void *
arena_aligned_start(Arena a, uz alignment)
{
	uz padding = -(uintptr_t)a.beg & (alignment - 1);
	u8 *result = a.beg + padding;
	return result;
}

#define arena_capacity(a, t) arena_capacity_(a, sizeof(t), alignof(t))
function iz
arena_capacity_(Arena *a, iz size, uz alignment)
{
	iz available = a->end - (u8 *)arena_aligned_start(*a, alignment);
	iz result = available / size;
	return result;
}

function u8 *
arena_commit(Arena *a, iz size)
{
	assert(a->end - a->beg >= size);
	u8 *result = a->beg;
	a->beg += size;
	return result;
}

function void
arena_pop(Arena *a, iz length)
{
	a->beg -= length;
}

#define push_array(a, t, n) (t *)arena_alloc(a, sizeof(t), _Alignof(t), n)
#define push_struct(a, t)   (t *)arena_alloc(a, sizeof(t), _Alignof(t), 1)
function void *
arena_alloc(Arena *a, iz len, uz align, iz count)
{
	void *result = 0;
	if (a->beg) {
		u8 *start = arena_aligned_start(*a, align);
		iz available = a->end - start;
		assert(available >= 0 && count <= available / len);
		asan_unpoison_region(start, count * len);
		a->beg = start + count * len;
		/* TODO: Performance? */
		result = mem_clear(start, 0, count * len);
	}
	return result;
}

enum { DA_INITIAL_CAP = 16 };

#define da_index(it, s) ((it) - (s)->data)
#define da_reserve(a, s, n) \
	(s)->data = da_reserve_((a), (s)->data, &(s)->capacity, (s)->count + n, \
	                        _Alignof(typeof(*(s)->data)), sizeof(*(s)->data))

#define da_append_count(a, s, items, item_count) do { \
	da_reserve((a), (s), (item_count)); \
	mem_copy((s)->data + (s)->count, (items), sizeof(*(items)) * (uz)(item_count)); \
	(s)->count += (item_count); \
} while (0)

#define da_push(a, s) \
	((s)->count == (s)->capacity \
	 ? da_reserve(a, s, 1), \
	   (s)->data + (s)->count++ \
	 : (s)->data + (s)->count++)
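
/* Example (illustrative sketch, not part of the API): any struct with
 * data/count/capacity fields works with the da_* macros above, and
 * push_array() returns zeroed, aligned memory from an Arena:
 *
 *   typedef struct { f32 *data; iz count; iz capacity; } F32s;
 *
 *   F32s values = {0};
 *   *da_push(&arena, &values) = 1.0f;          // grows at the arena tip as needed
 *   f32 *block = push_array(&arena, f32, 64);  // 64 zeroed floats
 */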
function void *
da_reserve_(Arena *a, void *data, iz *capacity, iz needed, uz align, iz size)
{
	iz cap = *capacity;

	/* NOTE(rnp): handle both 0 initialized DAs and DAs that need to be moved (they started
	 * on the stack or someone allocated something in the middle of the arena during usage) */
	if (!data || a->beg != (u8 *)data + cap * size) {
		void *copy = arena_alloc(a, size, align, cap);
		if (data) mem_copy(copy, data, (uz)(cap * size));
		data = copy;
	}

	if (!cap) cap = DA_INITIAL_CAP;
	while (cap < needed) cap *= 2;
	arena_alloc(a, size, align, cap - *capacity);
	*capacity = cap;
	return data;
}

function Arena
sub_arena(Arena *a, iz len, uz align)
{
	Arena result = {0};

	uz padding = -(uintptr_t)a->beg & (align - 1);
	result.beg = a->beg + padding;
	result.end = result.beg + len;
	arena_commit(a, len + (iz)padding);

	return result;
}

function TempArena
begin_temp_arena(Arena *a)
{
	TempArena result = {.arena = a, .old_beg = a->beg};
	return result;
}

function void
end_temp_arena(TempArena ta)
{
	Arena *a = ta.arena;
	if (a) {
		assert(a->beg >= ta.old_beg);
		a->beg = ta.old_beg;
	}
}
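
/* Example (illustrative): scratch allocations rolled back in bulk; everything
 * pushed between begin and end is discarded when the temporary region ends:
 *
 *   TempArena scratch = begin_temp_arena(&arena);
 *   u8 *tmp = push_array(&arena, u8, 4096);
 *   // ... use tmp ...
 *   end_temp_arena(scratch);  // arena->beg restored to its old position
 */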
function u32
utf8_encode(u8 *out, u32 cp)
{
	u32 result = 1;
	if (cp <= 0x7F) {
		out[0] = cp & 0x7F;
	} else if (cp <= 0x7FF) {
		result = 2;
		out[0] = ((cp >>  6) & 0x1F) | 0xC0;
		out[1] = ((cp >>  0) & 0x3F) | 0x80;
	} else if (cp <= 0xFFFF) {
		result = 3;
		out[0] = ((cp >> 12) & 0x0F) | 0xE0;
		out[1] = ((cp >>  6) & 0x3F) | 0x80;
		out[2] = ((cp >>  0) & 0x3F) | 0x80;
	} else if (cp <= 0x10FFFF) {
		result = 4;
		out[0] = ((cp >> 18) & 0x07) | 0xF0;
		out[1] = ((cp >> 12) & 0x3F) | 0x80;
		out[2] = ((cp >>  6) & 0x3F) | 0x80;
		out[3] = ((cp >>  0) & 0x3F) | 0x80;
	} else {
		out[0] = '?';
	}
	return result;
}

function UnicodeDecode
utf16_decode(u16 *data, iz length)
{
	UnicodeDecode result = {.cp = U32_MAX};
	if (length) {
		result.consumed = 1;
		result.cp = data[0];
		if (length > 1 && BETWEEN(data[0], 0xD800u, 0xDBFFu)
		    && BETWEEN(data[1], 0xDC00u, 0xDFFFu))
		{
			result.consumed = 2;
			/* NOTE: 0x10000 must be added after combining the halves; or-ing it
			 * into the low half collides with bit 16 of the shifted high half */
			result.cp = 0x10000u + (((data[0] - 0xD800u) << 10u) | (data[1] - 0xDC00u));
		}
	}
	return result;
}

function u32
utf16_encode(u16 *out, u32 cp)
{
	u32 result = 1;
	if (cp == U32_MAX) {
		out[0] = '?';
	} else if (cp < 0x10000u) {
		out[0] = (u16)cp;
	} else {
		u32 value = cp - 0x10000u;
		out[0] = (u16)(0xD800u + (value >> 10u));
		out[1] = (u16)(0xDC00u + (value & 0x3FFu));
		result = 2;
	}
	return result;
}

function Stream
stream_from_buffer(u8 *buffer, u32 capacity)
{
	Stream result = {.data = buffer, .cap = (i32)capacity};
	return result;
}

function Stream
stream_alloc(Arena *a, i32 cap)
{
	Stream result = stream_from_buffer(arena_commit(a, cap), (u32)cap);
	return result;
}

function s8
stream_to_s8(Stream *s)
{
	s8 result = s8("");
	if (!s->errors) result = (s8){.len = s->widx, .data = s->data};
	return result;
}

function void
stream_reset(Stream *s, i32 index)
{
	s->errors = s->cap <= index;
	if (!s->errors)
		s->widx = index;
}

function void
stream_commit(Stream *s, i32 count)
{
	s->errors |= !BETWEEN(s->widx + count, 0, s->cap);
	if (!s->errors)
		s->widx += count;
}

function void
stream_append(Stream *s, void *data, iz count)
{
	s->errors |= (s->cap - s->widx) < count;
	if (!s->errors) {
		mem_copy(s->data + s->widx, data, (uz)count);
		s->widx += (i32)count;
	}
}

function void
stream_append_byte(Stream *s, u8 b)
{
	stream_append(s, &b, 1);
}

function void
stream_pad(Stream *s, u8 b, i32 n)
{
	while (n > 0) stream_append_byte(s, b), n--;
}

function void
stream_append_s8(Stream *s, s8 str)
{
	stream_append(s, str.data, str.len);
}

#define stream_append_s8s(s, ...) stream_append_s8s_(s, arg_list(s8, ##__VA_ARGS__))
function void
stream_append_s8s_(Stream *s, s8 *strs, iz count)
{
	for (iz i = 0; i < count; i++)
		stream_append(s, strs[i].data, strs[i].len);
}

function void
stream_append_u64_width(Stream *s, u64 n, u64 min_width)
{
	u8 tmp[64];
	u8 *end = tmp + sizeof(tmp);
	u8 *beg = end;
	min_width = MIN(sizeof(tmp), min_width);

	do { *--beg = (u8)('0' + (n % 10)); } while (n /= 10);
	while (end - beg > 0 && (uz)(end - beg) < min_width)
		*--beg = '0';

	stream_append(s, beg, end - beg);
}

function void
stream_append_u64(Stream *s, u64 n)
{
	stream_append_u64_width(s, n, 0);
}

function void
stream_append_hex_u64_width(Stream *s, u64 n, iz width)
{
	assert(width <= 16);
	if (!s->errors) {
		u8 buf[16];
		u8 *end = buf + sizeof(buf);
		u8 *beg = end;
		while (n) {
			*--beg = (u8)"0123456789abcdef"[n & 0x0F];
			n >>= 4;
		}
		while (end - beg < width)
			*--beg = '0';
		stream_append(s, beg, end - beg);
	}
}

function void
stream_append_hex_u64(Stream *s, u64 n)
{
	stream_append_hex_u64_width(s, n, 2);
}

function void
stream_append_i64(Stream *s, i64 n)
{
	if (n < 0) {
		stream_append_byte(s, '-');
		n *= -1;
	}
	stream_append_u64(s, (u64)n);
}

function void
stream_append_f64(Stream *s, f64 f, u64 prec)
{
	if (f < 0) {
		stream_append_byte(s, '-');
		f *= -1;
	}

	/* NOTE: round last digit */
	f += 0.5f / (f64)prec;

	if (f >= (f64)(-1UL >> 1)) {
		stream_append_s8(s, s8("inf"));
	} else {
		u64 integral = (u64)f;
		u64 fraction = (u64)((f - (f64)integral) * (f64)prec);
		stream_append_u64(s, integral);
		stream_append_byte(s, '.');
		for (u64 i = prec / 10; i > 1; i /= 10) {
			if (i > fraction)
				stream_append_byte(s, '0');
		}
		stream_append_u64(s, fraction);
	}
}

function void
stream_append_f64_e(Stream *s, f64 f)
{
	/* NOTE: strip the sign first; the normalization loops below never
	 * terminate for negative inputs */
	if (f < 0) {
		stream_append_byte(s, '-');
		f *= -1;
	}

	/* TODO: there should be a better way of doing this */
#if 0
	/* NOTE: we ignore subnormal numbers for now */
	union { f64 f; u64 u; } u = {.f = f};
	i32 exponent = ((u.u >> 52) & 0x7ff) - 1023;
	f32 log_10_of_2 = 0.301f;
	i32 scale = (exponent * log_10_of_2);
	/* NOTE: normalize f */
	for (i32 i = ABS(scale); i > 0; i--)
		f *= (scale > 0)? 0.1f : 10.0f;
#else
	i32 scale = 0;
	if (f != 0) {
		while (f > 1) {
			f *= 0.1f;
			scale++;
		}
		while (f < 1) {
			f *= 10.0f;
			scale--;
		}
	}
#endif

	u32 prec = 100;
	stream_append_f64(s, f, prec);
	stream_append_byte(s, 'e');
	stream_append_byte(s, scale >= 0? '+' : '-');
	for (u32 i = prec / 10; i > 1; i /= 10)
		stream_append_byte(s, '0');
	stream_append_u64(s, (u64)ABS(scale));
}

function void
stream_append_v2(Stream *s, v2 v)
{
	stream_append_byte(s, '{');
	stream_append_f64(s, v.x, 100);
	stream_append_s8(s, s8(", "));
	stream_append_f64(s, v.y, 100);
	stream_append_byte(s, '}');
}
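
/* Example (illustrative): formatting into a fixed buffer; failed appends set
 * s.errors instead of overflowing, and stream_to_s8() yields the result:
 *
 *   u8 buffer[256];
 *   Stream s = stream_from_buffer(buffer, (u32)sizeof(buffer));
 *   stream_append_s8(&s, s8("frame "));
 *   stream_append_u64(&s, 120);
 *   s8 text = stream_to_s8(&s);  // empty if any append failed
 */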
function Stream
arena_stream(Arena a)
{
	Stream result = {0};
	result.data = a.beg;
	result.cap = (i32)(a.end - a.beg);

	/* TODO(rnp): no idea what to do here if we want to maintain the ergonomics */
	asan_unpoison_region(result.data, result.cap);

	return result;
}

function s8
arena_stream_commit(Arena *a, Stream *s)
{
	ASSERT(s->data == a->beg);
	s8 result = stream_to_s8(s);
	arena_commit(a, result.len);
	return result;
}

function s8
arena_stream_commit_zero(Arena *a, Stream *s)
{
	b32 error = s->errors || s->widx == s->cap;
	if (!error)
		s->data[s->widx] = 0;
	s8 result = stream_to_s8(s);
	arena_commit(a, result.len + 1);
	return result;
}

function s8
arena_stream_commit_and_reset(Arena *arena, Stream *s)
{
	s8 result = arena_stream_commit_zero(arena, s);
	*s = arena_stream(*arena);
	return result;
}

#if !defined(XXH_IMPLEMENTATION)
# define XXH_INLINE_ALL
# define XXH_IMPLEMENTATION
# define XXH_STATIC_LINKING_ONLY
# include "external/xxhash.h"
#endif

function u128
u128_hash_from_data(void *data, uz size)
{
	u128 result = {0};
	XXH128_hash_t hash = XXH3_128bits_withSeed(data, size, 4969);
	mem_copy(&result, &hash, sizeof(result));
	return result;
}

function u64
u64_hash_from_s8(s8 v)
{
	u64 result = XXH3_64bits_withSeed(v.data, (uz)v.len, 4969);
	return result;
}

function s8
c_str_to_s8(char *cstr)
{
	s8 result = {.data = (u8 *)cstr};
	if (cstr) { while (*cstr) { result.len++; cstr++; } }
	return result;
}

/* NOTE(rnp): returns < 0 if byte is not found */
function iz
s8_scan_backwards(s8 s, u8 byte)
{
	/* NOTE: avoid pointer arithmetic on the null pointer returned on a miss */
	iz result = -1;
	u8 *found = memory_scan_backwards(s.data, byte, s.len);
	if (found) result = found - s.data;
	return result;
}

function s8
s8_cut_head(s8 s, iz cut)
{
	s8 result = s;
	if (cut > 0) {
		result.data += cut;
		result.len -= cut;
	}
	return result;
}

function s8
s8_alloc(Arena *a, iz len)
{
	s8 result = {.data = push_array(a, u8, len), .len = len};
	return result;
}

function s8
s16_to_s8(Arena *a, s16 in)
{
	s8 result = s8("");
	if (in.len) {
		iz commit = in.len * 4;
		iz length = 0;
		u8 *data = arena_commit(a, commit + 1);
		u16 *beg = in.data;
		u16 *end = in.data + in.len;
		while (beg < end) {
			UnicodeDecode decode = utf16_decode(beg, end - beg);
			length += utf8_encode(data + length, decode.cp);
			beg += decode.consumed;
		}
		data[length] = 0;
		result = (s8){.len = length, .data = data};
		arena_pop(a, commit - length);
	}
	return result;
}
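
/* Example (illustrative): converting a UTF-16 string (e.g. from a Windows
 * API) into arena-backed, NUL-terminated UTF-8; `wide` is hypothetical:
 *
 *   s16 wide = ...;
 *   s8 path = s16_to_s8(&arena, wide);
 */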
function s16
s8_to_s16(Arena *a, s8 in)
{
	s16 result = {0};
	if (in.len) {
		iz required = 2 * in.len + 1;
		u16 *data = push_array(a, u16, required);
		iz length = 0;
		/* TODO(rnp): utf8_decode */
		for (iz i = 0; i < in.len; i++) {
			u32 cp = in.data[i];
			length += utf16_encode(data + length, cp);
		}
		result = (s16){.len = length, .data = data};
		/* NOTE: arena_pop() takes a byte count, not a u16 count */
		arena_pop(a, (required - length) * (iz)sizeof(*data));
	}
	return result;
}

#define push_s8_from_parts(a, j, ...) push_s8_from_parts_((a), (j), arg_list(s8, __VA_ARGS__))
function s8
push_s8_from_parts_(Arena *arena, s8 joiner, s8 *parts, iz count)
{
	iz length = joiner.len * (count - 1);
	for (iz i = 0; i < count; i++)
		length += parts[i].len;

	s8 result = {.len = length, .data = arena_commit(arena, length + 1)};

	iz offset = 0;
	for (iz i = 0; i < count; i++) {
		if (i != 0) {
			mem_copy(result.data + offset, joiner.data, (uz)joiner.len);
			offset += joiner.len;
		}
		mem_copy(result.data + offset, parts[i].data, (uz)parts[i].len);
		offset += parts[i].len;
	}
	result.data[result.len] = 0;

	return result;
}

function s8
push_s8(Arena *a, s8 str)
{
	s8 result = s8_alloc(a, str.len + 1);
	result.len -= 1;
	mem_copy(result.data, str.data, (uz)result.len);
	return result;
}

function force_inline u32
round_down_power_of_2(u32 a)
{
	u32 result = 0x80000000UL >> clz_u32(a);
	return result;
}

function force_inline u32
round_up_power_of_2(u32 a)
{
	u32 result = 0x80000000UL >> (clz_u32(a - 1) - 1);
	return result;
}

function force_inline iz
round_up_to(iz value, iz multiple)
{
	iz result = value;
	if (value % multiple != 0)
		result += multiple - value % multiple;
	return result;
}

function void
split_rect_horizontal(Rect rect, f32 fraction, Rect *left, Rect *right)
{
	if (left) {
		left->pos = rect.pos;
		left->size.h = rect.size.h;
		left->size.w = rect.size.w * fraction;
	}
	if (right) {
		right->pos = rect.pos;
		right->pos.x += rect.size.w * fraction;
		right->size.h = rect.size.h;
		right->size.w = rect.size.w * (1.0f - fraction);
	}
}

function void
split_rect_vertical(Rect rect, f32 fraction, Rect *top, Rect *bot)
{
	if (top) {
		top->pos = rect.pos;
		top->size.w = rect.size.w;
		top->size.h = rect.size.h * fraction;
	}
	if (bot) {
		bot->pos = rect.pos;
		bot->pos.y += rect.size.h * fraction;
		bot->size.w = rect.size.w;
		bot->size.h = rect.size.h * (1.0f - fraction);
	}
}

function void
cut_rect_horizontal(Rect rect, f32 at, Rect *left, Rect *right)
{
	at = MIN(at, rect.size.w);
	if (left) {
		*left = rect;
		left->size.w = at;
	}
	if (right) {
		*right = rect;
		right->pos.x += at;
		right->size.w -= at;
	}
}

function void
cut_rect_vertical(Rect rect, f32 at, Rect *top, Rect *bot)
{
	at = MIN(at, rect.size.h);
	if (top) {
		*top = rect;
		top->size.h = at;
	}
	if (bot) {
		*bot = rect;
		bot->pos.y += at;
		bot->size.h -= at;
	}
}
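
/* Example (illustrative): carving a window rect into a UI layout; split_*
 * take a fraction of the axis, cut_* an absolute size. Rects are passed by
 * value, so an output pointer may alias the input:
 *
 *   Rect sidebar, content, titlebar;
 *   split_rect_horizontal(window, 0.25f, &sidebar, &content);
 *   cut_rect_vertical(content, 32.0f, &titlebar, &content);
 */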
function IntegerConversion
integer_from_s8(s8 raw)
{
	IntegerConversion result = {0};

	iz i = 0;
	i64 scale = 1;
	if (raw.len && raw.data[0] == '-') {
		scale = -1;
		i = 1;
	}

	for (; i < raw.len; i++) {
		i64 digit = (i64)raw.data[i] - '0';
		if (BETWEEN(digit, 0, 9)) {
			if (result.U64 > (U64_MAX - (u64)digit) / 10) {
				result.result = IntegerConversionResult_OutOfRange;
				result.U64 = U64_MAX;
			} else {
				result.U64 = 10 * result.U64 + (u64)digit;
			}
		} else {
			break;
		}
	}
	result.unparsed = (s8){.len = raw.len - i, .data = raw.data + i};
	/* NOTE: don't clobber an OutOfRange status set in the digit loop */
	if (result.result != IntegerConversionResult_OutOfRange)
		result.result = IntegerConversionResult_Success;
	result.S64 = (i64)result.U64 * scale;

	return result;
}

function f64
parse_f64(s8 s)
{
	/* NOTE: track the sign separately so the fractional part of a negative
	 * number ("-2.5") and a negative zero integral ("-0.5") come out right */
	b32 negative = s.len > 0 && s.data[0] == '-';

	IntegerConversion integral = integer_from_s8(s);

	s = integral.unparsed;
	if (s.len && s.data[0] == '.') { s.data++; s.len--; }
	while (s.len > 0 && s.data[s.len - 1] == '0') s.len--;

	IntegerConversion fractional = integer_from_s8(s);

	u64 power = (u64)(fractional.unparsed.data - s.data);
	f64 frac = (f64)fractional.U64;
	while (power > 0) { frac /= 10.0; power--; }

	f64 result = (f64)integral.U64 + frac;
	if (negative) result = -result;
	return result;
}

function FileWatchDirectory *
lookup_file_watch_directory(FileWatchDirectoryList *ctx, u64 hash)
{
	FileWatchDirectory *result = 0;
	for (u32 i = 0; i < ctx->count; i++) {
		FileWatchDirectory *test = ctx->data + i;
		if (test->hash == hash) {
			result = test;
			break;
		}
	}
	return result;
}
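
/* Example (illustrative): chained number parsing; unparsed points at the
 * remaining input so conversions can be composed:
 *
 *   IntegerConversion c = integer_from_s8(s8("1024x768"));
 *   // c.U64 == 1024, c.unparsed == "x768"
 *   f64 scale = parse_f64(s8("2.5"));  // 2.5
 */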