solidc
Robust collection of general-purpose cross-platform C libraries and data structures designed for rapid and safe development in C
Loading...
Searching...
No Matches
arena.c
#include "arena.h"
#include "aligned_alloc.h"

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#if defined(_WIN32)
#include <windows.h>
#else
#include <sys/mman.h>
#include <unistd.h>
#endif
13
// Size of the per-thread scratch buffer: 1 MiB. Small arenas created via
// arena_create() borrow this buffer instead of heap-allocating their first
// block, making the common case allocation-free.
#define STATIC_BUFFER_SIZE (1024 * 1024)

// Per-thread scratch buffer, 64-byte aligned (see arena_create()).
static _Thread_local char static_buffer[STATIC_BUFFER_SIZE] ARENA_ALIGNED(64);
// True while a live arena on this thread owns static_buffer; only one arena
// per thread may borrow it at a time.
static _Thread_local bool static_buffer_in_use = false;
18
19static ARENA_INLINE size_t get_page_size(void) {
20#if defined(_WIN32)
21 SYSTEM_INFO si;
22 GetSystemInfo(&si);
23 return si.dwPageSize;
24#else
25 return (size_t)sysconf(_SC_PAGESIZE);
26#endif
27}
28
29void arena_init(Arena* restrict a, void* restrict buf, size_t size) {
30 memset(a, 0, sizeof(Arena));
31 a->page_size = get_page_size();
32 a->heap_allocated = false;
33
34 a->first_block.base = (char*)buf;
35 a->first_block.end = (char*)buf + size;
36 a->first_block.is_static = true;
37 a->first_block.next = NULL;
38
39 a->head = &a->first_block;
40 a->current_block = a->head;
41 a->curr = a->head->base;
42 a->end = a->head->end;
43 a->total_committed = size;
44}
45
46Arena* arena_create(size_t reserve_size) {
47 Arena* a = (Arena*)aligned_alloc_xp(64, sizeof(Arena));
48 if (!a) return NULL;
49
50 if (!static_buffer_in_use && reserve_size <= STATIC_BUFFER_SIZE) {
51 arena_init(a, static_buffer, STATIC_BUFFER_SIZE);
52 static_buffer_in_use = true;
53 } else {
54 size_t initial_size = reserve_size > 0 ? reserve_size : ARENA_MIN_BLOCK_SIZE;
55 initial_size = (initial_size + get_page_size() - 1) & ~(get_page_size() - 1);
56
57 char* buf = (char*)aligned_alloc_xp(64, initial_size);
58 if (!buf) {
59 aligned_free_xp(a);
60 return NULL;
61 }
62 arena_init(a, buf, initial_size);
63 a->first_block.is_static = false; // We own this buffer; arena_destroy must free it.
64 }
65
66 a->heap_allocated = true;
67 return a;
68}
69
70void* _arena_alloc_slow(Arena* restrict a, size_t size, size_t alignment) {
71 // Try the next cached block before allocating a new one.
72 ArenaBlock* next = a->current_block->next;
73 if (next) {
74 uintptr_t aligned = ((uintptr_t)next->base + alignment - 1) & ~(alignment - 1);
75 if (aligned + size <= (uintptr_t)next->end) {
76 a->current_block = next;
77 a->curr = (char*)(aligned + size);
78 a->end = next->end;
79 return (void*)aligned;
80 }
81 }
82
83 // Allocate a new block. The ArenaBlock header lives at the start of the
84 // allocation itself, eliminating the separate malloc() call.
85 size_t current_size = (size_t)(a->current_block->end - a->current_block->base);
86 size_t needed = sizeof(ArenaBlock) + alignment + size;
87 size_t next_size = current_size * 2;
88
89 if (next_size < needed) next_size = needed;
90 if (next_size < ARENA_MIN_BLOCK_SIZE) next_size = ARENA_MIN_BLOCK_SIZE;
91 next_size = (next_size + a->page_size - 1) & ~(a->page_size - 1);
92
93 // Single allocation: header at [ptr], usable memory at [ptr + sizeof(ArenaBlock)].
94 char* ptr = (char*)aligned_alloc_xp(64, next_size);
95 if (!ptr) return NULL;
96
97 ArenaBlock* block = (ArenaBlock*)ptr;
98 block->base = (char*)(((uintptr_t)(ptr + sizeof(ArenaBlock)) + alignment - 1) & ~(alignment - 1));
99 block->end = ptr + next_size;
100 block->is_static = false;
101 block->next = a->current_block->next;
102
103 a->current_block->next = block;
104 a->current_block = block;
105 a->total_committed += next_size;
106
107 uintptr_t aligned = ((uintptr_t)block->base + alignment - 1) & ~(alignment - 1);
108 a->curr = (char*)(aligned + size);
109 a->end = block->end;
110
111 return (void*)aligned;
112}
113
114void arena_destroy(Arena* restrict a) {
115 if (!a) return;
116
117 ArenaBlock* block = a->head;
118
119 // Free the first block's buffer if we heap-allocated it.
120 if (block && !block->is_static) aligned_free_xp(block->base);
121
122 // Walk overflow blocks. Each was allocated as a single slab where the
123 // ArenaBlock header lives at the start of the pointer returned by
124 // aligned_alloc_xp, so we free the block pointer itself (not block->base).
125 block = block ? block->next : NULL;
126 while (block) {
127 ArenaBlock* temp = block;
128 block = block->next;
129 aligned_free_xp(temp); // frees the whole slab (header + data)
130 }
131
132 if (a->heap_allocated) aligned_free_xp(a);
133}
Aligned memory allocation functions for cross-platform support.
char * end
Definition arena.h:65
bool is_static
Definition arena.h:66