solidc
Robust collection of general-purpose cross-platform C libraries and data structures designed for rapid and safe development in C
Loading...
Searching...
No Matches
cache.c
1#include "../include/cache.h"
2
3#include "../include/align.h"
4#include "../include/aligned_alloc.h"
5#include "../include/spinlock.h"
6
7#include <errno.h>
8#include <stdio.h>
9#include <stdlib.h>
10#include <string.h>
11#include <time.h>
12
// NOTE: MADV_HUGEPAGE is defined by <sys/mman.h> itself, so it must not be
// used to guard the include — doing so skips the include (and silently
// disables every madvise() call below) unless some other header had already
// pulled in <sys/mman.h>. Guard on the platform only; the individual
// madvise() call sites still use #ifdef MADV_HUGEPAGE after the include.
#if defined(__linux__)
#include <sys/mman.h>
#endif
16
#define CACHE_LINE_SIZE 64          // Size of CPU cache line for alignment
#define INITIAL_BUCKET_MULTIPLIER 2 // Load factor: buckets = capacity * 2
#define CACHE_FILE_MAGIC 0x45484346 // On-disk bytes spell "FCHE" (Fast Cache) when written little-endian
#define CACHE_FILE_VERSION 1

// --- Packed Metadata Constants ---
// Each slot packs {Hash:32, KeyLen:32} into one 64-bit integer (see pack_meta).
// Hash values 0 and 1 are reserved as slot-state flags; hash_key() remaps any
// computed hash below HASH_MIN_VAL so a real key can never produce them.
#define HASH_EMPTY 0   // Slot never used
#define HASH_DELETED 1 // Slot previously used, now tombstone
#define HASH_MIN_VAL 2 // Minimum valid hash value (ensures no collision with control flags)

// Branch prediction hints for better CPU pipeline utilization
#if defined(__GNUC__) || defined(__clang__)
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define likely(x) (x)
#define unlikely(x) (x)
#endif
37
38// --- Data Structures ---
39
// A single cache entry. The header, key, back-pointer, and value all live in
// ONE malloc'd block; the block is freed when ref_count reaches zero (see
// entry_ref_dec). Cache-line aligned to avoid false sharing between entries.
typedef struct ALIGN(CACHE_LINE_SIZE) {
    atomic_int ref_count;      // Reference count for safe concurrent access
    _Atomic uint8_t clock_bit; // CLOCK algorithm: 1 = recently used, 0 = candidate for eviction
    time_t expires_at;         // Absolute expiration timestamp
    uint32_t hash;             // Full 32-bit hash of the key
    uint32_t key_len;          // Length of the key string (excluding null terminator)
    size_t value_len;          // Length of the value data
    // Flexible array: [key]['\0'][back_ptr][value]
#if defined(_MSC_VER) && !defined(__cplusplus)
    unsigned char data[1]; // MSVC C mode workaround
#else
    unsigned char data[]; // C99 flexible array member
#endif
} cache_entry_t;
63
// One hash-table slot (16 bytes on 64-bit targets). `metadata` packs
// {hash:32, key_len:32}; the hash half doubles as the slot state:
// HASH_EMPTY (never used) or HASH_DELETED (tombstone).
typedef struct {
    uint64_t metadata;    // Packed {hash:32, key_len:32}; see HASH_* constants
    cache_entry_t* entry; // Pointer to heap-allocated entry; NULL when slot is empty/deleted
} cache_slot_t;
87
// One independent shard of the cache: its own slot array, counters, and
// reader-writer lock. Sized and aligned to exactly one cache line so two
// adjacent shards never share a line (no false sharing between shard locks).
typedef struct ALIGN(CACHE_LINE_SIZE) {
    cache_slot_t* slots;    // Array of 16-byte slots; size is always a power of 2
    size_t bucket_count;    // Hash table size (always power of 2 for fast modulo via bitwise AND)
    size_t size;            // Current number of live (non-tombstone) entries
    size_t capacity;        // Maximum live entries before eviction triggers
    size_t tombstone_count; // Number of HASH_DELETED slots; drives compaction decisions
    size_t clock_hand;      // CLOCK algorithm: index of the next slot to examine for eviction
    fast_rwlock_t lock;     // Reader-writer spinlock; readers share, writers are exclusive
} aligned_cache_shard_t;

// Verify at compile time that the struct is exactly one cache line.
// If this fires, a struct member was added that pushes the size past 64 bytes,
// which would cause two shards to share a cache line (false sharing).
// (Requires C11. If using older C, you can remove this check.)
_Static_assert(sizeof(aligned_cache_shard_t) == CACHE_LINE_SIZE, "Shard size mismatch: False sharing will occur");
110
// Top-level cache handle (exposed to callers as the opaque `cache_t`).
struct cache_s {
    aligned_cache_shard_t shards[CACHE_SHARD_COUNT]; // Fixed array of shards; no heap indirection
    uint32_t default_ttl; // Default time-to-live in seconds, used when ttl=0 is passed to cache_set
};
115
/**
 * Round `n` up to the nearest power of two.
 *
 * Returns `n` unchanged when it is already a power of two, and 1 for n == 0
 * (the original returned 0 for 0, which would produce a zero-sized table).
 * The original also omitted the 32-bit fold, so on 64-bit size_t any input
 * above 2^32 produced a wrong (truncated) result; the conditional
 * `n |= n >> 32` fixes that without affecting 32-bit builds.
 */
static inline size_t next_power_of_2(size_t n) {
    if (n == 0) return 1;
    if ((n & (n - 1)) == 0) return n; // already a power of two
    n--;
    n |= n >> 1;
    n |= n >> 2;
    n |= n >> 4;
    n |= n >> 8;
    n |= n >> 16;
#if SIZE_MAX > 0xFFFFFFFFu
    n |= n >> 32; // required to propagate the top bit on 64-bit size_t
#endif
    return n + 1;
}
130
/**
 * 32-bit FNV-1a hash of `key[0..len)`.
 *
 * The result is remapped so it can never equal the reserved slot-state
 * values HASH_EMPTY (0) or HASH_DELETED (1); anything below 2 has 2 added.
 */
static inline uint32_t hash_key(const char* key, size_t len) {
    uint32_t h = 2166136261u; /* FNV offset basis */
    const unsigned char* p = (const unsigned char*)key;
    const unsigned char* const end = p + len;

    while (p < end) {
        h ^= *p++;
        h *= 16777619u; /* FNV prime */
    }

    /* Keep clear of the control values (2 == HASH_MIN_VAL). */
    if (h < 2u) h += 2u;
    return h;
}
146
151static inline size_t get_shard_idx(uint32_t hash) {
152 hash ^= hash >> 16;
153 return hash & (CACHE_SHARD_COUNT - 1);
154}
155
161static inline uint64_t pack_meta(uint32_t hash, uint32_t len) { return ((uint64_t)hash << 32) | (uint32_t)len; }
162
166static inline uint32_t meta_hash(uint64_t meta) { return (uint32_t)(meta >> 32); }
167
172static inline const void* value_ptr_from_entry(const cache_entry_t* entry) {
173 return (const void*)(entry->data + entry->key_len + 1 + sizeof(cache_entry_t*));
174}
175
180static inline cache_entry_t* entry_from_value(const void* value_ptr) {
181 if (unlikely(!value_ptr)) return NULL;
182 cache_entry_t* const* back_ptr = (cache_entry_t* const*)((const char*)value_ptr - sizeof(cache_entry_t*));
183 return *back_ptr;
184}
185
191static inline void entry_ref_inc(cache_entry_t* entry) {
192 if (entry) atomic_fetch_add_explicit(&entry->ref_count, 1, memory_order_relaxed);
193}
194
201static inline void entry_ref_dec(cache_entry_t* entry) {
202 if (!entry) return;
203 int old_val = atomic_fetch_sub_explicit(&entry->ref_count, 1, memory_order_acq_rel);
204 if (unlikely(old_val == 1)) free(entry);
205}
206
/**
 * Locate the slot for `key` (hash/klen precomputed) via linear probing.
 *
 * Output contract:
 *  - *found == true:  the returned index holds a live entry whose key matches.
 *  - *found == false: the returned index is the preferred insertion slot —
 *    the first tombstone passed on the probe path if any (reuse reduces the
 *    need for compaction), else the empty slot that terminated the probe.
 *
 * Caller must hold the shard lock (read for lookup; write if the returned
 * index will be used to mutate the table).
 */
static size_t find_slot(aligned_cache_shard_t* shard, uint32_t hash, const char* key, size_t klen, bool* found) {
    size_t mask = shard->bucket_count - 1; // Power-of-2 allows fast modulo via bitwise AND
    size_t idx = hash & mask; // Initial probe position
    size_t first_tombstone = SIZE_MAX; // Track first deleted slot for insertion reuse

    // Pre-pack target once; avoids redundant pack_meta calls inside the loop
    uint64_t target = pack_meta(hash, (uint32_t)klen);
    cache_slot_t* slots = shard->slots;

    *found = false;

    for (size_t probe_count = 0; probe_count < shard->bucket_count; probe_count++) {
        uint64_t meta = slots[idx].metadata;
        uint32_t h = meta_hash(meta);

        if (h == HASH_EMPTY) {
            // Empty slot terminates the probe chain: the key cannot exist past this point.
            // Return the first tombstone if we passed one; it's a better insertion target
            // because reusing tombstones reduces the need for compaction.
            return (first_tombstone != SIZE_MAX) ? first_tombstone : idx;
        }

        if (h == HASH_DELETED) {
            // Tombstone: key may still exist further along the chain, keep probing.
            // Record the first tombstone we see for potential insertion reuse.
            if (first_tombstone == SIZE_MAX) first_tombstone = idx;
        } else if (meta == target) {
            // Full metadata match (hash + key length). Now confirm the key itself.
            // This memcmp is a pointer chase into a separately malloc'd block and
            // may cause an L1 miss, but only occurs on genuine hash+length matches,
            // so false positives are rare with a good hash function.
            cache_entry_t* entry = slots[idx].entry;
            if (memcmp(entry->data, key, klen) == 0) {
                *found = true;
                return idx;
            }
        }

        idx = (idx + 1) & mask; // Linear probe: advance to next slot (wraps via mask)
    }

    // Exhausted all slots (table is completely full of live entries and tombstones).
    // This should not happen under normal operation because eviction prevents the
    // table from reaching 100% occupancy, but return the best available fallback.
    return (first_tombstone != SIZE_MAX) ? first_tombstone : idx;
}
265
/**
 * Rebuild the shard's slot array at the same size, eliminating tombstones.
 * Live entries are re-inserted using their stored hashes — the keys are not
 * re-read, so no entry memory is touched. On allocation failure the shard is
 * left exactly as it was (graceful degradation).
 *
 * Caller must hold the shard write lock.
 */
static void compact_shard(aligned_cache_shard_t* shard) {
    size_t old_bucket_count = shard->bucket_count;
    cache_slot_t* old_slots = shard->slots;

    cache_slot_t* new_slots = ALIGNED_ALLOC(CACHE_LINE_SIZE, old_bucket_count * sizeof(cache_slot_t));
    if (!new_slots) return; // Graceful fallback: we keep running with existing setup

#ifdef MADV_HUGEPAGE
    madvise(new_slots, old_bucket_count * sizeof(cache_slot_t), MADV_HUGEPAGE);
#endif
    memset(new_slots, 0, old_bucket_count * sizeof(cache_slot_t));

    size_t mask = old_bucket_count - 1;
    for (size_t i = 0; i < old_bucket_count; i++) {
        uint64_t meta = old_slots[i].metadata;
        if (meta_hash(meta) >= HASH_MIN_VAL) {
            // Re-insert live entry using its stored hash; no need to rehash the key.
            uint32_t hash = meta_hash(meta);
            size_t idx = hash & mask;
            // Linear probe to find an empty slot in the new array.
            // No tombstones exist in new_slots, so HASH_EMPTY is the only terminator.
            while (meta_hash(new_slots[idx].metadata) != HASH_EMPTY) {
                idx = (idx + 1) & mask;
            }
            new_slots[idx].metadata = meta;
            new_slots[idx].entry = old_slots[i].entry;
        }
    }

    shard->slots = new_slots;
    shard->tombstone_count = 0; // All tombstones eliminated by the re-hash
    shard->clock_hand = 0;      // Reset eviction scan position; old offsets are invalid

    // free() is correct here: C11 aligned_alloc() memory is released with free().
    // NOTE(review): this assumes ALIGNED_ALLOC maps to C11 aligned_alloc (or a
    // free()-compatible allocator); confirm for the MSVC _aligned_malloc path.
    free(old_slots);
}
316
/**
 * Evict exactly one live entry using the CLOCK (second-chance) policy.
 *
 * Pass 1: sweep from clock_hand; entries with clock_bit == 1 get the bit
 * cleared (one reprieve), and the first entry seen with clock_bit == 0 is
 * evicted. Pass 2 (fallback): after a full sweep every bit is cleared, so
 * the next live slot is evicted unconditionally.
 *
 * Eviction tombstones the slot and drops the table's reference; the entry
 * memory is freed only when all outstanding cache_get references release.
 *
 * Returns true if an entry was evicted, false only if the shard holds no
 * live entries. Caller must hold the shard write lock.
 */
static bool clock_evict(aligned_cache_shard_t* shard) {
    size_t scanned = 0;
    size_t mask = shard->bucket_count - 1;
    cache_slot_t* slots = shard->slots;

    // Single pass: give recently-used entries one reprieve, evict the first stale one.
    while (scanned < shard->bucket_count) {
        size_t idx = shard->clock_hand;
        shard->clock_hand = (shard->clock_hand + 1) & mask;

        uint64_t meta = slots[idx].metadata;

        if (meta_hash(meta) < HASH_MIN_VAL) {
            // Empty or tombstone slot; nothing to evict here, advance hand.
            scanned++;
            continue;
        }

        cache_entry_t* entry = slots[idx].entry;
        uint8_t bit = atomic_load_explicit(&entry->clock_bit, memory_order_relaxed);

        if (bit == 0) {
            // Clock bit is clear: this entry has not been accessed since the last
            // sweep. Evict it by marking the slot as a tombstone.
            slots[idx].metadata = pack_meta(HASH_DELETED, 0);
            slots[idx].entry = NULL;
            shard->size--;
            shard->tombstone_count++;
            entry_ref_dec(entry);
            return true;
        } else {
            // Clock bit is set: entry was recently used. Clear the bit to give it
            // one more pass before eviction and continue scanning.
            atomic_store_explicit(&entry->clock_bit, 0, memory_order_relaxed);
        }
        scanned++;
    }

    // Fallback: scan forward from clock_hand until a live slot is found.
    // After one full pass all clock bits are cleared, so a live slot is
    // guaranteed to exist when shard->size > 0.
    for (size_t i = 0; i < shard->bucket_count; i++) {
        size_t idx = shard->clock_hand;
        shard->clock_hand = (shard->clock_hand + 1) & mask;
        if (meta_hash(slots[idx].metadata) >= HASH_MIN_VAL) {
            cache_entry_t* entry = slots[idx].entry;
            slots[idx].metadata = pack_meta(HASH_DELETED, 0);
            slots[idx].entry = NULL;
            shard->size--;
            shard->tombstone_count++;
            entry_ref_dec(entry);
            return true;
        }
    }
    return false; // Unreachable when shard->size > 0; satisfies the compiler
}
387
/**
 * Create a cache holding up to `capacity` live entries (1000 when 0 is
 * passed), split across CACHE_SHARD_COUNT shards. `default_ttl` (seconds)
 * applies when cache_set is called with ttl == 0; falls back to
 * CACHE_DEFAULT_TTL when itself 0. Returns NULL on allocation failure.
 *
 * NOTE(review): capacity / CACHE_SHARD_COUNT truncates, so the effective
 * total capacity may be slightly below the requested value.
 */
cache_t* cache_create(size_t capacity, uint32_t default_ttl) {
    struct cache_s* c = calloc(1, sizeof(struct cache_s));
    if (!c) return NULL;

    if (capacity == 0) capacity = 1000;
    size_t shard_cap = capacity / CACHE_SHARD_COUNT;
    if (shard_cap < 1) shard_cap = 1;

    c->default_ttl = default_ttl ? default_ttl : CACHE_DEFAULT_TTL;
    for (size_t i = 0; i < CACHE_SHARD_COUNT; i++) {
        aligned_cache_shard_t* s = &c->shards[i];
        s->capacity = shard_cap;

        // Buckets are over-provisioned relative to capacity to keep the load
        // factor low; rounded up to a power of 2 for mask-based indexing.
        size_t desired = (size_t)(shard_cap * INITIAL_BUCKET_MULTIPLIER);
        s->bucket_count = next_power_of_2(desired);

        s->slots = ALIGNED_ALLOC(CACHE_LINE_SIZE, s->bucket_count * sizeof(cache_slot_t));
        if (!s->slots) {
            goto cleanup_error;
        }

#ifdef MADV_HUGEPAGE
        madvise(s->slots, s->bucket_count * sizeof(cache_slot_t), MADV_HUGEPAGE);
#endif
        memset(s->slots, 0, s->bucket_count * sizeof(cache_slot_t));

        fast_rwlock_init(&s->lock);
    }

    return (cache_t*)c;

cleanup_error:
    // calloc zeroed the struct, so un-initialized shards have slots == NULL
    // and free(NULL) is a harmless no-op.
    for (size_t j = 0; j < CACHE_SHARD_COUNT; j++) {
        aligned_cache_shard_t* s = &c->shards[j];
        free(s->slots);
    }
    free(c);
    return NULL;
}
430
/**
 * Destroy the cache: drops the table's reference on every live entry and
 * frees all slot arrays plus the handle itself. Entries still referenced by
 * callers (returned from cache_get) survive until cache_release drops the
 * final reference. Must not race with other API calls on the same cache;
 * the per-shard write locks are taken defensively, but the final free(cache)
 * is unprotected.
 */
void cache_destroy(cache_t* cache_ptr) {
    if (!cache_ptr) return;
    struct cache_s* cache = (struct cache_s*)cache_ptr;

    for (int i = 0; i < CACHE_SHARD_COUNT; i++) {
        fast_rwlock_wrlock(&cache->shards[i].lock);
        if (cache->shards[i].slots) {
            for (size_t j = 0; j < cache->shards[i].bucket_count; j++) {
                if (meta_hash(cache->shards[i].slots[j].metadata) >= HASH_MIN_VAL) {
                    entry_ref_dec(cache->shards[i].slots[j].entry);
                }
            }
            free(cache->shards[i].slots);
        }
        fast_rwlock_unlock_wr(&cache->shards[i].lock);
    }
    free(cache);
}
452
/**
 * Look up `key` (klen bytes). On a hit, takes a reference on the entry and
 * returns a pointer to the value bytes (length stored in *out_len when
 * non-NULL); the caller MUST pair every successful cache_get with a
 * cache_release on the returned pointer. Returns NULL on miss, invalid
 * arguments, or an expired entry (which is removed here under a write lock).
 */
const void* cache_get(cache_t* cache_ptr, const char* key, size_t klen, size_t* out_len) {
    if (unlikely(!cache_ptr || !key || !klen)) return NULL;

    struct cache_s* cache = (struct cache_s*)cache_ptr;
    uint32_t hash = hash_key(key, klen);
    aligned_cache_shard_t* shard = &cache->shards[get_shard_idx(hash)];

    fast_rwlock_rdlock(&shard->lock);

    bool found;
    size_t idx = find_slot(shard, hash, key, klen, &found);

    if (!found) {
        fast_rwlock_unlock_rd(&shard->lock);
        return NULL;
    }

    cache_slot_t* slot = &shard->slots[idx];
    cache_entry_t* entry = slot->entry;

    time_t now = time(NULL);

    // Check expiration
    if (unlikely(now >= entry->expires_at)) {
        fast_rwlock_unlock_rd(&shard->lock);

        // Upgrade to write lock to remove the expired entry.
        // Re-probe after acquiring the write lock (double-check locking): another
        // thread may have removed or replaced this entry in the window between
        // our read-unlock and this write-lock.
        fast_rwlock_wrlock(&shard->lock);
        bool re_found;
        size_t re_idx = find_slot(shard, hash, key, klen, &re_found);
        if (re_found) {
            cache_entry_t* re_entry = shard->slots[re_idx].entry;
            // Confirm the entry is still expired; a concurrent cache_set could have
            // refreshed the TTL between our read-unlock and this write-lock.
            if (now >= re_entry->expires_at) {
                shard->slots[re_idx].metadata = pack_meta(HASH_DELETED, 0);
                shard->slots[re_idx].entry = NULL;
                shard->size--;
                shard->tombstone_count++;
                // Compaction is not triggered here to avoid O(n) rehash on the
                // read path. The next cache_set or cache_invalidate will compact
                // if the tombstone threshold is crossed.
                entry_ref_dec(re_entry);
            }
        }
        fast_rwlock_unlock_wr(&shard->lock);
        return NULL;
    }

    // Silent store optimization: set the clock bit to 1 only if it is currently 0,
    // avoiding an unnecessary atomic write-back on every access.
    if (atomic_load_explicit(&entry->clock_bit, memory_order_relaxed) == 0) {
        atomic_store_explicit(&entry->clock_bit, 1, memory_order_relaxed);
    }
    // Increment ref count before releasing the lock so the entry cannot be freed
    // by a concurrent eviction or invalidation between unlock and caller use.
    entry_ref_inc(entry);

    if (out_len) *out_len = entry->value_len;

    fast_rwlock_unlock_rd(&shard->lock);
    return value_ptr_from_entry(entry);
}
533
550bool cache_set(cache_t* cache_ptr, const char* key, size_t klen, const void* value, size_t value_len, uint32_t ttl) {
551 if (unlikely(!cache_ptr || !key || !klen || !value || !value_len)) return false;
552
553 struct cache_s* cache = (struct cache_s*)cache_ptr;
554 uint32_t hash = hash_key(key, klen);
555 time_t now = time(NULL);
556
557 // Allocate and populate the new entry before acquiring the lock.
558 // Memory layout: [cache_entry_t header][key]['\0'][back_ptr][value]
559 size_t alloc_sz = offsetof(cache_entry_t, data) + klen + 1 + sizeof(void*) + value_len;
560 cache_entry_t* new_entry = malloc(alloc_sz);
561 if (!new_entry) return false;
562
563 new_entry->hash = hash;
564 new_entry->key_len = (uint32_t)klen;
565 new_entry->value_len = value_len;
566 new_entry->expires_at = now + (ttl ? ttl : cache->default_ttl);
567 atomic_init(&new_entry->ref_count, 1);
568 atomic_init(&new_entry->clock_bit, 1); // Mark as recently used on insertion
569
570 // Write key, null terminator, back-pointer, and value into the flexible array.
571 unsigned char* ptr = new_entry->data;
572 memcpy(ptr, key, klen);
573 ptr[klen] = '\0';
574 // Back-pointer: allows entry_from_value() to recover the entry from a value ptr.
575 *(cache_entry_t**)(ptr + klen + 1) = new_entry;
576 memcpy(ptr + klen + 1 + sizeof(cache_entry_t*), value, value_len);
577
578 aligned_cache_shard_t* shard = &cache->shards[get_shard_idx(hash)];
579 fast_rwlock_wrlock(&shard->lock);
580
581 // Compact before probing so that find_slot operates on a clean table.
582 // Threshold: tombstones exceed 75% of bucket capacity.
583 // Using bucket_count (not capacity) is correct: tombstones consume bucket slots,
584 // not logical capacity slots. A high tombstone-to-bucket ratio degrades probe
585 // chains regardless of how many live entries the shard holds.
586 if (unlikely(shard->tombstone_count > (shard->bucket_count * 3) / 4)) {
587 compact_shard(shard);
588 }
589
590 bool found;
591 size_t idx = find_slot(shard, hash, key, klen, &found);
592
593 if (found) {
594 // Key already exists: swap the entry pointer. Size is unchanged.
595 cache_entry_t* old = shard->slots[idx].entry;
596 shard->slots[idx].entry = new_entry;
597 entry_ref_dec(old);
598 } else {
599 // New key: evict if at capacity, then insert into the slot find_slot returned.
600 if (shard->size >= shard->capacity) {
601 // clock_evict() marks some other slot HASH_DELETED. It does NOT touch
602 // shard->slots or invalidate idx. The idx returned by find_slot above
603 // remains a valid insertion target — no second find_slot call needed.
604 if (!clock_evict(shard)) {
605 // Respect capacity limit on the cache.
606 // This can happen if the shard is full of entries with their clock bits set (recently accessed),
607 // causing clock_evict to scan the entire bucket array without finding a candidate for eviction.
608 fast_rwlock_unlock_wr(&shard->lock);
609 free(new_entry);
610 return false;
611 }
612 }
613
614 // If idx was previously a tombstone, reclaim it (reduce tombstone count).
615 if (meta_hash(shard->slots[idx].metadata) == HASH_DELETED) {
616 shard->tombstone_count--;
617 }
618 shard->slots[idx].entry = new_entry;
619 shard->slots[idx].metadata = pack_meta(hash, (uint32_t)klen);
620 shard->size++;
621 }
622
623 fast_rwlock_unlock_wr(&shard->lock);
624 return true;
625}
626
630void cache_invalidate(cache_t* cache_ptr, const char* key) {
631 if (!cache_ptr || !key) return;
632 struct cache_s* cache = (struct cache_s*)cache_ptr;
633 size_t klen = strlen(key);
634 uint32_t hash = hash_key(key, klen);
635 aligned_cache_shard_t* shard = &cache->shards[get_shard_idx(hash)];
636
637 fast_rwlock_wrlock(&shard->lock);
638 bool found;
639 size_t idx = find_slot(shard, hash, key, klen, &found);
640
641 if (found) {
642 cache_entry_t* entry = shard->slots[idx].entry;
643 shard->slots[idx].metadata = pack_meta(HASH_DELETED, 0);
644 shard->slots[idx].entry = NULL;
645 shard->size--;
646 shard->tombstone_count++;
647
648 // Use the updated bucket_count-relative threshold consistent with cache_set.
649 if (unlikely(shard->tombstone_count > (shard->bucket_count * 3) / 4)) {
650 compact_shard(shard);
651 }
652 entry_ref_dec(entry);
653 }
654 fast_rwlock_unlock_wr(&shard->lock);
655}
656
/**
 * Release a value pointer previously returned by cache_get, dropping the
 * reference taken on its owning entry. NULL is tolerated as a no-op.
 */
void cache_release(const void* ptr) {
    if (ptr) {
        entry_ref_dec(entry_from_value(ptr));
    }
}
665
669void cache_clear(cache_t* cache_ptr) {
670 if (!cache_ptr) return;
671 struct cache_s* cache = (struct cache_s*)cache_ptr;
672
673 for (int i = 0; i < CACHE_SHARD_COUNT; i++) {
674 fast_rwlock_wrlock(&cache->shards[i].lock);
675 for (size_t j = 0; j < cache->shards[i].bucket_count; j++) {
676 if (meta_hash(cache->shards[i].slots[j].metadata) >= HASH_MIN_VAL) {
677 entry_ref_dec(cache->shards[i].slots[j].entry);
678 }
679 }
680 memset(cache->shards[i].slots, 0, cache->shards[i].bucket_count * sizeof(cache_slot_t));
681 cache->shards[i].size = 0;
682 cache->shards[i].tombstone_count = 0;
683 cache->shards[i].clock_hand = 0;
684 fast_rwlock_unlock_wr(&cache->shards[i].lock);
685 }
686}
687
693size_t get_total_cache_size(cache_t* cache_ptr) {
694 if (!cache_ptr) return 0;
695 struct cache_s* cache = (struct cache_s*)cache_ptr;
696 size_t total = 0;
697 for (int i = 0; i < CACHE_SHARD_COUNT; i++) {
698 fast_rwlock_rdlock(&cache->shards[i].lock);
699 total += cache->shards[i].size;
700 fast_rwlock_unlock_rd(&cache->shards[i].lock);
701 }
702 return total;
703}
704
710size_t get_total_capacity(cache_t* cache_ptr) {
711 if (!cache_ptr) return 0;
712 struct cache_s* cache = (struct cache_s*)cache_ptr;
713 size_t total = 0;
714 for (int i = 0; i < CACHE_SHARD_COUNT; i++) {
715 fast_rwlock_rdlock(&cache->shards[i].lock);
716 total += cache->shards[i].capacity;
717 fast_rwlock_unlock_rd(&cache->shards[i].lock);
718 }
719 return total;
720}
721
722// --- Persistence Helpers ---
723
/** fwrite wrapper: true only when all `count` items were written. */
static inline bool file_write_chk(const void* ptr, size_t size, size_t count, FILE* stream) {
    size_t written = fwrite(ptr, size, count, stream);
    return written == count;
}
731
/** fread wrapper: true only when all `count` items were read. */
static inline bool file_read_chk(void* ptr, size_t size, size_t count, FILE* stream) {
    size_t got = fread(ptr, size, count, stream);
    return got == count;
}
739
752bool cache_save(cache_t* cache_ptr, const char* filename) {
753 if (!cache_ptr || !filename) return false;
754 struct cache_s* cache = (struct cache_s*)cache_ptr;
755
756 FILE* f = fopen(filename, "wb");
757 if (!f) return false;
758
759 uint32_t magic = CACHE_FILE_MAGIC;
760 uint32_t version = CACHE_FILE_VERSION;
761 uint64_t total_entries = 0; // Placeholder; patched after iteration
762
763 if (!file_write_chk(&magic, sizeof(magic), 1, f) || !file_write_chk(&version, sizeof(version), 1, f) ||
764 !file_write_chk(&total_entries, sizeof(total_entries), 1, f)) {
765 fclose(f);
766 return false;
767 }
768
769 uint64_t actual_count = 0;
770 time_t now = time(NULL);
771
772 for (int i = 0; i < CACHE_SHARD_COUNT; i++) {
773 aligned_cache_shard_t* shard = &cache->shards[i];
774
775 fast_rwlock_rdlock(&shard->lock);
776
777 for (size_t j = 0; j < shard->bucket_count; j++) {
778 cache_slot_t* slot = &shard->slots[j];
779
780 if (meta_hash(slot->metadata) < HASH_MIN_VAL) continue;
781
782 cache_entry_t* entry = slot->entry;
783 if (!entry || entry->expires_at <= now) continue;
784
785 uint32_t klen = entry->key_len;
786 uint64_t vlen = (uint64_t)entry->value_len;
787 int64_t expiry = (int64_t)entry->expires_at; // Cast to fixed-width for portability
788
789 const void* val_ptr = value_ptr_from_entry(entry);
790 if (!val_ptr) continue;
791
792 if (!file_write_chk(&klen, sizeof(klen), 1, f) || !file_write_chk(&vlen, sizeof(vlen), 1, f) ||
793 !file_write_chk(&expiry, sizeof(expiry), 1, f) || !file_write_chk(entry->data, 1, klen, f) ||
794 !file_write_chk(val_ptr, 1, (size_t)vlen, f)) {
795 fast_rwlock_unlock_rd(&shard->lock);
796 fclose(f);
797 return false;
798 }
799
800 actual_count++;
801 }
802
803 fast_rwlock_unlock_rd(&shard->lock);
804 }
805
806 // Patch the entry count in the header now that we know the true value.
807 fseek(f, sizeof(magic) + sizeof(version), SEEK_SET);
808 if (!file_write_chk(&actual_count, sizeof(actual_count), 1, f)) {
809 fclose(f);
810 return false;
811 }
812
813 fclose(f);
814 return true;
815}
816
828bool cache_load(cache_t* cache_ptr, const char* filename) {
829 if (!cache_ptr || !filename) return false;
830
831 FILE* f = fopen(filename, "rb");
832 if (!f) return false;
833
834 uint32_t magic = 0;
835 uint32_t version = 0;
836 uint64_t stored_count = 0;
837
838 if (!file_read_chk(&magic, sizeof(magic), 1, f) || !file_read_chk(&version, sizeof(version), 1, f) ||
839 !file_read_chk(&stored_count, sizeof(stored_count), 1, f)) {
840 fclose(f);
841 return false;
842 }
843
844 if (magic != CACHE_FILE_MAGIC || version != CACHE_FILE_VERSION) {
845 fclose(f);
846 return false;
847 }
848
849 char key_buf[512]; // Stack buffer for keys up to 511 bytes (covers the vast majority)
850 char* big_key_buf = NULL; // Heap fallback for oversized keys; reused across loop iterations
851 void* val_buf = NULL; // Heap buffer for values; grown as needed, reused across iterations
852 size_t val_buf_cap = 0;
853 time_t now = time(NULL);
854
855 bool success = true;
856
857 for (uint64_t i = 0; i < stored_count; i++) {
858 uint32_t klen;
859 uint64_t vlen;
860 int64_t expiry;
861
862 if (!file_read_chk(&klen, sizeof(klen), 1, f) || !file_read_chk(&vlen, sizeof(vlen), 1, f) ||
863 !file_read_chk(&expiry, sizeof(expiry), 1, f)) {
864 success = false;
865 break;
866 }
867
868 // Select key destination: stack buffer if it fits, heap buffer otherwise.
869 char* kptr = key_buf;
870 if (klen >= sizeof(key_buf)) {
871 big_key_buf = realloc(big_key_buf, klen + 1);
872 if (!big_key_buf) {
873 success = false;
874 break;
875 }
876 kptr = big_key_buf;
877 }
878
879 // Grow the value buffer if the current entry's value exceeds its capacity.
880 if (vlen > val_buf_cap) {
881 void* tmp = realloc(val_buf, vlen);
882 if (!tmp) {
883 success = false;
884 break;
885 }
886 val_buf = tmp;
887 val_buf_cap = vlen;
888 }
889
890 if (!file_read_chk(kptr, 1, klen, f) || !file_read_chk(val_buf, 1, (size_t)vlen, f)) {
891 success = false;
892 break;
893 }
894
895 kptr[klen] = '\0';
896
897 // Skip entries that expired before or during this load.
898 if ((time_t)expiry > now) {
899 // Compute remaining TTL to preserve the original expiry wall-clock time.
900 uint32_t ttl = (uint32_t)((time_t)expiry - now);
901
902 if (!cache_set(cache_ptr, kptr, klen, val_buf, (size_t)vlen, ttl)) {
903 success = false;
904 break;
905 }
906 }
907 }
908
909 free(big_key_buf);
910 free(val_buf);
911 fclose(f);
912 return success;
913}
bool cache_load(cache_t *cache_ptr, const char *filename)
Definition cache.c:828
void cache_clear(cache_t *cache)
Definition cache.c:669
void cache_release(const void *ptr)
Definition cache.c:661
void cache_destroy(cache_t *cache)
Definition cache.c:434
size_t get_total_cache_size(cache_t *cache)
Definition cache.c:693
bool cache_set(cache_t *cache, const char *key, size_t key_len, const void *value, size_t value_len, uint32_t ttl_override)
Definition cache.c:550
bool cache_save(cache_t *cache_ptr, const char *filename)
Definition cache.c:752
const void * cache_get(cache_t *cache, const char *key, size_t key_len, size_t *out_len)
Definition cache.c:467
size_t get_total_capacity(cache_t *cache)
Definition cache.c:710
cache_t * cache_create(size_t capacity, uint32_t default_ttl)
Definition cache.c:391
struct cache_s cache_t
Definition cache.h:25
void cache_invalidate(cache_t *cache, const char *key)
Definition cache.c:630
Reader-writer spinlock structure.
Definition spinlock.h:49