aboutsummaryrefslogtreecommitdiff
path: root/vec.c
diff options
context:
space:
mode:
authorAryadev Chavali <aryadev@aryadevchavali.com>2025-08-20 23:27:04 +0100
committerAryadev Chavali <aryadev@aryadevchavali.com>2025-08-20 23:37:08 +0100
commit847eb1a69b54da3a5d686922f0a2fcd8ab37f1e6 (patch)
tree057d4c1ca6f478a2909d0ee271d2bb8ff0f25c2f /vec.c
parent13142dc7f38e6b148efadc97edffca8664b9cde7 (diff)
downloadalisp-847eb1a69b54da3a5d686922f0a2fcd8ab37f1e6.tar.gz
alisp-847eb1a69b54da3a5d686922f0a2fcd8ab37f1e6.tar.bz2
alisp-847eb1a69b54da3a5d686922f0a2fcd8ab37f1e6.zip
Refactor vectors to SBO, removing inlined entirely.
Avoid 2 levels of indirection, and having to allocate twice for small payloads, by having an inlined array on the vector directly! Beautiful and simple. Required a bit of refactoring across the board, but overall the result makes me feel happier.
Diffstat (limited to 'vec.c')
-rw-r--r--vec.c51
1 files changed, 44 insertions, 7 deletions
diff --git a/vec.c b/vec.c
index 84f6944..c9128b5 100644
--- a/vec.c
+++ b/vec.c
@@ -18,15 +18,39 @@
#include "./alisp.h"
+void vec_init(vec_t *vec, u64 size)
+{
+ memset(vec, 0, sizeof(*vec));
+ if (!vec)
+ return;
+ else if (size <= VEC_INLINE_CAPACITY)
+ {
+ vec->is_inlined = 1;
+ vec->capacity = VEC_INLINE_CAPACITY;
+ vec->ptr = NULL;
+ }
+ else
+ {
+ vec->is_inlined = 0;
+ vec->capacity = size;
+ vec->ptr = calloc(1, vec->capacity);
+ }
+}
+
void vec_free(vec_t *vec)
{
  // Release any heap storage owned by VEC and reset it to an all-zero
  // state.  Inlined vectors own no heap memory, so only the heap case
  // frees; free(NULL) is a no-op, so no extra pointer guard is needed.
  if (vec == NULL)
    return;
  if (!vec->is_inlined)
    free(vec->ptr);
  memset(vec, 0, sizeof(*vec));
}
+void *vec_data(vec_t *vec)
+{
+ return vec->is_inlined ? vec->inlined : vec->ptr;
+}
+
void vec_ensure_free(vec_t *vec, u64 size)
{
  // Guarantee VEC has room for SIZE more bytes, growing geometrically
  // (VEC_MULT) so repeated appends are amortised O(1).  On allocation
  // failure the vector is left untouched and still valid.
  if (!vec)
    return;
  if (vec->capacity - vec->size >= size)
    return;
  u64 new_capacity = MAX(vec->capacity * VEC_MULT, vec->size + size);
  if (vec->is_inlined)
  {
    // Promote inline storage to the heap.  vec->ptr and vec->inlined
    // occupy the same space, so stage the payload in a temporary buffer
    // before writing vec->ptr.
    u8 buffer[VEC_INLINE_CAPACITY];
    memcpy(buffer, vec->inlined, vec->size);
    u8 *heap = calloc(1, new_capacity);
    if (!heap)
      return; // BUG FIX: original memcpy'd into an unchecked calloc result.
    memcpy(heap, buffer, vec->size);
    vec->ptr        = heap;
    vec->is_inlined = 0;
  }
  else
  {
    // BUG FIX: never `vec->ptr = realloc(vec->ptr, ...)` — on failure that
    // overwrites the only pointer to the old block, leaking it and losing
    // the payload.  Grow via a temporary and commit only on success.
    void *grown = realloc(vec->ptr, new_capacity);
    if (!grown)
      return; // Old buffer and capacity remain valid.
    vec->ptr = grown;
  }
  // Commit capacity only after allocation succeeded; the original updated
  // it up front, leaving capacity > actual storage on OOM.
  vec->capacity = new_capacity;
}
@@ -43,7 +81,7 @@ void vec_append(vec_t *vec, void *ptr, u64 size)
if (!vec)
return;
vec_ensure_free(vec, size);
- memcpy(vec->data + vec->size, ptr, size);
+ memcpy(vec_data(vec) + vec->size, ptr, size);
vec->size += size;
}
@@ -51,7 +89,6 @@ void vec_clone(vec_t *dest, vec_t *src)
{
if (!src || !dest)
return;
- dest = src;
- dest->data = calloc(1, dest->capacity);
- memcpy(dest->data, src->data, src->size);
+ vec_init(dest, src->capacity);
+ memcpy(vec_data(dest), vec_data(src), src->size);
}