diff --git a/include/socks/util.h b/include/socks/util.h
index ba2fd65..8205781 100644
--- a/include/socks/util.h
+++ b/include/socks/util.h
@@ -2,7 +2,9 @@
 #define SOCKS_UTIL_H_
 
 #include <stddef.h>
+#include <stdbool.h>
 
 extern void data_size_to_string(size_t value, char *out, size_t outsz);
+static inline bool power_of_2(size_t x) { return (x > 0 && (x & (x - 1)) == 0); }
 
 #endif
diff --git a/include/socks/vm.h b/include/socks/vm.h
index 741ec0b..2072b2d 100644
--- a/include/socks/vm.h
+++ b/include/socks/vm.h
@@ -137,9 +137,11 @@ typedef struct vm_cache {
 	unsigned int c_obj_size;
 	/* combined size of vm_slab_t and the freelist */
 	unsigned int c_hdr_size;
+	/* power of 2 alignment for objects returned from the cache */
+	unsigned int c_align;
 	/* offset from one object to the next in a slab.
-	   this may be different from c_obj_size as
-	   we enforce a 16-byte alignment on allocated objects */
+	   this may be different from c_obj_size depending
+	   on the alignment settings for this cache. */
 	unsigned int c_stride;
 	/* size of page used for slabs */
 	unsigned int c_page_order;
diff --git a/vm/cache.c b/vm/cache.c
index 016b49b..f4d1b54 100644
--- a/vm/cache.c
+++ b/vm/cache.c
@@ -1,7 +1,8 @@
 #include <socks/vm.h>
 #include <socks/queue.h>
-#include <string.h>
 #include <stdlib.h>
+#include <string.h>
+#include <socks/util.h>
 
 #define FREELIST_END ((unsigned int)-1)
@@ -31,13 +32,19 @@ void vm_cache_init(vm_cache_t *cache)
 	{
 		cache->c_flags |= VM_CACHE_OFFSLAB;
 	}
 
+	if (power_of_2(cache->c_obj_size)) {
+		cache->c_align = cache->c_obj_size;
+	} else {
+		cache->c_align = 8;
+	}
+
 	size_t available = vm_page_order_to_bytes(cache->c_page_order);
 	size_t space_per_item = cache->c_obj_size;
 
-	/* align to 16-byte boundary */
-	if (space_per_item & 0xF) {
-		space_per_item &= ~0xF;
-		space_per_item += 0x10;
+	/* align to specified boundary */
+	if (space_per_item & (cache->c_align - 1)) {
+		space_per_item &= ~(cache->c_align - 1);
+		space_per_item += cache->c_align;
 	}
 	cache->c_stride = space_per_item;
@@ -55,6 +62,15 @@ void vm_cache_init(vm_cache_t *cache)
 	cache->c_slabs_empty = QUEUE_INIT;
 
 	cache->c_hdr_size = sizeof(vm_slab_t) + (sizeof(unsigned int) * cache->c_obj_count);
+
+	/* for on-slab caches, c_hdr_size is added to the slab pointer to
+	   get the object buffer pointer. by aligning c_hdr_size to the
+	   requested alignment, we ensure that the object buffer
+	   is aligned too */
+	if (cache->c_hdr_size & (cache->c_align - 1)) {
+		cache->c_hdr_size &= ~(cache->c_align - 1);
+		cache->c_hdr_size += cache->c_align;
+	}
 }
 
 void vm_cache_destroy(vm_cache_t *cache)