sandbox: vm: add cache allocator, generic size-N caches for kmalloc()

2023-02-02 16:58:48 +00:00
parent 3c781a4cb3
commit 662be5ec1f
4 changed files with 312 additions and 14 deletions


@@ -174,9 +174,6 @@ int memory_test(void)
print_free_pages(&pg_data->pg_zones[i]);
}
printf("all pages:\n");
print_all_pages();
vm_page_t *pg = vm_page_alloc(VM_PAGE_128K, 0);
printf("allocated 128K at 0x%lx\n", vm_page_get_paddr(pg));
@@ -242,6 +239,9 @@ int memory_test(void)
}
}
void *p = kmalloc(32, 0);
printf("allocated 32 bytes at %p\n", p);
munmap(system_memory, MB_TO_BYTES(MEMORY_SIZE_MB));
return 0;
}

sandbox/vm/cache.c (new file, 167 lines)

@@ -0,0 +1,167 @@
#include "socks/queue.h"
#include <socks/vm.h>
#define FREELIST_END ((unsigned int)-1)
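/* the cache of caches: vm_cache_t structures are themselves slab-allocated */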
static vm_cache_t cache_cache = { .c_name = "vm_cache", .c_obj_size = sizeof(vm_cache_t) };
vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags)
{
if (!VM_CACHE_INITIALISED(&cache_cache)) {
vm_cache_init(&cache_cache);
}
vm_cache_t *new_cache = vm_cache_alloc(&cache_cache, 0);
if (!new_cache) {
return NULL;
}
new_cache->c_name = name;
new_cache->c_obj_size = objsz;
new_cache->c_flags = flags;
vm_cache_init(new_cache);
return new_cache;
}
void vm_cache_init(vm_cache_t *cache)
{
cache->c_page_order = VM_PAGE_16K;
if (cache->c_obj_size >= 512) {
cache->c_flags |= VM_CACHE_OFFSLAB;
}
size_t available = vm_page_order_to_bytes(cache->c_page_order);
size_t space_per_item = cache->c_obj_size;
/* align to 16-byte boundary */
if (space_per_item & 0xF) {
space_per_item &= ~0xF;
space_per_item += 0x10;
}
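/* e.g. a 40-byte object is padded to a 48-byte stride */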
cache->c_stride = space_per_item;
if (!(cache->c_flags & VM_CACHE_OFFSLAB)) {
available -= sizeof(vm_slab_t);
}
/* one entry in the freelist per object slot */
space_per_item += sizeof(unsigned int);
cache->c_obj_count = available / space_per_item;
/* slab header plus one freelist entry per object slot */
cache->c_hdr_size = sizeof(vm_slab_t) + cache->c_obj_count * sizeof(unsigned int);
cache->c_slabs_full = QUEUE_INIT;
cache->c_slabs_partial = QUEUE_INIT;
cache->c_slabs_empty = QUEUE_INIT;
}
void vm_cache_destroy(vm_cache_t *cache)
{
/* TODO */
}
static vm_slab_t *alloc_slab(vm_cache_t *cache, vm_flags_t flags)
{
vm_page_t *slab_page = vm_page_alloc(cache->c_page_order, flags);
if (!slab_page) {
return NULL;
}
vm_slab_t *slab_hdr = NULL;
void *slab_data = vm_page_get_vaddr(slab_page);
if (cache->c_flags & VM_CACHE_OFFSLAB) {
/* NOTE the circular dependency here:
kmalloc -> vm_cache_alloc -> alloc_slab -> kmalloc
since this call path is only used for caches with
VM_CACHE_OFFSLAB set, we avoid the circular dependency
by ensuring the small size-N (where N < 512) caches
(which don't use that flag) are initialised before
attempting to allocate from an offslab cache. */
slab_hdr = kmalloc(cache->c_hdr_size, flags);
if (!slab_hdr) {
return NULL;
}
slab_hdr->s_objects = slab_data;
} else {
slab_hdr = slab_data;
slab_hdr->s_objects = (void *)((char *)slab_data + cache->c_hdr_size);
}
slab_hdr->s_cache = cache;
slab_hdr->s_list = QUEUE_ENTRY_INIT;
slab_hdr->s_obj_allocated = 0;
slab_hdr->s_free = 0;
for (unsigned int i = 0; i < cache->c_obj_count; i++) {
slab_hdr->s_freelist[i] = i + 1;
}
slab_hdr->s_freelist[cache->c_obj_count - 1] = FREELIST_END;
return slab_hdr;
}
static void destroy_slab(vm_slab_t *slab)
{
/* TODO: return the slab's pages to the buddy allocator */
}
static unsigned int slab_allocate_slot(vm_slab_t *slab)
{
if (slab->s_free == FREELIST_END) {
return FREELIST_END;
}
unsigned int slot = slab->s_free;
slab->s_free = slab->s_freelist[slab->s_free];
slab->s_obj_allocated++;
return slot;
}
static void slab_free_slot(vm_slab_t *slab, unsigned int slot)
{
unsigned int next = slab->s_free;
slab->s_free = slot;
slab->s_freelist[slot] = next;
slab->s_obj_allocated--;
}
static void *slot_to_pointer(vm_slab_t *slab, unsigned int slot)
{
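/* e.g. with a 48-byte stride, slot 3 maps to s_objects + 144 */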
return (void *)((char *)slab->s_objects + (slot * slab->s_cache->c_stride));
}
static unsigned int pointer_to_slot(vm_slab_t *slab, void *p)
{
size_t offset = (uintptr_t)p - (uintptr_t)slab->s_objects;
return offset / slab->s_cache->c_stride;
}
void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags)
{
vm_slab_t *slab = NULL;
if (queue_length(&cache->c_slabs_partial) > 0) {
/* prefer using up partially-full slabs before taking a fresh one */
slab = QUEUE_CONTAINER(vm_slab_t, s_list, queue_pop_front(&cache->c_slabs_partial));
} else if (queue_length(&cache->c_slabs_empty) > 0) {
slab = QUEUE_CONTAINER(vm_slab_t, s_list, queue_pop_front(&cache->c_slabs_empty));
} else {
/* we've run out of slabs. create a new one */
slab = alloc_slab(cache, flags);
}
if (!slab) {
return NULL;
}
unsigned int slot = slab_allocate_slot(slab);
void *p = slot_to_pointer(slab, slot);
if (slab->s_free == FREELIST_END) {
queue_push_back(&cache->c_slabs_full, &slab->s_list);
} else {
queue_push_back(&cache->c_slabs_partial, &slab->s_list);
}
return p;
}
void vm_cache_free(vm_cache_t *cache, void *p)
{
/* TODO: find the owning slab and return the slot to its freelist */
}
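A minimal usage sketch of the cache API above; the foo_t type, cache_example(), and the zero flags arguments are illustrative, not part of this commit:

typedef struct foo {
    int f_id;
    char f_name[24];
} foo_t;

void cache_example(void)
{
    /* one cache dedicated to foo_t objects */
    vm_cache_t *foo_cache = vm_cache_create("foo", sizeof(foo_t), 0);
    foo_t *f = vm_cache_alloc(foo_cache, 0);
    /* ... use f ... */
    vm_cache_free(foo_cache, f); /* currently a no-op; freeing is TODO */
}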


@@ -17,6 +17,7 @@
#define VM_PAGE_SIZE 0x1000
#define VM_PAGE_SHIFT 12
#define VM_CACHE_INITIALISED(c) ((c)->c_obj_count != 0)
#define VM_PAGE_IS_FREE(pg) (((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0)
#define vm_page_foreach(pg, i) \
@@ -73,11 +74,28 @@ typedef enum vm_page_order {
VM_PAGE_MAX_ORDER = VM_PAGE_8M,
} vm_page_order_t;
typedef enum vm_page_flags {
/* page is reserved (probably by a call to memblock_reserve()) and cannot be
returned by any allocation function */
VM_PAGE_RESERVED = 0x01u,
/* page has been allocated by a zone's buddy allocator, and is in-use */
VM_PAGE_ALLOC = 0x02u,
/* page is the first page of a huge-page */
VM_PAGE_HEAD = 0x04u,
/* page is part of a huge-page */
VM_PAGE_HUGE = 0x08u,
} vm_page_flags_t;
typedef enum vm_memory_region_status {
VM_REGION_FREE = 0x01u,
VM_REGION_RESERVED = 0x02u,
} vm_memory_region_status_t;
typedef enum vm_cache_flags {
VM_CACHE_OFFSLAB = 0x01u,
VM_CACHE_DMA = 0x02u
} vm_cache_flags_t;
typedef struct vm_zone_descriptor {
vm_zone_id_t zd_id;
vm_node_id_t zd_node;
@@ -103,17 +121,50 @@ typedef struct vm_region {
phys_addr_t r_limit;
} vm_region_t;
typedef struct vm_cache {
const char *c_name;
vm_cache_flags_t c_flags;
queue_entry_t c_list;
queue_t c_slabs_full;
queue_t c_slabs_partial;
queue_t c_slabs_empty;
/* number of objects that can be stored in a single slab */
unsigned int c_obj_count;
/* the size of the objects kept in the cache */
unsigned int c_obj_size;
/* combined size of vm_slab_t and the freelist */
unsigned int c_hdr_size;
/* offset from one object to the next in a slab.
this may be different from c_obj_size as
we enforce a 16-byte alignment on allocated objects */
unsigned int c_stride;
/* size of page used for slabs */
unsigned int c_page_order;
} vm_cache_t;
typedef struct vm_slab {
vm_cache_t *s_cache;
/* queue entry for vm_cache_t.c_slabs_* */
queue_entry_t s_list;
/* pointer to the first object slot. */
void *s_objects;
/* the number of objects allocated on the slab. */
unsigned int s_obj_allocated;
/* the index of the next free object.
if s_free is equal to FREELIST_END (defined in vm/cache.c)
there are no free slots left in the slab. */
unsigned int s_free;
/* list of free object slots.
when allocating:
- s_free should be set to the value of s_freelist[s_free]
when freeing:
- s_free should be set to the index of the object being freed.
- s_freelist[s_free] should be set to the previous value of s_free.
*/
unsigned int s_freelist[];
} vm_slab_t;
typedef struct vm_page {
/* order of the page block that this page belongs to */
@@ -135,6 +186,13 @@ typedef struct vm_page {
*/
queue_entry_t p_list;
/* owner-specific data */
union {
struct {
vm_slab_t *p_slab;
};
};
} __attribute__((aligned(2 * sizeof(unsigned long)))) vm_page_t;
extern kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones);
@@ -164,4 +222,14 @@ extern void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info);
extern vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags);
extern void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg);
extern vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags);
extern void vm_cache_init(vm_cache_t *cache);
extern void vm_cache_destroy(vm_cache_t *cache);
extern void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags);
extern void vm_cache_free(vm_cache_t *cache, void *p);
extern void *kmalloc(size_t count, vm_flags_t flags);
extern void *kzalloc(size_t count, vm_flags_t flags);
extern void kfree(void *p);
#endif
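The freelist protocol documented on vm_slab_t above is easiest to see with a worked trace; a sketch assuming a four-slot slab, with FREELIST_END written as E:

/* initial:      s_free = 0, s_freelist = {1, 2, 3, E}
 * alloc -> 0:   s_free = 1, s_freelist = {1, 2, 3, E}
 * alloc -> 1:   s_free = 2, s_freelist = {1, 2, 3, E}
 * free(0):      s_free = 0, s_freelist = {2, 2, 3, E}
 * alloc -> 0:   s_free = 2, s_freelist = {2, 2, 3, E}
 */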

sandbox/vm/kmalloc.c (new file, 63 lines)

@@ -0,0 +1,63 @@
#include <socks/vm.h>
#include <string.h>
#define SIZE_N_CACHE(s) \
{ .c_name = "size-" # s, .c_obj_size = s, .c_page_order = VM_PAGE_16K }
/* reserve space for the size-N caches: */
static vm_cache_t size_n_caches[] = {
SIZE_N_CACHE(16),
SIZE_N_CACHE(32),
SIZE_N_CACHE(48),
SIZE_N_CACHE(64),
SIZE_N_CACHE(96),
SIZE_N_CACHE(128),
SIZE_N_CACHE(160),
SIZE_N_CACHE(256),
SIZE_N_CACHE(388),
SIZE_N_CACHE(512),
SIZE_N_CACHE(576),
SIZE_N_CACHE(768),
SIZE_N_CACHE(1024),
SIZE_N_CACHE(1664),
SIZE_N_CACHE(2048),
SIZE_N_CACHE(3072),
SIZE_N_CACHE(4096),
};
static const size_t nr_size_n_caches = sizeof size_n_caches / sizeof size_n_caches[0];
void *kmalloc(size_t count, vm_flags_t flags)
{
vm_cache_t *best_fit = NULL;
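/* size_n_caches is ordered by c_obj_size, so the first cache that fits is the tightest fit */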
for (size_t i = 0; i < nr_size_n_caches; i++) {
if (size_n_caches[i].c_obj_size >= count) {
best_fit = &size_n_caches[i];
break;
}
}
if (!best_fit) {
return NULL;
}
if (!VM_CACHE_INITIALISED(best_fit)) {
vm_cache_init(best_fit);
}
return vm_cache_alloc(best_fit, flags);
}
void *kzalloc(size_t count, vm_flags_t flags)
{
void *p = kmalloc(count, flags);
if (p) {
memset(p, 0x0, count);
}
return p;
}
void kfree(void *p)
{
/* TODO */
}
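A short usage sketch of the new entry points; the request struct, kmalloc_example(), and the zero flags argument are illustrative, not part of this commit:

struct request {
    int r_id;
    char r_buf[100];
};

void kmalloc_example(void)
{
    /* sizeof(struct request) is 104 bytes, so this is
       served from the size-128 cache */
    struct request *req = kzalloc(sizeof *req, 0);
    if (req) {
        req->r_id = 1;
        kfree(req); /* currently a no-op; kfree() is TODO */
    }
}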