/* mango/vm/cache.c — slab-style object cache allocator */

#include <socks/queue.h>
#include <stdlib.h>
#include <assert.h>
#include <socks/vm.h>
#define FREELIST_END ((unsigned int)-1)
static vm_cache_t cache_cache = { .c_name = "vm_cache", .c_obj_size = sizeof(vm_cache_t) };
/* Create a new object cache named `name` for objects of `objsz` bytes.
 * The very first call lazily bootstraps the static cache-of-caches that
 * all vm_cache_t descriptors are allocated from.
 * Returns the initialised cache, or NULL if the descriptor allocation
 * fails. */
vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags)
{
	if (!VM_CACHE_INITIALISED(&cache_cache)) {
		vm_cache_init(&cache_cache);
	}
	vm_cache_t *new_cache = vm_cache_alloc(&cache_cache, 0);
	if (!new_cache) {
		/* propagate the allocation failure instead of faulting on the
		   field assignments below */
		return NULL;
	}
	new_cache->c_name = name;
	new_cache->c_obj_size = objsz;
	new_cache->c_flags = flags;
	vm_cache_init(new_cache);
	return new_cache;
}
/* Compute the layout parameters for `cache` and reset its slab queues.
 * Fixes the page order, decides on-slab vs off-slab header placement,
 * rounds the object stride up to 16 bytes, and derives how many objects
 * (each costing one freelist entry) fit in a slab's page run. */
void vm_cache_init(vm_cache_t *cache)
{
	cache->c_page_order = VM_PAGE_16K;
	/* big objects get their header allocated out-of-line so the page
	   run holds objects only */
	if (cache->c_obj_size >= 512) {
		cache->c_flags |= VM_CACHE_OFFSLAB;
	}
	/* round each object slot up to a 16-byte boundary */
	size_t stride = (cache->c_obj_size + 0xF) & ~(size_t)0xF;
	cache->c_stride = stride;
	size_t available = vm_page_order_to_bytes(cache->c_page_order);
	if (!(cache->c_flags & VM_CACHE_OFFSLAB)) {
		/* an on-slab header eats into the space left for objects */
		available -= sizeof(vm_slab_t);
	}
	/* every object slot also pays for one freelist entry */
	cache->c_obj_count = available / (stride + sizeof(unsigned int));
	cache->c_slabs_full = QUEUE_INIT;
	cache->c_slabs_partial = QUEUE_INIT;
	cache->c_slabs_empty = QUEUE_INIT;
	cache->c_hdr_size = sizeof(vm_slab_t) + sizeof(unsigned int) * cache->c_obj_count;
}
/* Tear down `cache` and release its slabs.
 * Currently an unimplemented no-op: nothing is freed, so destroying a
 * cache leaks all of its slabs and the descriptor itself. */
void vm_cache_destroy(vm_cache_t *cache)
{
/* TODO */
}
/* Allocate and initialise a fresh slab for `cache`.
 * On-slab caches place the header (and freelist) at the start of the
 * page run; off-slab caches kmalloc the header separately and use the
 * whole run for objects. Every backing page is tagged with the slab so
 * vm_cache_free() can find it again. Returns NULL on allocation
 * failure. */
static vm_slab_t *alloc_slab(vm_cache_t *cache, vm_flags_t flags)
{
	vm_page_t *slab_page = vm_page_alloc(cache->c_page_order, flags);
	if (!slab_page) {
		return NULL;
	}
	vm_slab_t *slab_hdr = NULL;
	void *slab_data = vm_page_get_vaddr(slab_page);
	if (cache->c_flags & VM_CACHE_OFFSLAB) {
		/* NOTE the circular dependency here:
		   kmalloc -> vm_cache_alloc -> alloc_slab -> kmalloc
		   since this call path is only used for caches with
		   VM_CACHE_OFFSLAB set, we avoid the circular dependency
		   by ensuring the small size-N (where N < 512) caches
		   (which don't use that flag) are initialised before
		   attempting to allocate from an offslab cache. */
		slab_hdr = kmalloc(cache->c_hdr_size, flags);
		if (!slab_hdr) {
			/* TODO(review): the page run leaks here — release it
			   once the page-free API to use is confirmed */
			return NULL;
		}
		slab_hdr->s_objects = slab_data;
	} else {
		slab_hdr = slab_data;
		slab_hdr->s_objects = (void *)((char *)slab_data + cache->c_hdr_size);
	}
	slab_hdr->s_cache = cache;
	slab_hdr->s_list = QUEUE_ENTRY_INIT;
	slab_hdr->s_obj_allocated = 0;
	slab_hdr->s_free = 0;
	/* chain slot i -> i+1; the final entry terminates the freelist.
	   guard the [c_obj_count - 1] write against a zero-object cache */
	assert(cache->c_obj_count > 0);
	for (unsigned int i = 0; i < cache->c_obj_count; i++) {
		slab_hdr->s_freelist[i] = i + 1;
	}
	slab_hdr->s_freelist[cache->c_obj_count - 1] = FREELIST_END;
	/* back-pointer from each page to its slab, used on the free path */
	vm_page_foreach (slab_page, i) {
		i->p_slab = slab_hdr;
	}
	return slab_hdr;
}
/* Release a slab's backing pages (and, for off-slab caches, its header).
 * Currently an unimplemented no-op stub. */
static void destroy_slab(vm_slab_t *slab)
{
}
/* Pop the head of the slab's embedded freelist.
 * Returns the allocated slot index, or FREELIST_END if the slab has no
 * free slots left. */
static unsigned int slab_allocate_slot(vm_slab_t *slab)
{
	unsigned int head = slab->s_free;
	if (head == FREELIST_END) {
		return FREELIST_END;
	}
	slab->s_free = slab->s_freelist[head];
	slab->s_obj_allocated++;
	return head;
}
/* Push `slot` back onto the head of the slab's freelist and drop the
 * live-object count. */
static void slab_free_slot(vm_slab_t *slab, unsigned int slot)
{
	slab->s_freelist[slot] = slab->s_free;
	slab->s_free = slot;
	slab->s_obj_allocated--;
}
/* Translate a slot index into the address of that object within the
 * slab's object area. */
static void *slot_to_pointer(vm_slab_t *slab, unsigned int slot)
{
	char *base = slab->s_objects;
	return base + slot * slab->s_cache->c_stride;
}
/* Inverse of slot_to_pointer(): map an object address back to its slot
 * index via its byte offset into the slab's object area. */
static unsigned int pointer_to_slot(vm_slab_t *slab, void *p)
{
	uintptr_t base = (uintptr_t)slab->s_objects;
	size_t offset = (uintptr_t)p - base;
	return offset / slab->s_cache->c_stride;
}
/* Allocate one object from `cache`.
 * Under the cache lock (interrupts disabled): take a slab from the
 * partial list first, then the empty list, and only grow the cache with
 * a brand-new slab when both are exhausted. The chosen slab is requeued
 * on the list matching its new fill state. Returns NULL when a new slab
 * could not be allocated. */
void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&cache->c_lock, &irq_flags);
	queue_entry_t *entry = NULL;
	if (!queue_empty(&cache->c_slabs_partial)) {
		/* use up partially-full slabs before touching pristine ones */
		entry = queue_pop_front(&cache->c_slabs_partial);
		assert(entry);
	} else if (!queue_empty(&cache->c_slabs_empty)) {
		entry = queue_pop_front(&cache->c_slabs_empty);
		assert(entry);
	}
	vm_slab_t *slab;
	if (entry) {
		slab = QUEUE_CONTAINER(vm_slab_t, s_list, entry);
	} else {
		/* every slab is full: grow the cache */
		slab = alloc_slab(cache, flags);
	}
	if (!slab) {
		spin_unlock_irqrestore(&cache->c_lock, irq_flags);
		return NULL;
	}
	unsigned int slot = slab_allocate_slot(slab);
	void *obj = slot_to_pointer(slab, slot);
	/* requeue according to whether that allocation filled the slab */
	if (slab->s_free != FREELIST_END) {
		queue_push_back(&cache->c_slabs_partial, &slab->s_list);
	} else {
		queue_push_back(&cache->c_slabs_full, &slab->s_list);
	}
	spin_unlock_irqrestore(&cache->c_lock, irq_flags);
	return obj;
}
/* Return object `p` to `cache`.
 * The owning slab is located through the page database's per-page slab
 * back-pointer. Silently ignores NULL, pointers whose page carries no
 * slab, and pointers whose slab belongs to a different cache. The slab
 * is moved between the full/partial/empty lists to reflect its fill
 * state after the free. */
void vm_cache_free(vm_cache_t *cache, void *p)
{
	if (!p) {
		/* tolerate freeing NULL, like free(NULL) */
		return;
	}
	unsigned long irq_flags;
	spin_lock_irqsave(&cache->c_lock, &irq_flags);
	phys_addr_t phys = vm_virt_to_phys(p);
	vm_page_t *pg = vm_page_get(phys);
	if (!pg || !pg->p_slab) {
		/* not memory this allocator handed out */
		spin_unlock_irqrestore(&cache->c_lock, irq_flags);
		return;
	}
	vm_slab_t *slab = pg->p_slab;
	if (slab->s_cache != cache) {
		/* object belongs to another cache — refuse to touch it */
		spin_unlock_irqrestore(&cache->c_lock, irq_flags);
		return;
	}
	/* unlink from whichever list reflects the slab's current state:
	   an exhausted freelist means it sits on the full list */
	if (slab->s_free == FREELIST_END) {
		queue_delete(&cache->c_slabs_full, &slab->s_list);
	} else {
		queue_delete(&cache->c_slabs_partial, &slab->s_list);
	}
	unsigned int slot = pointer_to_slot(slab, p);
	slab_free_slot(slab, slot);
	/* fully drained slabs go to the empty list (reclaim candidates) */
	if (slab->s_obj_allocated == 0) {
		queue_push_back(&cache->c_slabs_empty, &slab->s_list);
	} else {
		queue_push_back(&cache->c_slabs_partial, &slab->s_list);
	}
	spin_unlock_irqrestore(&cache->c_lock, irq_flags);
}