#include <socks/queue.h>
#include <socks/compiler.h>
#include <socks/panic.h>
#include <socks/vm.h>
#include <socks/util.h>
#include <stddef.h>
#include <stdint.h> /* uintptr_t */

#define FREELIST_END ((unsigned int)-1)

static struct vm_cache cache_cache = { .c_name = "vm_cache", .c_obj_size = sizeof(struct vm_cache) };
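
/*
 * Usage sketch (illustrative only; assumes a caller-defined struct foo and
 * that 0 is an acceptable value for the flag arguments):
 *
 *	struct vm_cache *foo_cache = vm_cache_create("foo", sizeof(struct foo), 0);
 *	struct foo *f = vm_cache_alloc(foo_cache, 0);
 *	...
 *	vm_cache_free(foo_cache, f);
 */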

struct vm_cache *vm_cache_create(const char *name, size_t objsz, enum vm_cache_flags flags)
{
	if (!VM_CACHE_INITIALISED(&cache_cache)) {
		vm_cache_init(&cache_cache);
	}

	struct vm_cache *new_cache = vm_cache_alloc(&cache_cache, 0);
	if (!new_cache) {
		return NULL;
	}

	new_cache->c_name = name;
	new_cache->c_obj_size = objsz;
	new_cache->c_flags = flags;

	vm_cache_init(new_cache);

	return new_cache;
}
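
/*
 * On-slab layout, as computed below:
 *
 *	[struct vm_slab][c_obj_count x unsigned int freelist][pad to c_align][objects, c_stride bytes apart]
 *
 * Off-slab caches (VM_CACHE_OFFSLAB) keep the header and freelist in a
 * separate kmalloc'd buffer and use the whole page run for objects.
 *
 * Worked example (illustrative; assumes sizeof(struct vm_slab) == 48): a
 * 100-byte object is not a power of two, so c_align = 8 and c_stride = 104.
 * With a 16 KiB page, available = 16384 - 48 = 16336 bytes, each slot costs
 * 104 + 4 = 108 bytes, giving c_obj_count = 151 and c_hdr_size = 48 + 604,
 * rounded up to 656.
 */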

void vm_cache_init(struct vm_cache *cache)
{
	cache->c_page_order = VM_PAGE_16K;
	if (cache->c_obj_size >= 512) {
		cache->c_flags |= VM_CACHE_OFFSLAB;
	}

	if (power_of_2(cache->c_obj_size)) {
		cache->c_align = cache->c_obj_size;
	} else {
		cache->c_align = 8;
	}

	size_t available = vm_page_order_to_bytes(cache->c_page_order);
	size_t space_per_item = cache->c_obj_size;

	/* round the object size up to the chosen alignment boundary */
	if (space_per_item & (cache->c_align - 1)) {
		space_per_item &= ~(cache->c_align - 1);
		space_per_item += cache->c_align;
	}

	cache->c_stride = space_per_item;

	/* on-slab caches keep the slab header in the page itself */
	if (!(cache->c_flags & VM_CACHE_OFFSLAB)) {
		available -= sizeof(struct vm_slab);
	}

	/* one entry in the freelist per object slot */
	space_per_item += sizeof(unsigned int);

	cache->c_obj_count = available / space_per_item;
	cache->c_slabs_full = QUEUE_INIT;
	cache->c_slabs_partial = QUEUE_INIT;
	cache->c_slabs_empty = QUEUE_INIT;

	cache->c_hdr_size = sizeof(struct vm_slab) + (sizeof(unsigned int) * cache->c_obj_count);

	/* for on-slab caches, c_hdr_size is added to the slab pointer to
	   get the object buffer pointer. by aligning c_hdr_size to the
	   requested alignment, we ensure that the object buffer is
	   aligned too */
	if ((cache->c_hdr_size & (cache->c_align - 1)) && !(cache->c_flags & VM_CACHE_OFFSLAB)) {
		cache->c_hdr_size &= ~(cache->c_align - 1);
		cache->c_hdr_size += cache->c_align;
	}
}

void vm_cache_destroy(struct vm_cache *cache)
{
	/* TODO */
}
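
/*
 * Each slab keeps a simple index-linked freelist immediately after its
 * struct vm_slab header: entry i holds the index of the next free slot,
 * s_free points at the first free slot, and FREELIST_END terminates the
 * chain. Slot allocation and free are therefore O(1) pops and pushes on
 * that list.
 */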

static struct vm_slab *alloc_slab(struct vm_cache *cache, enum vm_flags flags)
{
	struct vm_page *slab_page = vm_page_alloc(cache->c_page_order, flags);
	if (!slab_page) {
		return NULL;
	}

	struct vm_slab *slab_hdr = NULL;
	void *slab_data = vm_page_get_vaddr(slab_page);

	if (cache->c_flags & VM_CACHE_OFFSLAB) {
		/* NOTE the circular dependency here:

		   kmalloc -> vm_cache_alloc -> alloc_slab -> kmalloc

		   since this call path is only used for caches with
		   VM_CACHE_OFFSLAB set, we avoid the circular dependency
		   by ensuring the small size-N (where N < 512) caches
		   (which don't use that flag) are initialised before
		   attempting to allocate from an offslab cache. */
		slab_hdr = kmalloc(cache->c_hdr_size, flags);
		if (!slab_hdr) {
			/* XXX: slab_page is leaked here; this file has no page-free path to call */
			return NULL;
		}
		slab_hdr->s_objects = slab_data;
	} else {
		slab_hdr = slab_data;
		slab_hdr->s_objects = (void *)((char *)slab_data + cache->c_hdr_size);
	}

	slab_hdr->s_cache = cache;
	slab_hdr->s_list = QUEUE_ENTRY_INIT;
	slab_hdr->s_obj_allocated = 0;
	slab_hdr->s_free = 0;

	/* chain every slot into the freelist: slot i points at slot i + 1 */
	unsigned int *freelist = (unsigned int *)(slab_hdr + 1);
	for (unsigned int i = 0; i < cache->c_obj_count; i++) {
		freelist[i] = i + 1;
	}

	/* terminate the chain at the last slot */
	freelist[cache->c_obj_count - 1] = FREELIST_END;

	/* let vm_cache_free() find the slab from any object address via its page */
	vm_page_foreach (slab_page, i) {
		i->p_slab = slab_hdr;
	}

	return slab_hdr;
}

static void __used destroy_slab(struct vm_slab *slab)
{
	/* TODO */
}

static unsigned int slab_allocate_slot(struct vm_slab *slab)
{
	if (slab->s_free == FREELIST_END) {
		return FREELIST_END;
	}

	/* pop the head of the freelist */
	unsigned int slot = slab->s_free;
	unsigned int *freelist = (unsigned int *)(slab + 1);
	slab->s_free = freelist[slot];
	slab->s_obj_allocated++;

	return slot;
}

static void slab_free_slot(struct vm_slab *slab, unsigned int slot)
{
	unsigned int next = slab->s_free;
	unsigned int *freelist = (unsigned int *)(slab + 1);

	/* push the slot back onto the head of the freelist */
	slab->s_free = slot;
	freelist[slot] = next;
	slab->s_obj_allocated--;
}
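
/* slots map to object addresses at fixed c_stride intervals from s_objects */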

static void *slot_to_pointer(struct vm_slab *slab, unsigned int slot)
{
	return (void *)((char *)slab->s_objects + (slot * slab->s_cache->c_stride));
}

static unsigned int pointer_to_slot(struct vm_slab *slab, void *p)
{
	size_t offset = (uintptr_t)p - (uintptr_t)slab->s_objects;
	return offset / slab->s_cache->c_stride;
}

void *vm_cache_alloc(struct vm_cache *cache, enum vm_flags flags)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&cache->c_lock, &irq_flags);

	struct vm_slab *slab = NULL;
	if (!queue_empty(&cache->c_slabs_partial)) {
		/* prefer using up partially-full slabs before taking a fresh one */
		struct queue_entry *slab_entry = queue_pop_front(&cache->c_slabs_partial);
		slab = QUEUE_CONTAINER(struct vm_slab, s_list, slab_entry);
	} else if (!queue_empty(&cache->c_slabs_empty)) {
		struct queue_entry *slab_entry = queue_pop_front(&cache->c_slabs_empty);
		slab = QUEUE_CONTAINER(struct vm_slab, s_list, slab_entry);
	} else {
		/* we've run out of slabs; create a new one */
		slab = alloc_slab(cache, flags);
	}

	if (!slab) {
		spin_unlock_irqrestore(&cache->c_lock, irq_flags);
		return NULL;
	}

	unsigned int slot = slab_allocate_slot(slab);
	void *p = slot_to_pointer(slab, slot);

	/* requeue the slab according to how full it now is */
	if (slab->s_free == FREELIST_END) {
		queue_push_back(&cache->c_slabs_full, &slab->s_list);
	} else {
		queue_push_back(&cache->c_slabs_partial, &slab->s_list);
	}

	spin_unlock_irqrestore(&cache->c_lock, irq_flags);
	return p;
}
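
/*
 * The owning slab is recovered from the object address by mapping it back to
 * its physical page and reading the p_slab pointer stashed there by
 * alloc_slab(); frees with no matching slab, or with a slab belonging to a
 * different cache, are silently ignored.
 */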

void vm_cache_free(struct vm_cache *cache, void *p)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&cache->c_lock, &irq_flags);

	phys_addr_t phys = vm_virt_to_phys(p);
	struct vm_page *pg = vm_page_get(phys);

	if (!pg || !pg->p_slab) {
		spin_unlock_irqrestore(&cache->c_lock, irq_flags);
		return;
	}

	struct vm_slab *slab = pg->p_slab;

	if (slab->s_cache != cache) {
		spin_unlock_irqrestore(&cache->c_lock, irq_flags);
		return;
	}

	/* the slab will move lists, so unlink it from whichever list holds it now */
	if (slab->s_free == FREELIST_END) {
		queue_delete(&cache->c_slabs_full, &slab->s_list);
	} else {
		queue_delete(&cache->c_slabs_partial, &slab->s_list);
	}

	unsigned int slot = pointer_to_slot(slab, p);
	slab_free_slot(slab, slot);

	/* requeue the slab: fully free slabs go back to the empty list */
	if (slab->s_obj_allocated == 0) {
		queue_push_back(&cache->c_slabs_empty, &slab->s_list);
	} else {
		queue_push_back(&cache->c_slabs_partial, &slab->s_list);
	}

	spin_unlock_irqrestore(&cache->c_lock, irq_flags);
}