kernel: don't use typedef for enums or non-opaque structs

2023-04-12 20:17:11 +01:00
parent 0d75e347e9
commit b6f8c1ccaa
51 changed files with 663 additions and 665 deletions

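The rule being applied: a typedef that merely aliases a visible struct or enum hides what kind of type an identifier names, so the tag is now spelled out at every use site; per the commit title, only opaque structs (whose definition is hidden from callers) would still warrant a typedef. A minimal sketch of the convention, using hypothetical names (`colour`, `point`, `handle_t`) rather than code from this diff:

/* before: the alias conceals that these are an enum and a struct */
typedef enum { RED, GREEN } colour_t;
typedef struct { int x, y; } point_t;

/* after: the tag is written out wherever the type is used */
enum colour { RED, GREEN };
struct point { int x, y; };

void draw(struct point p, enum colour c);

/* an opaque handle (definition not visible to callers) may keep its typedef */
typedef struct opaque_handle handle_t;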

@@ -6,15 +6,15 @@
#define FREELIST_END ((unsigned int)-1)
-static vm_cache_t cache_cache = { .c_name = "vm_cache", .c_obj_size = sizeof(vm_cache_t) };
+static struct vm_cache cache_cache = { .c_name = "vm_cache", .c_obj_size = sizeof(struct vm_cache) };
-vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags)
+struct vm_cache *vm_cache_create(const char *name, size_t objsz, enum vm_cache_flags flags)
{
if (!VM_CACHE_INITIALISED(&cache_cache)) {
vm_cache_init(&cache_cache);
}
-vm_cache_t *new_cache = vm_cache_alloc(&cache_cache, 0);
+struct vm_cache *new_cache = vm_cache_alloc(&cache_cache, 0);
new_cache->c_name = name;
new_cache->c_obj_size = objsz;
@@ -25,7 +25,7 @@ vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t fla
return new_cache;
}
-void vm_cache_init(vm_cache_t *cache)
+void vm_cache_init(struct vm_cache *cache)
{
cache->c_page_order = VM_PAGE_16K;
if (cache->c_obj_size >= 512) {
@@ -50,7 +50,7 @@ void vm_cache_init(vm_cache_t *cache)
cache->c_stride = space_per_item;
if (!(cache->c_flags & VM_CACHE_OFFSLAB)) {
-available -= sizeof(vm_slab_t);
+available -= sizeof(struct vm_slab);
}
/* one entry in the freelist per object slot */
@@ -61,7 +61,7 @@ void vm_cache_init(vm_cache_t *cache)
cache->c_slabs_partial = QUEUE_INIT;
cache->c_slabs_empty = QUEUE_INIT;
-cache->c_hdr_size = sizeof(vm_slab_t) + (sizeof(unsigned int) * cache->c_obj_count);
+cache->c_hdr_size = sizeof(struct vm_slab) + (sizeof(unsigned int) * cache->c_obj_count);
/* for on-slab caches, c_hdr_size is added to the slab pointer to
get the object buffer pointer. by aligning c_hdr_size to the
@@ -73,15 +73,15 @@ void vm_cache_init(vm_cache_t *cache)
}
}
-void vm_cache_destroy(vm_cache_t *cache)
+void vm_cache_destroy(struct vm_cache *cache)
{
/* TODO */
}
-static vm_slab_t *alloc_slab(vm_cache_t *cache, vm_flags_t flags)
+static struct vm_slab *alloc_slab(struct vm_cache *cache, enum vm_flags flags)
{
-vm_page_t *slab_page = vm_page_alloc(cache->c_page_order, flags);
-vm_slab_t *slab_hdr = NULL;
+struct vm_page *slab_page = vm_page_alloc(cache->c_page_order, flags);
+struct vm_slab *slab_hdr = NULL;
void *slab_data = vm_page_get_vaddr(slab_page);
if (cache->c_flags & VM_CACHE_OFFSLAB) {
@@ -120,12 +120,12 @@ static vm_slab_t *alloc_slab(vm_cache_t *cache, vm_flags_t flags)
return slab_hdr;
}
-static void __used destroy_slab(vm_slab_t *slab)
+static void __used destroy_slab(struct vm_slab *slab)
{
}
-static unsigned int slab_allocate_slot(vm_slab_t *slab)
+static unsigned int slab_allocate_slot(struct vm_slab *slab)
{
if (slab->s_free == FREELIST_END) {
return FREELIST_END;
@@ -139,7 +139,7 @@ static unsigned int slab_allocate_slot(vm_slab_t *slab)
return slot;
}
-static void slab_free_slot(vm_slab_t *slab, unsigned int slot)
+static void slab_free_slot(struct vm_slab *slab, unsigned int slot)
{
unsigned int next = slab->s_free;
unsigned int *freelist = (unsigned int *)(slab + 1);
@@ -149,30 +149,30 @@ static void slab_free_slot(vm_slab_t *slab, unsigned int slot)
slab->s_obj_allocated--;
}
-static void *slot_to_pointer(vm_slab_t *slab, unsigned int slot)
+static void *slot_to_pointer(struct vm_slab *slab, unsigned int slot)
{
return (void *)((char *)slab->s_objects + (slot * slab->s_cache->c_stride));
}
-static unsigned int pointer_to_slot(vm_slab_t *slab, void *p)
+static unsigned int pointer_to_slot(struct vm_slab *slab, void *p)
{
size_t offset = (uintptr_t)p - (uintptr_t)slab->s_objects;
return offset / slab->s_cache->c_stride;
}
-void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags)
+void *vm_cache_alloc(struct vm_cache *cache, enum vm_flags flags)
{
unsigned long irq_flags;
spin_lock_irqsave(&cache->c_lock, &irq_flags);
-vm_slab_t *slab = NULL;
+struct vm_slab *slab = NULL;
if (!queue_empty(&cache->c_slabs_partial)) {
/* prefer using up partially-full slabs before taking a fresh one */
-queue_entry_t *slab_entry = queue_pop_front(&cache->c_slabs_partial);
-slab = QUEUE_CONTAINER(vm_slab_t, s_list, slab_entry);
+struct queue_entry *slab_entry = queue_pop_front(&cache->c_slabs_partial);
+slab = QUEUE_CONTAINER(struct vm_slab, s_list, slab_entry);
} else if (!queue_empty(&cache->c_slabs_empty)) {
-queue_entry_t *slab_entry = queue_pop_front(&cache->c_slabs_empty);
-slab = QUEUE_CONTAINER(vm_slab_t, s_list, slab_entry);
+struct queue_entry *slab_entry = queue_pop_front(&cache->c_slabs_empty);
+slab = QUEUE_CONTAINER(struct vm_slab, s_list, slab_entry);
} else {
/* we've run out of slabs. create a new one */
slab = alloc_slab(cache, flags);
@@ -196,20 +196,20 @@ void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags)
return p;
}
-void vm_cache_free(vm_cache_t *cache, void *p)
+void vm_cache_free(struct vm_cache *cache, void *p)
{
unsigned long irq_flags;
spin_lock_irqsave(&cache->c_lock, &irq_flags);
phys_addr_t phys = vm_virt_to_phys(p);
-vm_page_t *pg = vm_page_get(phys);
+struct vm_page *pg = vm_page_get(phys);
if (!pg || !pg->p_slab) {
spin_unlock_irqrestore(&cache->c_lock, irq_flags);
return;
}
-vm_slab_t *slab = pg->p_slab;
+struct vm_slab *slab = pg->p_slab;
if (slab->s_cache != cache) {
spin_unlock_irqrestore(&cache->c_lock, irq_flags);