From 0516ef06a38bdcae507d48eb8cff194163685745 Mon Sep 17 00:00:00 2001
From: Max Wash
Date: Mon, 6 Feb 2023 20:46:38 +0000
Subject: [PATCH] vm: explicitly initialise kmalloc in vm_bootstrap

if kmalloc is called with count=N before vm_bootstrap is finished, the
request will be fulfilled using memblock_alloc. if N is a power of 2,
the returned pointer will be aligned to an N-byte boundary.
---
 include/socks/vm.h |  1 +
 vm/bootstrap.c     |  1 +
 vm/kmalloc.c       | 40 ++++++++++++++++++++++++++++++++++------
 3 files changed, 36 insertions(+), 6 deletions(-)

diff --git a/include/socks/vm.h b/include/socks/vm.h
index 2072b2d..2036f2a 100644
--- a/include/socks/vm.h
+++ b/include/socks/vm.h
@@ -234,6 +234,7 @@ extern void vm_cache_destroy(vm_cache_t *cache);
 extern void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags);
 extern void vm_cache_free(vm_cache_t *cache, void *p);
 
+extern void kmalloc_init(void);
 extern void *kmalloc(size_t count, vm_flags_t flags);
 extern void *kzalloc(size_t count, vm_flags_t flags);
 extern void kfree(void *p);
diff --git a/vm/bootstrap.c b/vm/bootstrap.c
index 975ca20..bf32ad5 100644
--- a/vm/bootstrap.c
+++ b/vm/bootstrap.c
@@ -22,6 +22,7 @@ kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones)
 		vm_zone_init(&node_data->pg_zones[zones[i].zd_id], &zones[i]);
 	}
 
+	kmalloc_init();
 	return KERN_OK;
 }
 
diff --git a/vm/kmalloc.c b/vm/kmalloc.c
index 324b360..fac5593 100644
--- a/vm/kmalloc.c
+++ b/vm/kmalloc.c
@@ -1,9 +1,14 @@
+#include
+#include
 #include
+#include
 #include
 
 #define SIZE_N_CACHE(s) \
 	{ .c_name = "size-" # s, .c_obj_size = s, .c_page_order = VM_PAGE_16K }
 
+static int kmalloc_initialised = 0;
+
 /* reserve space for the size-N caches: */
 static vm_cache_t size_n_caches[] = {
 	SIZE_N_CACHE(16),
@@ -24,7 +29,16 @@ static vm_cache_t size_n_caches[] = {
 	SIZE_N_CACHE(3072),
 	SIZE_N_CACHE(4096),
 };
-static const size_t nr_size_n_caches = sizeof size_n_caches / sizeof size_n_caches[0];
+static const unsigned int nr_size_n_caches = sizeof size_n_caches / sizeof size_n_caches[0];
+
+void kmalloc_init(void)
+{
+	for (unsigned int i = 0; i < nr_size_n_caches; i++) {
+		vm_cache_init(&size_n_caches[i]);
+	}
+
+	kmalloc_initialised = 1;
+}
 
 void *kmalloc(size_t count, vm_flags_t flags)
 {
@@ -32,8 +46,20 @@ void *kmalloc(size_t count, vm_flags_t flags)
 		return NULL;
 	}
 
+	if (!kmalloc_initialised) {
+		/* if alloc size is a power of 2, align pointer to that same power of 2,
+		   otherwise, align to 8-byte boundary. this emulates the behaviour of
+		   the cache allocator. */
+		phys_addr_t align = 8;
+		if (power_of_2(count)) {
+			align = count;
+		}
+
+		return memblock_alloc(count, align);
+	}
+
 	vm_cache_t *best_fit = NULL;
-	for (size_t i = 0; i < nr_size_n_caches; i++) {
+	for (unsigned int i = 0; i < nr_size_n_caches; i++) {
 		if (size_n_caches[i].c_obj_size >= count) {
 			best_fit = &size_n_caches[i];
 			break;
@@ -44,10 +70,6 @@
 		return NULL;
 	}
 
-	if (!VM_CACHE_INITIALISED(best_fit)) {
-		vm_cache_init(best_fit);
-	}
-
 	return vm_cache_alloc(best_fit, flags);
 }
 
@@ -63,6 +85,12 @@ void *kzalloc(size_t count, vm_flags_t flags)
 void kfree(void *p)
 {
+	if (!kmalloc_initialised) {
+		/* p was probably allocated using memblock. however, memblock requires that
+		   we specify the amount of memory to free, which isn't possible here. */
+		return;
+	}
+
 	phys_addr_t phys = vm_virt_to_phys(p);
 	vm_page_t *pg = vm_page_get(phys);
 	if (!pg || !pg->p_slab) {