vm: explicitly initialise kmalloc in vm_bootstrap
If kmalloc is called with count=N before vm_bootstrap has finished, the request is fulfilled using memblock_alloc. If N is a power of 2, the returned pointer is aligned to an N-byte boundary; otherwise it is aligned to an 8-byte boundary, emulating the alignment behaviour of the cache allocator.
This commit is contained in:
@@ -234,6 +234,7 @@ extern void vm_cache_destroy(vm_cache_t *cache);
|
|||||||
extern void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags);
|
extern void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags);
|
||||||
extern void vm_cache_free(vm_cache_t *cache, void *p);
|
extern void vm_cache_free(vm_cache_t *cache, void *p);
|
||||||
|
|
||||||
|
extern void kmalloc_init(void);
|
||||||
extern void *kmalloc(size_t count, vm_flags_t flags);
|
extern void *kmalloc(size_t count, vm_flags_t flags);
|
||||||
extern void *kzalloc(size_t count, vm_flags_t flags);
|
extern void *kzalloc(size_t count, vm_flags_t flags);
|
||||||
extern void kfree(void *p);
|
extern void kfree(void *p);
|
||||||
|
|||||||
@@ -22,6 +22,7 @@ kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones)
|
|||||||
vm_zone_init(&node_data->pg_zones[zones[i].zd_id], &zones[i]);
|
vm_zone_init(&node_data->pg_zones[zones[i].zd_id], &zones[i]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
kmalloc_init();
|
||||||
return KERN_OK;
|
return KERN_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
40
vm/kmalloc.c
40
vm/kmalloc.c
@@ -1,9 +1,14 @@
|
|||||||
|
#include <socks/types.h>
|
||||||
|
#include <socks/memblock.h>
|
||||||
#include <socks/vm.h>
|
#include <socks/vm.h>
|
||||||
|
#include <socks/util.h>
|
||||||
#include <socks/libc/string.h>
|
#include <socks/libc/string.h>
|
||||||
|
|
||||||
#define SIZE_N_CACHE(s) \
|
#define SIZE_N_CACHE(s) \
|
||||||
{ .c_name = "size-" # s, .c_obj_size = s, .c_page_order = VM_PAGE_16K }
|
{ .c_name = "size-" # s, .c_obj_size = s, .c_page_order = VM_PAGE_16K }
|
||||||
|
|
||||||
|
static int kmalloc_initialised = 0;
|
||||||
|
|
||||||
/* reserve space for the size-N caches: */
|
/* reserve space for the size-N caches: */
|
||||||
static vm_cache_t size_n_caches[] = {
|
static vm_cache_t size_n_caches[] = {
|
||||||
SIZE_N_CACHE(16),
|
SIZE_N_CACHE(16),
|
||||||
@@ -24,7 +29,16 @@ static vm_cache_t size_n_caches[] = {
|
|||||||
SIZE_N_CACHE(3072),
|
SIZE_N_CACHE(3072),
|
||||||
SIZE_N_CACHE(4096),
|
SIZE_N_CACHE(4096),
|
||||||
};
|
};
|
||||||
static const size_t nr_size_n_caches = sizeof size_n_caches / sizeof size_n_caches[0];
|
static const unsigned int nr_size_n_caches = sizeof size_n_caches / sizeof size_n_caches[0];
|
||||||
|
|
||||||
|
void kmalloc_init(void)
|
||||||
|
{
|
||||||
|
for (unsigned int i = 0; i < nr_size_n_caches; i++) {
|
||||||
|
vm_cache_init(&size_n_caches[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
kmalloc_initialised = 1;
|
||||||
|
}
|
||||||
|
|
||||||
void *kmalloc(size_t count, vm_flags_t flags)
|
void *kmalloc(size_t count, vm_flags_t flags)
|
||||||
{
|
{
|
||||||
@@ -32,8 +46,20 @@ void *kmalloc(size_t count, vm_flags_t flags)
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (!kmalloc_initialised) {
|
||||||
|
/* if alloc size is a power of 2, align pointer to that same power of 2,
|
||||||
|
otherwise, align to 8-byte boundary. this emulates the behaviour of
|
||||||
|
the cache allocator. */
|
||||||
|
phys_addr_t align = 8;
|
||||||
|
if (power_of_2(count)) {
|
||||||
|
align = count;
|
||||||
|
}
|
||||||
|
|
||||||
|
return memblock_alloc(count, align);
|
||||||
|
}
|
||||||
|
|
||||||
vm_cache_t *best_fit = NULL;
|
vm_cache_t *best_fit = NULL;
|
||||||
for (size_t i = 0; i < nr_size_n_caches; i++) {
|
for (unsigned int i = 0; i < nr_size_n_caches; i++) {
|
||||||
if (size_n_caches[i].c_obj_size >= count) {
|
if (size_n_caches[i].c_obj_size >= count) {
|
||||||
best_fit = &size_n_caches[i];
|
best_fit = &size_n_caches[i];
|
||||||
break;
|
break;
|
||||||
@@ -44,10 +70,6 @@ void *kmalloc(size_t count, vm_flags_t flags)
|
|||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!VM_CACHE_INITIALISED(best_fit)) {
|
|
||||||
vm_cache_init(best_fit);
|
|
||||||
}
|
|
||||||
|
|
||||||
return vm_cache_alloc(best_fit, flags);
|
return vm_cache_alloc(best_fit, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -63,6 +85,12 @@ void *kzalloc(size_t count, vm_flags_t flags)
|
|||||||
|
|
||||||
void kfree(void *p)
|
void kfree(void *p)
|
||||||
{
|
{
|
||||||
|
if (!kmalloc_initialised) {
|
||||||
|
/* p was probably allocated using memblock. however, memblock requires that
|
||||||
|
we specify the amount of memory to free, which isn't possible here. */
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
phys_addr_t phys = vm_virt_to_phys(p);
|
phys_addr_t phys = vm_virt_to_phys(p);
|
||||||
vm_page_t *pg = vm_page_get(phys);
|
vm_page_t *pg = vm_page_get(phys);
|
||||||
if (!pg || !pg->p_slab) {
|
if (!pg || !pg->p_slab) {
|
||||||
|
|||||||
Reference in New Issue
Block a user