kernel: don't use typedef for enums or non-opaque structs
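The renames below apply one convention across the vm code: enums and structs whose definitions are visible to callers are written with their `enum`/`struct` tags at every use, while (judging from the diff) typedefs such as phys_addr_t, kern_status_t and vm_node_id_t stay for scalar wrappers and opaque handles. As an illustration only (these names are hypothetical, not from this tree) the convention reads roughly as:

    #if 0   /* before: typedef'd enum and non-opaque struct */
    typedef enum example_colour { EXAMPLE_RED, EXAMPLE_BLUE } example_colour_t;
    typedef struct example_point { int x, y; } example_point_t;
    static example_point_t origin = { 0, 0 };
    #else   /* after: the tag is spelled out wherever the type is used */
    enum example_colour { EXAMPLE_RED, EXAMPLE_BLUE };
    struct example_point { int x, y; };
    static struct example_point origin = { 0, 0 };
    #endif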
@@ -8,15 +8,15 @@
 #include <limits.h>
 #include <stdint.h>
 
-/* One vm_pg_data_t per NUMA node. */
-static vm_pg_data_t *node_data = NULL;
+/* One struct vm_pg_data per NUMA node. */
+static struct vm_pg_data *node_data = NULL;
 
-kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones)
+kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zones)
 {
	int numa_count = 1;
 
	/* we're only worrying about UMA systems for now */
-	node_data = memblock_alloc(sizeof(vm_pg_data_t) * numa_count, 8);
+	node_data = memblock_alloc(sizeof(struct vm_pg_data) * numa_count, 8);
 
	/* TODO select which memory model to use automatically, and add
	   a kernel boot parameter to override the choice */
@@ -41,7 +41,7 @@ kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones)
	return KERN_OK;
 }
 
-vm_pg_data_t *vm_pg_data_get(vm_node_id_t node)
+struct vm_pg_data *vm_pg_data_get(vm_node_id_t node)
 {
	if (node == 0) {
		return node_data;
--- a/vm/cache.c
+++ b/vm/cache.c
@@ -6,15 +6,15 @@
 
 #define FREELIST_END ((unsigned int)-1)
 
-static vm_cache_t cache_cache = { .c_name = "vm_cache", .c_obj_size = sizeof(vm_cache_t) };
+static struct vm_cache cache_cache = { .c_name = "vm_cache", .c_obj_size = sizeof(struct vm_cache) };
 
-vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags)
+struct vm_cache *vm_cache_create(const char *name, size_t objsz, enum vm_cache_flags flags)
 {
	if (!VM_CACHE_INITIALISED(&cache_cache)) {
		vm_cache_init(&cache_cache);
	}
 
-	vm_cache_t *new_cache = vm_cache_alloc(&cache_cache, 0);
+	struct vm_cache *new_cache = vm_cache_alloc(&cache_cache, 0);
 
	new_cache->c_name = name;
	new_cache->c_obj_size = objsz;
@@ -25,7 +25,7 @@ vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags)
	return new_cache;
 }
 
-void vm_cache_init(vm_cache_t *cache)
+void vm_cache_init(struct vm_cache *cache)
 {
	cache->c_page_order = VM_PAGE_16K;
	if (cache->c_obj_size >= 512) {
@@ -50,7 +50,7 @@ void vm_cache_init(vm_cache_t *cache)
	cache->c_stride = space_per_item;
 
	if (!(cache->c_flags & VM_CACHE_OFFSLAB)) {
-		available -= sizeof(vm_slab_t);
+		available -= sizeof(struct vm_slab);
	}
 
	/* one entry in the freelist per object slot */
@@ -61,7 +61,7 @@ void vm_cache_init(vm_cache_t *cache)
	cache->c_slabs_partial = QUEUE_INIT;
	cache->c_slabs_empty = QUEUE_INIT;
 
-	cache->c_hdr_size = sizeof(vm_slab_t) + (sizeof(unsigned int) * cache->c_obj_count);
+	cache->c_hdr_size = sizeof(struct vm_slab) + (sizeof(unsigned int) * cache->c_obj_count);
 
	/* for on-slab caches, c_hdr_size is added to the slab pointer to
	   get the object buffer pointer. by aligning c_hdr_size to the
@@ -73,15 +73,15 @@ void vm_cache_init(vm_cache_t *cache)
	}
 }
 
-void vm_cache_destroy(vm_cache_t *cache)
+void vm_cache_destroy(struct vm_cache *cache)
 {
	/* TODO */
 }
 
-static vm_slab_t *alloc_slab(vm_cache_t *cache, vm_flags_t flags)
+static struct vm_slab *alloc_slab(struct vm_cache *cache, enum vm_flags flags)
 {
-	vm_page_t *slab_page = vm_page_alloc(cache->c_page_order, flags);
-	vm_slab_t *slab_hdr = NULL;
+	struct vm_page *slab_page = vm_page_alloc(cache->c_page_order, flags);
+	struct vm_slab *slab_hdr = NULL;
	void *slab_data = vm_page_get_vaddr(slab_page);
 
	if (cache->c_flags & VM_CACHE_OFFSLAB) {
@@ -120,12 +120,12 @@ static vm_slab_t *alloc_slab(vm_cache_t *cache, vm_flags_t flags)
	return slab_hdr;
 }
 
-static void __used destroy_slab(vm_slab_t *slab)
+static void __used destroy_slab(struct vm_slab *slab)
 {
 
 }
 
-static unsigned int slab_allocate_slot(vm_slab_t *slab)
+static unsigned int slab_allocate_slot(struct vm_slab *slab)
 {
	if (slab->s_free == FREELIST_END) {
		return FREELIST_END;
@@ -139,7 +139,7 @@ static unsigned int slab_allocate_slot(vm_slab_t *slab)
	return slot;
 }
 
-static void slab_free_slot(vm_slab_t *slab, unsigned int slot)
+static void slab_free_slot(struct vm_slab *slab, unsigned int slot)
 {
	unsigned int next = slab->s_free;
	unsigned int *freelist = (unsigned int *)(slab + 1);
@@ -149,30 +149,30 @@ static void slab_free_slot(vm_slab_t *slab, unsigned int slot)
	slab->s_obj_allocated--;
 }
 
-static void *slot_to_pointer(vm_slab_t *slab, unsigned int slot)
+static void *slot_to_pointer(struct vm_slab *slab, unsigned int slot)
 {
	return (void *)((char *)slab->s_objects + (slot * slab->s_cache->c_stride));
 }
 
-static unsigned int pointer_to_slot(vm_slab_t *slab, void *p)
+static unsigned int pointer_to_slot(struct vm_slab *slab, void *p)
 {
	size_t offset = (uintptr_t)p - (uintptr_t)slab->s_objects;
	return offset / slab->s_cache->c_stride;
 }
 
-void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags)
+void *vm_cache_alloc(struct vm_cache *cache, enum vm_flags flags)
 {
	unsigned long irq_flags;
	spin_lock_irqsave(&cache->c_lock, &irq_flags);
 
-	vm_slab_t *slab = NULL;
+	struct vm_slab *slab = NULL;
	if (!queue_empty(&cache->c_slabs_partial)) {
		/* prefer using up partially-full slabs before taking a fresh one */
-		queue_entry_t *slab_entry = queue_pop_front(&cache->c_slabs_partial);
-		slab = QUEUE_CONTAINER(vm_slab_t, s_list, slab_entry);
+		struct queue_entry *slab_entry = queue_pop_front(&cache->c_slabs_partial);
+		slab = QUEUE_CONTAINER(struct vm_slab, s_list, slab_entry);
	} else if (!queue_empty(&cache->c_slabs_empty)) {
-		queue_entry_t *slab_entry = queue_pop_front(&cache->c_slabs_empty);
-		slab = QUEUE_CONTAINER(vm_slab_t, s_list, slab_entry);
+		struct queue_entry *slab_entry = queue_pop_front(&cache->c_slabs_empty);
+		slab = QUEUE_CONTAINER(struct vm_slab, s_list, slab_entry);
	} else {
		/* we've run out of slabs. create a new one */
		slab = alloc_slab(cache, flags);
@@ -196,20 +196,20 @@ void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags)
	return p;
 }
 
-void vm_cache_free(vm_cache_t *cache, void *p)
+void vm_cache_free(struct vm_cache *cache, void *p)
 {
	unsigned long irq_flags;
	spin_lock_irqsave(&cache->c_lock, &irq_flags);
 
	phys_addr_t phys = vm_virt_to_phys(p);
-	vm_page_t *pg = vm_page_get(phys);
+	struct vm_page *pg = vm_page_get(phys);
 
	if (!pg || !pg->p_slab) {
		spin_unlock_irqrestore(&cache->c_lock, irq_flags);
		return;
	}
 
-	vm_slab_t *slab = pg->p_slab;
+	struct vm_slab *slab = pg->p_slab;
 
	if (slab->s_cache != cache) {
		spin_unlock_irqrestore(&cache->c_lock, irq_flags);
--- a/vm/flat.c
+++ b/vm/flat.c
@@ -20,7 +20,7 @@
 #include <socks/printk.h>
 
 /* array of pages, one for each physical page frame present in RAM */
-static vm_page_t *page_array = NULL;
+static struct vm_page *page_array = NULL;
 
 /* number of pages stored in page_array */
 static size_t page_array_count = 0;
@@ -30,7 +30,7 @@ void vm_flat_init(void)
	printk("vm: using flat memory model");
	size_t pmem_size = 0;
 
-	memblock_iter_t it;
+	struct memblock_iter it;
	for_each_free_mem_range (&it, 0x0, UINTPTR_MAX) {
		if (pmem_size < it.it_limit + 1) {
			pmem_size = it.it_limit + 1;
@@ -42,7 +42,7 @@ void vm_flat_init(void)
		nr_pages++;
	}
 
-	page_array = memblock_alloc(sizeof(vm_page_t) * nr_pages, 8);
+	page_array = memblock_alloc(sizeof(struct vm_page) * nr_pages, 8);
	page_array_count = nr_pages;
 
	size_t nr_reserved = nr_pages;
@@ -63,13 +63,13 @@ void vm_flat_init(void)
	printk("vm: page array has %zu pages, %zu reserved", nr_pages, nr_reserved);
 }
 
-vm_page_t *vm_page_get_flat(phys_addr_t addr)
+struct vm_page *vm_page_get_flat(phys_addr_t addr)
 {
	size_t pfn = addr / VM_PAGE_SIZE;
	return pfn < page_array_count ? &page_array[pfn] : NULL;
 }
 
-size_t vm_page_get_pfn_flat(vm_page_t *pg)
+size_t vm_page_get_pfn_flat(struct vm_page *pg)
 {
	return ((uintptr_t)pg - (uintptr_t)page_array) / sizeof *pg;
 }
--- a/vm/kmalloc.c
+++ b/vm/kmalloc.c
@@ -10,7 +10,7 @@
 static int kmalloc_initialised = 0;
 
 /* reserve space for the size-N caches: */
-static vm_cache_t size_n_caches[] = {
+static struct vm_cache size_n_caches[] = {
	SIZE_N_CACHE(16),
	SIZE_N_CACHE(32),
	SIZE_N_CACHE(48),
@@ -40,7 +40,7 @@ void kmalloc_init(void)
	kmalloc_initialised = 1;
 }
 
-void *kmalloc(size_t count, vm_flags_t flags)
+void *kmalloc(size_t count, enum vm_flags flags)
 {
	if (!count) {
		return NULL;
@@ -58,7 +58,7 @@ void *kmalloc(size_t count, vm_flags_t flags)
		return memblock_alloc(count, align);
	}
 
-	vm_cache_t *best_fit = NULL;
+	struct vm_cache *best_fit = NULL;
	for (unsigned int i = 0; i < nr_size_n_caches; i++) {
		if (size_n_caches[i].c_obj_size >= count) {
			best_fit = &size_n_caches[i];
@@ -73,7 +73,7 @@ void *kmalloc(size_t count, vm_flags_t flags)
	return vm_cache_alloc(best_fit, flags);
 }
 
-void *kzalloc(size_t count, vm_flags_t flags)
+void *kzalloc(size_t count, enum vm_flags flags)
 {
	void *p = kmalloc(count, flags);
	if (p) {
@@ -92,7 +92,7 @@ void kfree(void *p)
	}
 
	phys_addr_t phys = vm_virt_to_phys(p);
-	vm_page_t *pg = vm_page_get(phys);
+	struct vm_page *pg = vm_page_get(phys);
	if (!pg || !pg->p_slab) {
		return;
	}
@@ -38,12 +38,12 @@
    be bounded by the defined memory regions, and not by this constant. */
 #define ADDR_MAX (~(uintptr_t)0)
 
-static memblock_region_t init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT];
-static memblock_region_t init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT];
+static struct memblock_region init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT];
+static struct memblock_region init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT];
 
 static phys_addr_t do_alloc(size_t size, phys_addr_t align);
 
-memblock_t memblock = {
+struct memblock memblock = {
	.memory.regions = init_memory_regions,
	.memory.count = 0,
	.memory.max = MEMBLOCK_INIT_MEMORY_REGION_COUNT,
@@ -55,33 +55,33 @@ memblock_t memblock = {
	.reserved.name = "reserved",
 };
 
-static void memblock_double_capacity(memblock_type_t *type)
+static void memblock_double_capacity(struct memblock_type *type)
 {
	size_t new_max = type->max * 2;
 
-	phys_addr_t new_regions_p = do_alloc(new_max * sizeof(memblock_region_t), 8);
+	phys_addr_t new_regions_p = do_alloc(new_max * sizeof(struct memblock_region), 8);
 
	void *new_regions = (void *)(new_regions_p + memblock.m_voffset);
-	memcpy(new_regions, type->regions, type->count * sizeof(memblock_region_t));
+	memcpy(new_regions, type->regions, type->count * sizeof(struct memblock_region));
 
	type->regions = new_regions;
	type->max = new_max;
 }
 
-static int memblock_insert_region(memblock_type_t *type, memblock_region_t *to_add)
+static int memblock_insert_region(struct memblock_type *type, struct memblock_region *to_add)
 {
	unsigned int i = 0;
 
	for (i = 0; i < type->count; i++) {
-		const memblock_region_t *cur = &type->regions[i];
+		const struct memblock_region *cur = &type->regions[i];
 
		if (cur->base >= to_add->limit) {
			break;
		}
	}
 
-	memblock_region_t *src = &type->regions[i];
-	memblock_region_t *dst = &type->regions[i + 1];
+	struct memblock_region *src = &type->regions[i];
+	struct memblock_region *dst = &type->regions[i + 1];
	unsigned int count = type->count - i;
 
	memmove(dst, src, count * sizeof *src);
@@ -92,14 +92,14 @@ static int memblock_insert_region(memblock_type_t *type, memblock_region_t *to_add)
	return 0;
 }
 
-static int memblock_remove_region(memblock_type_t *type, unsigned int i)
+static int memblock_remove_region(struct memblock_type *type, unsigned int i)
 {
	if (i >= type->count) {
		return -1;
	}
 
-	memblock_region_t *src = &type->regions[i + 1];
-	memblock_region_t *dst = &type->regions[i];
+	struct memblock_region *src = &type->regions[i + 1];
+	struct memblock_region *dst = &type->regions[i];
	unsigned int count = type->count - i;
 
	memmove(dst, src, count * sizeof *src);
@@ -116,7 +116,7 @@ int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset)
	return 0;
 }
 
-int memblock_add_range(memblock_type_t *type, uintptr_t base, size_t size, memblock_region_status_t status)
+int memblock_add_range(struct memblock_type *type, uintptr_t base, size_t size, enum memblock_region_status status)
 {
	if (size == 0) {
		return 0;
@@ -131,12 +131,12 @@ int memblock_add_range(memblock_type_t *type, uintptr_t base, size_t size, memblock_region_status_t status)
		return 0;
	}
 
-	memblock_region_t new_region = { .base = base, .limit = limit, .status = status };
+	struct memblock_region new_region = { .base = base, .limit = limit, .status = status };
 
	/* two regions with different statuses CANNOT intersect. we first need to check
	   to make sure the region being added doesn't violate this rule. */
	for (unsigned int i = 0; i < type->count; i++) {
-		memblock_region_t *cur_region = &type->regions[i];
+		struct memblock_region *cur_region = &type->regions[i];
 
		if (new_region.base > cur_region->limit || new_region.limit < cur_region->base) {
			continue;
@@ -152,7 +152,7 @@ int memblock_add_range(memblock_type_t *type, uintptr_t base, size_t size, memblock_region_status_t status)
	bool add_new = true;
 
	for (unsigned int i = 0; i < type->count; i++) {
-		memblock_region_t *cur_region = &type->regions[i];
+		struct memblock_region *cur_region = &type->regions[i];
 
		/* case 1: the region being added and the current region have no connection what-so-ever (no overlaps) */
		if (cur_region->limit + 1 < new_region.base || cur_region->base > new_region.limit) {
@@ -244,7 +244,7 @@ static phys_addr_t do_alloc(size_t size, phys_addr_t align)
	phys_addr_t region_start = memblock.m_alloc_start - memblock.m_voffset;
	phys_addr_t region_end = memblock.m_alloc_end - memblock.m_voffset;
 
-	memblock_iter_t it;
+	struct memblock_iter it;
	for_each_free_mem_range (&it, region_start, region_end) {
		phys_addr_t base = it.it_base;
		if (base & (align - 1)) {
@@ -306,13 +306,13 @@ int memblock_free_phys(phys_addr_t addr, size_t size)
	return 0;
 }
 
-void __next_memory_region(memblock_iter_t *it, memblock_type_t *type_a, memblock_type_t *type_b, uintptr_t start, uintptr_t end)
+void __next_memory_region(struct memblock_iter *it, struct memblock_type *type_a, struct memblock_type *type_b, uintptr_t start, uintptr_t end)
 {
	unsigned int idx_a = IDX_A(it->__idx);
	unsigned int idx_b = IDX_B(it->__idx);
 
	for (; idx_a < type_a->count; idx_a++) {
-		memblock_region_t *m = &type_a->regions[idx_a];
+		struct memblock_region *m = &type_a->regions[idx_a];
 
		uintptr_t m_start = m->base;
		uintptr_t m_end = m->limit;
@@ -337,7 +337,7 @@ void __next_memory_region(memblock_iter_t *it, memblock_type_t *type_a, memblock_type_t *type_b, uintptr_t start, uintptr_t end)
	}
 
	for (; idx_b < type_b->count + 1; idx_b++) {
-		memblock_region_t *r = &type_b->regions[idx_b];
+		struct memblock_region *r = &type_b->regions[idx_b];
 
		/* r_start and r_end delimit the region of memory between the current and previous reserved regions.
		   if we have gone past the last reserved region, these variables delimit the range between the end
@@ -1,13 +1,13 @@
 #include <socks/vm.h>
 
-static vm_model_t model;
+static enum vm_model model;
 
-vm_model_t vm_memory_model(void)
+enum vm_model vm_memory_model(void)
 {
	return model;
 }
 
-void vm_set_memory_model(vm_model_t m)
+void vm_set_memory_model(enum vm_model m)
 {
	model = m;
 }
--- a/vm/page.c
+++ b/vm/page.c
@@ -24,7 +24,7 @@ static size_t page_order_bytes[] = {
	[VM_PAGE_128M] = 0x8000000,
 
	/* vm can support pages of this size, but
-	   vm_page_t only has 4 bits with which to store
+	   struct vm_page only has 4 bits with which to store
	   the page order, which cannot accomodate these
	   larger order numbers */
	[VM_PAGE_256M] = 0x10000000,
@@ -63,7 +63,7 @@ void *vm_phys_to_virt(phys_addr_t p)
	return (void *)(VM_PAGEMAP_BASE + p);
 }
 
-vm_page_t *vm_page_get(phys_addr_t addr)
+struct vm_page *vm_page_get(phys_addr_t addr)
 {
	switch (vm_memory_model()) {
	case VM_MODEL_FLAT:
@@ -75,17 +75,17 @@ vm_page_t *vm_page_get(phys_addr_t addr)
	}
 }
 
-phys_addr_t vm_page_get_paddr(vm_page_t *pg)
+phys_addr_t vm_page_get_paddr(struct vm_page *pg)
 {
	return vm_page_get_pfn(pg) * VM_PAGE_SIZE;
 }
 
-void *vm_page_get_vaddr(vm_page_t *pg)
+void *vm_page_get_vaddr(struct vm_page *pg)
 {
	return (void *)(vm_phys_to_virt(vm_page_get_pfn(pg) * VM_PAGE_SIZE));
 }
 
-size_t vm_page_get_pfn(vm_page_t *pg)
+size_t vm_page_get_pfn(struct vm_page *pg)
 {
	switch (vm_memory_model()) {
	case VM_MODEL_FLAT:
@@ -97,7 +97,7 @@ size_t vm_page_get_pfn(vm_page_t *pg)
	}
 }
 
-size_t vm_page_order_to_bytes(vm_page_order_t order)
+size_t vm_page_order_to_bytes(enum vm_page_order order)
 {
	if (order < VM_PAGE_4K || order > VM_PAGE_64G) {
		return 0;
@@ -106,7 +106,7 @@ size_t vm_page_order_to_bytes(vm_page_order_t order)
	return page_order_bytes[order];
 }
 
-phys_addr_t vm_page_order_to_pages(vm_page_order_t order)
+phys_addr_t vm_page_order_to_pages(enum vm_page_order order)
 {
	if (order < VM_PAGE_4K || order > VM_PAGE_64G) {
		return 0;
@@ -115,7 +115,7 @@ phys_addr_t vm_page_order_to_pages(vm_page_order_t order)
	return page_order_bytes[order] >> VM_PAGE_SHIFT;
 }
 
-vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order)
+vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order)
 {
	if (order < 0 || order > VM_PAGE_MAX_ORDER) {
		return 0;
@@ -136,9 +136,9 @@ size_t vm_bytes_to_pages(size_t bytes)
	return bytes;
 }
 
-vm_zone_t *vm_page_get_zone(vm_page_t *pg)
+struct vm_zone *vm_page_get_zone(struct vm_page *pg)
 {
-	vm_pg_data_t *node = vm_pg_data_get(pg->p_node);
+	struct vm_pg_data *node = vm_pg_data_get(pg->p_node);
	if (!node) {
		return 0;
	}
@@ -151,19 +151,19 @@ vm_zone_t *vm_page_get_zone(vm_page_t *pg)
 }
 
 
-vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags)
+struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags)
 {
	/* TODO prefer nodes closer to us */
-	vm_pg_data_t *node = vm_pg_data_get(0);
-	vm_zone_id_t zone_id = VM_ZONE_HIGHMEM;
+	struct vm_pg_data *node = vm_pg_data_get(0);
+	enum vm_zone_id zone_id = VM_ZONE_HIGHMEM;
	if (flags & VM_GET_DMA) {
		zone_id = VM_ZONE_DMA;
	}
 
	while (1) {
-		vm_zone_t *z = &node->pg_zones[zone_id];
+		struct vm_zone *z = &node->pg_zones[zone_id];
 
-		vm_page_t *pg = vm_zone_alloc_page(z, order, flags);
+		struct vm_page *pg = vm_zone_alloc_page(z, order, flags);
		if (pg) {
			return pg;
		}
@@ -178,9 +178,9 @@ vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags)
	return NULL;
 }
 
-void vm_page_free(vm_page_t *pg)
+void vm_page_free(struct vm_page *pg)
 {
-	vm_zone_t *z = vm_page_get_zone(pg);
+	struct vm_zone *z = vm_page_get_zone(pg);
	if (!z) {
		return;
	}
@@ -188,7 +188,7 @@ void vm_page_free(vm_page_t *pg)
	vm_zone_free_page(z, pg);
 }
 
-int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b)
+int vm_page_split(struct vm_page *pg, struct vm_page **a, struct vm_page **b)
 {
	if (pg->p_order == VM_PAGE_MIN_ORDER) {
		return -1;
@@ -202,7 +202,7 @@ int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b)
		pg[i].p_order--;
	}
 
-	vm_page_t *buddy = vm_page_get_buddy(pg);
+	struct vm_page *buddy = vm_page_get_buddy(pg);
 
	if (pg->p_order == VM_PAGE_MIN_ORDER) {
		pg->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
@@ -218,7 +218,7 @@ int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b)
	return 0;
 }
 
-vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b)
+struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b)
 {
	if (a->p_order != b->p_order) {
		return NULL;
@@ -238,7 +238,7 @@ vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b)
 
	/* make sure that a comes before b */
	if (a > b) {
-		vm_page_t *tmp = a;
+		struct vm_page *tmp = a;
		a = b;
		b = tmp;
	}
@@ -260,16 +260,16 @@ vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b)
	return a;
 }
 
-vm_page_t *vm_page_get_buddy(vm_page_t *pg)
+struct vm_page *vm_page_get_buddy(struct vm_page *pg)
 {
	phys_addr_t paddr = vm_page_get_paddr(pg);
	paddr = paddr ^ vm_page_order_to_bytes(pg->p_order);
	return vm_page_get(paddr);
 }
 
-vm_page_t *vm_page_get_next_tail(vm_page_t *pg)
+struct vm_page *vm_page_get_next_tail(struct vm_page *pg)
 {
-	vm_page_t *next = pg + 1;
+	struct vm_page *next = pg + 1;
	if (next->p_flags & VM_PAGE_HEAD || !(next->p_flags & VM_PAGE_HUGE)) {
		return NULL;
	}
--- a/vm/sparse.c
+++ b/vm/sparse.c
@@ -28,10 +28,10 @@
 #include <socks/util.h>
 #include <socks/machine/cpu.h>
 
-static vm_sector_t *sector_array = NULL;
+static struct vm_sector *sector_array = NULL;
 static size_t sector_array_count = 0;
 
-static vm_sector_t *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sector_id, size_t *index)
+static struct vm_sector *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sector_id, size_t *index)
 {
	/* all sectors have the same size */
	size_t step = vm_page_order_to_bytes(sector_array[0].s_size);
@@ -52,16 +52,16 @@ static vm_sector_t *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sector_id, size_t *index)
	return &sector_array[sector];
 }
 
-static vm_page_t *get_or_create_page(phys_addr_t addr)
+static struct vm_page *get_or_create_page(phys_addr_t addr)
 {
	size_t sector_number, page_number;
	phys_addr_to_sector_and_index(addr, &sector_number, &page_number);
 
-	vm_sector_t *sector = &sector_array[sector_number];
+	struct vm_sector *sector = &sector_array[sector_number];
 
	if (!sector->s_pages) {
		size_t nr_pages = vm_page_order_to_pages(sector->s_size);
-		sector->s_pages = kzalloc(nr_pages * sizeof(vm_page_t), 0);
+		sector->s_pages = kzalloc(nr_pages * sizeof(struct vm_page), 0);
 
		for (size_t i = 0; i < nr_pages; i++) {
			sector->s_pages[i].p_flags = VM_PAGE_RESERVED;
@@ -73,9 +73,9 @@ static vm_page_t *get_or_create_page(phys_addr_t addr)
	return &sector->s_pages[page_number];
 }
 
-static vm_page_order_t find_minimum_sector_size(size_t pmem_size)
+static enum vm_page_order find_minimum_sector_size(size_t pmem_size)
 {
-	for (vm_page_order_t i = VM_PAGE_4K; i < VM_PAGE_64G; i++) {
+	for (enum vm_page_order i = VM_PAGE_4K; i < VM_PAGE_64G; i++) {
		size_t order_bytes = vm_page_order_to_bytes(i);
		if (order_bytes * VM_MAX_SECTORS >= pmem_size) {
			return i;
@@ -93,12 +93,12 @@ static vm_page_order_t find_minimum_sector_size(size_t pmem_size)
    this function uses some heuristics and thresholds that are untested and
    are in need of improvement to ensure that sparse works well on a wide
    range of systems. */
-static void calculate_sector_size_and_count(size_t pmem_size, size_t reserved_size, unsigned int *out_sector_count, vm_page_order_t *out_sector_size)
+static void calculate_sector_size_and_count(size_t pmem_size, size_t reserved_size, unsigned int *out_sector_count, enum vm_page_order *out_sector_size)
 {
	/* we can support up to VM_MAX_SECTORS memory sectors.
	   the minimum sector size is what ever is required
	   to cover all of physical memory in the maximum number of sectors */
-	vm_page_order_t sector_size = find_minimum_sector_size(pmem_size);
+	enum vm_page_order sector_size = find_minimum_sector_size(pmem_size);
 
	if (sector_size <= VM_PAGE_2M) {
		/* override really small sector sizes with something
@@ -148,7 +148,7 @@ void vm_sparse_init(void)
 {
	size_t pmem_size = 0, reserved_size = 0;
 
-	memblock_iter_t it;
+	struct memblock_iter it;
	for_each_mem_range (&it, 0x0, UINTPTR_MAX) {
		if (pmem_size < it.it_limit + 1) {
			pmem_size = it.it_limit + 1;
@@ -159,7 +159,7 @@ void vm_sparse_init(void)
		reserved_size += it.it_limit - it.it_base + 1;
	}
 
-	vm_page_order_t sector_size;
+	enum vm_page_order sector_size;
	size_t sector_bytes = 0;
	unsigned int nr_sectors = 0;
	calculate_sector_size_and_count(pmem_size, reserved_size, &nr_sectors, &sector_size);
@@ -168,7 +168,7 @@ void vm_sparse_init(void)
	char sector_size_str[64];
	data_size_to_string(sector_bytes, sector_size_str, sizeof sector_size_str);
 
-	sector_array = kzalloc(sizeof(vm_sector_t) * nr_sectors, 0);
+	sector_array = kzalloc(sizeof(struct vm_sector) * nr_sectors, 0);
	sector_array_count = nr_sectors;
 
	for (unsigned int i = 0; i < nr_sectors; i++) {
@@ -186,7 +186,7 @@ void vm_sparse_init(void)
		}
 
		for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
-			vm_page_t *pg = get_or_create_page(i);
+			struct vm_page *pg = get_or_create_page(i);
			pg->p_flags = 0;
		}
	}
@@ -198,7 +198,7 @@ void vm_sparse_init(void)
		}
 
		for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
-			vm_page_t *pg = vm_page_get(i);
+			struct vm_page *pg = vm_page_get(i);
 
			if (!pg) {
				/* if the page doesn't exist, it is part of a sector
@@ -214,7 +214,7 @@ void vm_sparse_init(void)
	printk("vm: [sparse] initialised %zu sectors of size %s", nr_sectors, sector_size_str);
 }
 
-vm_page_t *vm_page_get_sparse(phys_addr_t addr)
+struct vm_page *vm_page_get_sparse(phys_addr_t addr)
 {
	size_t sector_number, page_number;
	phys_addr_to_sector_and_index(addr, &sector_number, &page_number);
@@ -222,7 +222,7 @@ vm_page_t *vm_page_get_sparse(phys_addr_t addr)
		return NULL;
	}
 
-	vm_sector_t *sector = &sector_array[sector_number];
+	struct vm_sector *sector = &sector_array[sector_number];
 
	if (!sector->s_pages || page_number >= vm_page_order_to_pages(sector->s_size)) {
		return NULL;
@@ -231,8 +231,8 @@ vm_page_t *vm_page_get_sparse(phys_addr_t addr)
	return &sector->s_pages[page_number];
 }
 
-size_t vm_page_get_pfn_sparse(vm_page_t *pg)
+size_t vm_page_get_pfn_sparse(struct vm_page *pg)
 {
-	vm_sector_t *sector = &sector_array[pg->p_sector];
+	struct vm_sector *sector = &sector_array[pg->p_sector];
	return sector->s_first_pfn + (((uintptr_t)pg - (uintptr_t)sector->s_pages) / sizeof *pg);
 }
--- a/vm/zone.c
+++ b/vm/zone.c
@@ -8,11 +8,11 @@
 #include <socks/libc/string.h>
 #include <socks/machine/cpu.h>
 
-static vm_page_t *group_pages_into_block(vm_zone_t *z, phys_addr_t base, phys_addr_t limit, int order)
+static struct vm_page *group_pages_into_block(struct vm_zone *z, phys_addr_t base, phys_addr_t limit, int order)
 {
-	vm_page_t *first_page = NULL;
+	struct vm_page *first_page = NULL;
	for (phys_addr_t i = base; i < limit; i += VM_PAGE_SIZE) {
-		vm_page_t *pg = vm_page_get(i);
+		struct vm_page *pg = vm_page_get(i);
		if (!pg) {
			continue;
		}
@@ -37,7 +37,7 @@ static vm_page_t *group_pages_into_block(vm_zone_t *z, phys_addr_t base, phys_addr_t limit, int order)
	return first_page;
 }
 
-static void convert_region_to_blocks(vm_zone_t *zone,
+static void convert_region_to_blocks(struct vm_zone *zone,
				      phys_addr_t base, phys_addr_t limit,
				      int reserved)
 {
@@ -60,7 +60,7 @@ static void convert_region_to_blocks(vm_zone_t *zone,
	}
 
	phys_addr_t block_limit = base + (order_frames * VM_PAGE_SIZE) - 1;
-	vm_page_t *block_page = group_pages_into_block(zone, base, block_limit, order);
+	struct vm_page *block_page = group_pages_into_block(zone, base, block_limit, order);
 
	if (reserved == 0) {
		queue_push_back(&zone->z_free_pages[order], &block_page->p_list);
@@ -80,13 +80,13 @@ static void convert_region_to_blocks(vm_zone_t *zone,
	}
 }
 
-static size_t zone_free_bytes(vm_zone_t *z)
+static size_t zone_free_bytes(struct vm_zone *z)
 {
	size_t free_bytes = 0;
-	for (vm_page_order_t i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER; i++) {
+	for (enum vm_page_order i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER; i++) {
		size_t page_bytes = vm_page_order_to_bytes(i);
		size_t nr_pages = 0;
-		queue_foreach (vm_page_t, pg, &z->z_free_pages[i], p_list) {
+		queue_foreach (struct vm_page, pg, &z->z_free_pages[i], p_list) {
			free_bytes += page_bytes;
			nr_pages++;
		}
@@ -95,7 +95,7 @@ static size_t zone_free_bytes(vm_zone_t *z)
	return free_bytes;
 }
 
-void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
+void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
 {
	memset(z, 0x0, sizeof *z);
	memcpy(&z->z_info, zone_info, sizeof *zone_info);
@@ -108,7 +108,7 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
	int this_page_reserved = 0, last_page_reserved = -1;
 
	phys_addr_t plimit = 0;
-	memblock_iter_t it;
+	struct memblock_iter it;
	for_each_mem_range (&it, 0x00, UINTPTR_MAX) {
		if (it.it_limit + 1 > plimit) {
			plimit = it.it_limit + 1;
@@ -121,7 +121,7 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
 
	size_t nr_pages_found = 0;
	for (uintptr_t i = z->z_info.zd_base; i < z->z_info.zd_limit; i += VM_PAGE_SIZE) {
-		vm_page_t *pg = vm_page_get(i);
+		struct vm_page *pg = vm_page_get(i);
 
		if (pg) {
			nr_pages_found++;
@@ -162,7 +162,7 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
	printk("vm: zone %u/%s: %s of memory online.", z->z_info.zd_node, z->z_info.zd_name, free_bytes_str);
 }
 
-static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
+static int replenish_free_page_list(struct vm_zone *z, enum vm_page_order order)
 {
	if (!queue_empty(&z->z_free_pages[order])) {
		/* we already have pages available. */
@@ -175,9 +175,9 @@ static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
	}
 
	/* the lowest page order that is >= `order` and still has pages available */
-	vm_page_order_t first_order_with_free = VM_MAX_PAGE_ORDERS;
+	enum vm_page_order first_order_with_free = VM_MAX_PAGE_ORDERS;
 
-	for (vm_page_order_t i = order; i <= VM_PAGE_MAX_ORDER; i++) {
+	for (enum vm_page_order i = order; i <= VM_PAGE_MAX_ORDER; i++) {
		if (!queue_empty(&z->z_free_pages[i])) {
			first_order_with_free = i;
			break;
@@ -197,11 +197,11 @@ static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
	/* starting from the first page list with free pages,
	   take a page, split it in half, and add the sub-pages
	   to the next order's free list. */
-	for (vm_page_order_t i = first_order_with_free; i > order; i--) {
-		queue_entry_t *pg_entry = queue_pop_front(&z->z_free_pages[i]);
-		vm_page_t *pg = QUEUE_CONTAINER(vm_page_t, p_list, pg_entry);
+	for (enum vm_page_order i = first_order_with_free; i > order; i--) {
+		struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[i]);
+		struct vm_page *pg = QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);
 
-		vm_page_t *a, *b;
+		struct vm_page *a, *b;
		vm_page_split(pg, &a, &b);
 
		queue_push_back(&z->z_free_pages[i - 1], &a->p_list);
@@ -211,7 +211,7 @@ static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
	return 0;
 }
 
-vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags)
+struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags)
 {
	unsigned long irq_flags;
	spin_lock_irqsave(&z->z_lock, &irq_flags);
@@ -222,8 +222,8 @@ vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags)
		return NULL;
	}
 
-	queue_entry_t *pg_entry = queue_pop_front(&z->z_free_pages[order]);
-	vm_page_t *pg = QUEUE_CONTAINER(vm_page_t, p_list, pg_entry);
+	struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[order]);
+	struct vm_page *pg = QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);
	vm_page_foreach (pg, i) {
		i->p_flags |= VM_PAGE_ALLOC;
	}
@@ -232,7 +232,7 @@ vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags)
	return pg;
 }
 
-void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg)
+void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg)
 {
	unsigned long irq_flags;
	spin_lock_irqsave(&z->z_lock, &irq_flags);
@@ -241,8 +241,8 @@ void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg)
	queue_push_back(&z->z_free_pages[pg->p_order], &pg->p_list);
 
	while (1) {
-		vm_page_t *buddy = vm_page_get_buddy(pg);
-		vm_page_t *huge = vm_page_merge(pg, buddy);
+		struct vm_page *buddy = vm_page_get_buddy(pg);
+		struct vm_page *huge = vm_page_merge(pg, buddy);
		if (!huge) {
			break;
		}