kernel: don't use typedef for enums or non-opaque structs
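
As a quick sketch of the rule (the struct foo / foo_get names below are hypothetical,
not taken from the tree): spell out the struct or enum tag at each use site, and keep
typedefs for opaque or scalar types only — e.g. phys_addr_t stays a typedef in the
diff below.

/* discouraged: the typedef hides an aggregate whose layout callers rely on
 *
 *     typedef struct foo { int f_refs; } foo_t;
 *     static void foo_get(foo_t *f) { f->f_refs++; }
 */

/* preferred: the tag makes it explicit that a struct is being passed around */
struct foo {
        int f_refs;
};

static void foo_get(struct foo *f)
{
        f->f_refs++;
}

/* likewise for enums: write "enum foo_state s;" rather than "foo_state_t s;" */
enum foo_state { FOO_IDLE, FOO_BUSY };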

2023-04-12 20:17:11 +01:00
parent 0d75e347e9
commit b6f8c1ccaa
51 changed files with 663 additions and 665 deletions

@@ -8,11 +8,11 @@
 #include <socks/libc/string.h>
 #include <socks/machine/cpu.h>

-static vm_page_t *group_pages_into_block(vm_zone_t *z, phys_addr_t base, phys_addr_t limit, int order)
+static struct vm_page *group_pages_into_block(struct vm_zone *z, phys_addr_t base, phys_addr_t limit, int order)
 {
-        vm_page_t *first_page = NULL;
+        struct vm_page *first_page = NULL;
         for (phys_addr_t i = base; i < limit; i += VM_PAGE_SIZE) {
-                vm_page_t *pg = vm_page_get(i);
+                struct vm_page *pg = vm_page_get(i);
                 if (!pg) {
                         continue;
                 }
@@ -37,7 +37,7 @@ static vm_page_t *group_pages_into_block(vm_zone_t *z, phys_addr_t base, phys_ad
         return first_page;
 }

-static void convert_region_to_blocks(vm_zone_t *zone,
+static void convert_region_to_blocks(struct vm_zone *zone,
                                      phys_addr_t base, phys_addr_t limit,
                                      int reserved)
 {
@@ -60,7 +60,7 @@ static void convert_region_to_blocks(vm_zone_t *zone,
                 }

                 phys_addr_t block_limit = base + (order_frames * VM_PAGE_SIZE) - 1;
-                vm_page_t *block_page = group_pages_into_block(zone, base, block_limit, order);
+                struct vm_page *block_page = group_pages_into_block(zone, base, block_limit, order);

                 if (reserved == 0) {
                         queue_push_back(&zone->z_free_pages[order], &block_page->p_list);
@@ -80,13 +80,13 @@ static void convert_region_to_blocks(vm_zone_t *zone,
         }
 }

-static size_t zone_free_bytes(vm_zone_t *z)
+static size_t zone_free_bytes(struct vm_zone *z)
 {
         size_t free_bytes = 0;
-        for (vm_page_order_t i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER; i++) {
+        for (enum vm_page_order i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER; i++) {
                 size_t page_bytes = vm_page_order_to_bytes(i);
                 size_t nr_pages = 0;
-                queue_foreach (vm_page_t, pg, &z->z_free_pages[i], p_list) {
+                queue_foreach (struct vm_page, pg, &z->z_free_pages[i], p_list) {
                         free_bytes += page_bytes;
                         nr_pages++;
                 }
@@ -95,7 +95,7 @@ static size_t zone_free_bytes(vm_zone_t *z)
         return free_bytes;
 }

-void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
+void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
 {
         memset(z, 0x0, sizeof *z);
         memcpy(&z->z_info, zone_info, sizeof *zone_info);
@@ -108,7 +108,7 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
         int this_page_reserved = 0, last_page_reserved = -1;
         phys_addr_t plimit = 0;

-        memblock_iter_t it;
+        struct memblock_iter it;
         for_each_mem_range (&it, 0x00, UINTPTR_MAX) {
                 if (it.it_limit + 1 > plimit) {
                         plimit = it.it_limit + 1;
@@ -121,7 +121,7 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
         size_t nr_pages_found = 0;
         for (uintptr_t i = z->z_info.zd_base; i < z->z_info.zd_limit; i += VM_PAGE_SIZE) {
-                vm_page_t *pg = vm_page_get(i);
+                struct vm_page *pg = vm_page_get(i);
                 if (pg) {
                         nr_pages_found++;
@@ -162,7 +162,7 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
         printk("vm: zone %u/%s: %s of memory online.", z->z_info.zd_node, z->z_info.zd_name, free_bytes_str);
 }

-static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
+static int replenish_free_page_list(struct vm_zone *z, enum vm_page_order order)
 {
         if (!queue_empty(&z->z_free_pages[order])) {
                 /* we already have pages available. */
@@ -175,9 +175,9 @@ static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
         }

         /* the lowest page order that is >= `order` and still has pages available */
-        vm_page_order_t first_order_with_free = VM_MAX_PAGE_ORDERS;
+        enum vm_page_order first_order_with_free = VM_MAX_PAGE_ORDERS;

-        for (vm_page_order_t i = order; i <= VM_PAGE_MAX_ORDER; i++) {
+        for (enum vm_page_order i = order; i <= VM_PAGE_MAX_ORDER; i++) {
                 if (!queue_empty(&z->z_free_pages[i])) {
                         first_order_with_free = i;
                         break;
@@ -197,11 +197,11 @@ static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
         /* starting from the first page list with free pages,
            take a page, split it in half, and add the sub-pages
            to the next order's free list. */
-        for (vm_page_order_t i = first_order_with_free; i > order; i--) {
-                queue_entry_t *pg_entry = queue_pop_front(&z->z_free_pages[i]);
-                vm_page_t *pg = QUEUE_CONTAINER(vm_page_t, p_list, pg_entry);
+        for (enum vm_page_order i = first_order_with_free; i > order; i--) {
+                struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[i]);
+                struct vm_page *pg = QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);

-                vm_page_t *a, *b;
+                struct vm_page *a, *b;
                 vm_page_split(pg, &a, &b);
                 queue_push_back(&z->z_free_pages[i - 1], &a->p_list);
@@ -211,7 +211,7 @@ static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
         return 0;
 }

-vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags)
+struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags)
 {
         unsigned long irq_flags;
         spin_lock_irqsave(&z->z_lock, &irq_flags);
@@ -222,8 +222,8 @@ vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t fl
                 return NULL;
         }

-        queue_entry_t *pg_entry = queue_pop_front(&z->z_free_pages[order]);
-        vm_page_t *pg = QUEUE_CONTAINER(vm_page_t, p_list, pg_entry);
+        struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[order]);
+        struct vm_page *pg = QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);
         vm_page_foreach (pg, i) {
                 i->p_flags |= VM_PAGE_ALLOC;
         }
@@ -232,7 +232,7 @@ vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t fl
         return pg;
 }

-void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg)
+void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg)
 {
         unsigned long irq_flags;
         spin_lock_irqsave(&z->z_lock, &irq_flags);
@@ -241,8 +241,8 @@ void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg)
         queue_push_back(&z->z_free_pages[pg->p_order], &pg->p_list);

         while (1) {
-                vm_page_t *buddy = vm_page_get_buddy(pg);
-                vm_page_t *huge = vm_page_merge(pg, buddy);
+                struct vm_page *buddy = vm_page_get_buddy(pg);
+                struct vm_page *huge = vm_page_merge(pg, buddy);
                 if (!huge) {
                         break;
                 }