kernel: don't use typedef for enums or non-opaque structs

This commit is contained in:
2023-04-12 20:17:11 +01:00
parent 0d75e347e9
commit b6f8c1ccaa
51 changed files with 663 additions and 665 deletions

View File

@@ -24,7 +24,7 @@ static size_t page_order_bytes[] = {
[VM_PAGE_128M] = 0x8000000,
/* vm can support pages of this size, but
vm_page_t only has 4 bits with which to store
struct vm_page only has 4 bits with which to store
the page order, which cannot accommodate these
larger order numbers */
[VM_PAGE_256M] = 0x10000000,
@@ -63,7 +63,7 @@ void *vm_phys_to_virt(phys_addr_t p)
return (void *)(VM_PAGEMAP_BASE + p);
}
vm_page_t *vm_page_get(phys_addr_t addr)
struct vm_page *vm_page_get(phys_addr_t addr)
{
switch (vm_memory_model()) {
case VM_MODEL_FLAT:
@@ -75,17 +75,17 @@ vm_page_t *vm_page_get(phys_addr_t addr)
}
}
phys_addr_t vm_page_get_paddr(vm_page_t *pg)
phys_addr_t vm_page_get_paddr(struct vm_page *pg)
{
return vm_page_get_pfn(pg) * VM_PAGE_SIZE;
}
void *vm_page_get_vaddr(vm_page_t *pg)
void *vm_page_get_vaddr(struct vm_page *pg)
{
return (void *)(vm_phys_to_virt(vm_page_get_pfn(pg) * VM_PAGE_SIZE));
}
size_t vm_page_get_pfn(vm_page_t *pg)
size_t vm_page_get_pfn(struct vm_page *pg)
{
switch (vm_memory_model()) {
case VM_MODEL_FLAT:
@@ -97,7 +97,7 @@ size_t vm_page_get_pfn(vm_page_t *pg)
}
}
size_t vm_page_order_to_bytes(vm_page_order_t order)
size_t vm_page_order_to_bytes(enum vm_page_order order)
{
if (order < VM_PAGE_4K || order > VM_PAGE_64G) {
return 0;
@@ -106,7 +106,7 @@ size_t vm_page_order_to_bytes(vm_page_order_t order)
return page_order_bytes[order];
}
phys_addr_t vm_page_order_to_pages(vm_page_order_t order)
phys_addr_t vm_page_order_to_pages(enum vm_page_order order)
{
if (order < VM_PAGE_4K || order > VM_PAGE_64G) {
return 0;
@@ -115,7 +115,7 @@ phys_addr_t vm_page_order_to_pages(vm_page_order_t order)
return page_order_bytes[order] >> VM_PAGE_SHIFT;
}
vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order)
vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order)
{
if (order < 0 || order > VM_PAGE_MAX_ORDER) {
return 0;
@@ -136,9 +136,9 @@ size_t vm_bytes_to_pages(size_t bytes)
return bytes;
}
vm_zone_t *vm_page_get_zone(vm_page_t *pg)
struct vm_zone *vm_page_get_zone(struct vm_page *pg)
{
vm_pg_data_t *node = vm_pg_data_get(pg->p_node);
struct vm_pg_data *node = vm_pg_data_get(pg->p_node);
if (!node) {
return 0;
}
@@ -151,19 +151,19 @@ vm_zone_t *vm_page_get_zone(vm_page_t *pg)
}
vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags)
struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags)
{
/* TODO prefer nodes closer to us */
vm_pg_data_t *node = vm_pg_data_get(0);
vm_zone_id_t zone_id = VM_ZONE_HIGHMEM;
struct vm_pg_data *node = vm_pg_data_get(0);
enum vm_zone_id zone_id = VM_ZONE_HIGHMEM;
if (flags & VM_GET_DMA) {
zone_id = VM_ZONE_DMA;
}
while (1) {
vm_zone_t *z = &node->pg_zones[zone_id];
struct vm_zone *z = &node->pg_zones[zone_id];
vm_page_t *pg = vm_zone_alloc_page(z, order, flags);
struct vm_page *pg = vm_zone_alloc_page(z, order, flags);
if (pg) {
return pg;
}
@@ -178,9 +178,9 @@ vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags)
return NULL;
}
void vm_page_free(vm_page_t *pg)
void vm_page_free(struct vm_page *pg)
{
vm_zone_t *z = vm_page_get_zone(pg);
struct vm_zone *z = vm_page_get_zone(pg);
if (!z) {
return;
}
@@ -188,7 +188,7 @@ void vm_page_free(vm_page_t *pg)
vm_zone_free_page(z, pg);
}
int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b)
int vm_page_split(struct vm_page *pg, struct vm_page **a, struct vm_page **b)
{
if (pg->p_order == VM_PAGE_MIN_ORDER) {
return -1;
@@ -202,7 +202,7 @@ int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b)
pg[i].p_order--;
}
vm_page_t *buddy = vm_page_get_buddy(pg);
struct vm_page *buddy = vm_page_get_buddy(pg);
if (pg->p_order == VM_PAGE_MIN_ORDER) {
pg->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
@@ -218,7 +218,7 @@ int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b)
return 0;
}
vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b)
struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b)
{
if (a->p_order != b->p_order) {
return NULL;
@@ -238,7 +238,7 @@ vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b)
/* make sure that a comes before b */
if (a > b) {
vm_page_t *tmp = a;
struct vm_page *tmp = a;
a = b;
b = tmp;
}
@@ -260,16 +260,16 @@ vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b)
return a;
}
vm_page_t *vm_page_get_buddy(vm_page_t *pg)
struct vm_page *vm_page_get_buddy(struct vm_page *pg)
{
phys_addr_t paddr = vm_page_get_paddr(pg);
paddr = paddr ^ vm_page_order_to_bytes(pg->p_order);
return vm_page_get(paddr);
}
vm_page_t *vm_page_get_next_tail(vm_page_t *pg)
struct vm_page *vm_page_get_next_tail(struct vm_page *pg)
{
vm_page_t *next = pg + 1;
struct vm_page *next = pg + 1;
if (next->p_flags & VM_PAGE_HEAD || !(next->p_flags & VM_PAGE_HUGE)) {
return NULL;
}