From b6f8c1ccaacef0963b469fbb5ecd361ddcd278e4 Mon Sep 17 00:00:00 2001 From: Max Wash Date: Wed, 12 Apr 2023 20:17:11 +0100 Subject: [PATCH] kernel: don't use typedef for enums or non-opaque structs --- arch/user/init.c | 2 +- arch/user/stdcon.c | 4 +- arch/x86_64/acpi/apic.cpp | 2 +- arch/x86_64/include/arch/acpi/io_apic.hpp | 2 +- arch/x86_64/include/arch/irq.h | 14 +- arch/x86_64/include/arch/paging.h | 20 +-- arch/x86_64/init.c | 4 +- arch/x86_64/irq.c | 14 +- arch/x86_64/pit.c | 2 +- arch/x86_64/pmap.c | 28 ++-- arch/x86_64/vgacon.c | 4 +- dev/core.c | 4 +- ds/btree.c | 88 +++++------ ds/queue.c | 32 ++-- include/socks/btree.h | 96 ++++++------ include/socks/console.h | 22 +-- include/socks/cpu.h | 18 +-- include/socks/device.h | 4 +- include/socks/kext.h | 2 +- include/socks/memblock.h | 48 +++--- include/socks/object.h | 84 ++++++----- include/socks/pmap.h | 8 +- include/socks/printk.h | 2 +- include/socks/queue.h | 46 +++--- include/socks/sched.h | 82 +++++------ include/socks/tty.h | 46 +++--- include/socks/vm.h | 172 +++++++++++----------- kernel/console.c | 12 +- kernel/cpu.c | 10 +- kernel/panic.c | 4 +- kernel/printk.c | 4 +- kernel/tty.c | 4 +- kxld/internal.c | 4 +- kxld/kext.c | 22 +-- obj/namespace.c | 36 ++--- obj/object.c | 34 ++--- obj/set.c | 26 ++-- sched/core.c | 4 +- sched/runqueue.c | 2 +- sched/task.c | 30 ++-- sched/thread.c | 18 +-- test/obj.c | 10 +- vm/bootstrap.c | 10 +- vm/cache.c | 48 +++--- vm/flat.c | 10 +- vm/kmalloc.c | 10 +- vm/memblock.c | 42 +++--- vm/model.c | 6 +- vm/page.c | 48 +++--- vm/sparse.c | 36 ++--- vm/zone.c | 48 +++--- 51 files changed, 663 insertions(+), 665 deletions(-) diff --git a/arch/user/init.c b/arch/user/init.c index f4a2e43..cc521c2 100644 --- a/arch/user/init.c +++ b/arch/user/init.c @@ -48,7 +48,7 @@ int ml_init(uintptr_t arg) memblock_add(0, PMEM_SIZE); - vm_zone_descriptor_t vm_zones[] = { + struct vm_zone_descriptor vm_zones[] = { { .zd_id = VM_ZONE_DMA, .zd_node = 0, .zd_name = "dma", .zd_base = 
0x00, .zd_limit = 0xffffff }, { .zd_id = VM_ZONE_NORMAL, .zd_node = 0, .zd_name = "normal", .zd_base = 0x1000000, .zd_limit = UINTPTR_MAX }, }; diff --git a/arch/user/stdcon.c b/arch/user/stdcon.c index d71373e..f548d64 100644 --- a/arch/user/stdcon.c +++ b/arch/user/stdcon.c @@ -6,14 +6,14 @@ #include #include -static void stdcon_write(console_t *con, const char *s, unsigned int len) +static void stdcon_write(struct console *con, const char *s, unsigned int len) { for (unsigned int i = 0; i < len; i++) { fputc(s[i], stdout); } } -static console_t stdcon = { +static struct console stdcon = { .c_name = "stdcon", .c_flags = CON_BOOT, .c_write = stdcon_write, diff --git a/arch/x86_64/acpi/apic.cpp b/arch/x86_64/acpi/apic.cpp index cc43982..19d247f 100644 --- a/arch/x86_64/acpi/apic.cpp +++ b/arch/x86_64/acpi/apic.cpp @@ -20,7 +20,7 @@ static int apic_enabled = 0; using namespace arch::acpi; static uint32_t *lapic_base; -static queue_t io_apics; +static struct queue io_apics; extern "C" { /* defined in apic_ctrl.S */ diff --git a/arch/x86_64/include/arch/acpi/io_apic.hpp b/arch/x86_64/include/arch/acpi/io_apic.hpp index 68a58d8..056882b 100644 --- a/arch/x86_64/include/arch/acpi/io_apic.hpp +++ b/arch/x86_64/include/arch/acpi/io_apic.hpp @@ -10,7 +10,7 @@ namespace arch::acpi { uint32_t *io_base = nullptr; unsigned int io_first_irq = 0; unsigned int io_nr_irq = 0; - queue_entry_t io_entry; + struct queue_entry io_entry; struct irq_entry { uint64_t irq_vec : 8; diff --git a/arch/x86_64/include/arch/irq.h b/arch/x86_64/include/arch/irq.h index 056ccde..4a3b36a 100644 --- a/arch/x86_64/include/arch/irq.h +++ b/arch/x86_64/include/arch/irq.h @@ -11,7 +11,7 @@ extern "C" { #define NR_IDT_ENTRIES 48 -typedef enum irq_vector { +enum irq_vector { IRQ0 = 32, IRQ1, IRQ2, @@ -28,12 +28,12 @@ typedef enum irq_vector { IRQ13, IRQ14, IRQ15, -} irq_vector_t; +}; -typedef struct irq_hook { - queue_entry_t irq_entry; +struct irq_hook { + struct queue_entry irq_entry; int 
(*irq_callback)(void); -} irq_hook_t; +}; struct cpu_context { uint64_t r15, r14, r13, r12, r11, r10, r9, r8; @@ -69,8 +69,8 @@ typedef void (*int_hook)(struct cpu_context *); extern int idt_init(struct idt_ptr *idtp); extern int idt_load(struct idt_ptr *idtp); -extern void hook_irq(irq_vector_t vec, irq_hook_t *hook); -extern void unhook_irq(irq_vector_t vec, irq_hook_t *hook); +extern void hook_irq(enum irq_vector vec, struct irq_hook *hook); +extern void unhook_irq(enum irq_vector vec, struct irq_hook *hook); #ifdef __cplusplus } diff --git a/arch/x86_64/include/arch/paging.h b/arch/x86_64/include/arch/paging.h index 1ce6ca4..d9c1ca7 100644 --- a/arch/x86_64/include/arch/paging.h +++ b/arch/x86_64/include/arch/paging.h @@ -19,37 +19,37 @@ extern "C" { typedef phys_addr_t pml4t_ptr_t; typedef uint64_t pte_t; -typedef struct pml4t { +struct pml4t { phys_addr_t p_entries[512]; -} __packed pml4t_t; +} __packed; -typedef struct pdpt { +struct pdpt { union { /* 4KiB and 2MiB pages */ phys_addr_t p_entries[512]; /* 1GiB pages */ pte_t p_pages[512]; }; -} __packed pdpt_t; +} __packed; -typedef struct pdir { +struct pdir { union { /* 4KiB pages */ phys_addr_t p_entries[512]; /* 2MiB pages */ pte_t p_pages[512]; }; -} __packed pdir_t; +} __packed; -typedef struct ptab { +struct ptab { pte_t p_pages[512]; -} __packed ptab_t; +} __packed; -typedef enum page_size { +enum page_size { PS_4K, PS_2M, PS_1G, -} page_size_t; +}; /* returns 1 if gigabyte pages are supported by the CPU, 0 otherwise. 
defined in pmap_ctrl.S */ diff --git a/arch/x86_64/init.c b/arch/x86_64/init.c index 13e717d..5d5f579 100644 --- a/arch/x86_64/init.c +++ b/arch/x86_64/init.c @@ -58,13 +58,13 @@ int ml_init(uintptr_t arg) acpi_scan_cpu_topology(); init_per_cpu_areas(); - cpu_data_t *this_cpu = get_this_cpu(); + struct cpu_data *this_cpu = get_this_cpu(); this_cpu->c_flags = CPU_ONLINE; this_cpu->c_id = this_cpu(); g_bootstrap_cpu.c_data = this_cpu; put_cpu(this_cpu); - vm_zone_descriptor_t vm_zones[] = { + struct vm_zone_descriptor vm_zones[] = { { .zd_id = VM_ZONE_DMA, .zd_node = 0, .zd_name = "dma", .zd_base = 0x00, .zd_limit = 0xffffff }, { .zd_id = VM_ZONE_NORMAL, .zd_node = 0, .zd_name = "normal", .zd_base = 0x1000000, .zd_limit = UINTPTR_MAX }, }; diff --git a/arch/x86_64/irq.c b/arch/x86_64/irq.c index d0e3379..633d6a2 100644 --- a/arch/x86_64/irq.c +++ b/arch/x86_64/irq.c @@ -64,7 +64,7 @@ extern void syscall_gate(); extern uintptr_t pf_faultptr(void); static int_hook isr_handlers[NR_IDT_ENTRIES]; -static queue_t irq_hooks[32]; +static struct queue irq_hooks[32]; static struct idt idt; static int idt_initialised = 0; @@ -233,8 +233,8 @@ void isr_dispatch(struct cpu_context *regs) void irq_dispatch(struct cpu_context *regs) { irq_ack(regs->int_no); - queue_t *hooks = &irq_hooks[regs->int_no - IRQ0]; - queue_foreach(irq_hook_t, hook, hooks, irq_entry) { + struct queue *hooks = &irq_hooks[regs->int_no - IRQ0]; + queue_foreach(struct irq_hook, hook, hooks, irq_entry) { hook->irq_callback(); } } @@ -244,14 +244,14 @@ void syscall_dispatch(struct cpu_context *regs) } -void hook_irq(irq_vector_t vec, irq_hook_t *hook) +void hook_irq(enum irq_vector vec, struct irq_hook *hook) { - queue_t *hook_queue = &irq_hooks[vec - IRQ0]; + struct queue *hook_queue = &irq_hooks[vec - IRQ0]; queue_push_back(hook_queue, &hook->irq_entry); } -void unhook_irq(irq_vector_t vec, irq_hook_t *hook) +void unhook_irq(enum irq_vector vec, struct irq_hook *hook) { - queue_t *hook_queue = &irq_hooks[vec - 
IRQ0]; + struct queue *hook_queue = &irq_hooks[vec - IRQ0]; queue_delete(hook_queue, &hook->irq_entry); } diff --git a/arch/x86_64/pit.c b/arch/x86_64/pit.c index d8e5cd8..311c03c 100644 --- a/arch/x86_64/pit.c +++ b/arch/x86_64/pit.c @@ -11,7 +11,7 @@ static int pit_callback(void) return 0; } -static irq_hook_t pit_irq_hook = { +static struct irq_hook pit_irq_hook = { .irq_callback = pit_callback }; diff --git a/arch/x86_64/pmap.c b/arch/x86_64/pmap.c index 8851ecf..6d5dca9 100644 --- a/arch/x86_64/pmap.c +++ b/arch/x86_64/pmap.c @@ -19,7 +19,7 @@ static int can_use_gbpages = 0; static pmap_t kernel_pmap; -static size_t ps_size(page_size_t ps) +static size_t ps_size(enum page_size ps) { switch (ps) { case PS_4K: @@ -35,11 +35,11 @@ static size_t ps_size(page_size_t ps) static pmap_t alloc_pmap() { - pml4t_t *p = kzalloc(sizeof *p, 0); + struct pml4t *p = kzalloc(sizeof *p, 0); return vm_virt_to_phys(p); } -static pte_t make_pte(pfn_t pfn, vm_prot_t prot, page_size_t size) +static pte_t make_pte(pfn_t pfn, enum vm_prot prot, enum page_size size) { pte_t v = pfn; @@ -95,7 +95,7 @@ static void delete_ptab(phys_addr_t pt) return; } - ptab_t *ptab = vm_phys_to_virt(pt); + struct ptab *ptab = vm_phys_to_virt(pt); kfree(ptab); } @@ -112,7 +112,7 @@ static void delete_pdir(phys_addr_t pd) return; } - pdir_t *pdir = vm_phys_to_virt(pd); + struct pdir *pdir = vm_phys_to_virt(pd); for (int i = 0; i < 512; i++) { if (pdir->p_pages[i] & PTE_PAGESIZE) { /* this is a hugepage, there is nothing to delete */ @@ -125,7 +125,7 @@ static void delete_pdir(phys_addr_t pd) kfree(pdir); } -static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, vm_prot_t prot, page_size_t size) +static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot prot, enum page_size size) { uintptr_t pv = (uintptr_t)p; unsigned int @@ -158,13 +158,13 @@ static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, vm_prot_t prot } /* 1. 
get PML4T (mandatory) */ - pml4t_t *pml4t = vm_phys_to_virt(ENTRY_TO_PTR(pmap)); + struct pml4t *pml4t = vm_phys_to_virt(ENTRY_TO_PTR(pmap)); if (!pml4t) { return KERN_INVALID_ARGUMENT; } /* 2. traverse PML4T, get PDPT (mandatory) */ - pdpt_t *pdpt = NULL; + struct pdpt *pdpt = NULL; if (!pml4t->p_entries[pml4t_index]) { pdpt = kzalloc(sizeof *pdpt, 0); pml4t->p_entries[pml4t_index] = PTR_TO_ENTRY(vm_virt_to_phys(pdpt)); @@ -187,7 +187,7 @@ static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, vm_prot_t prot /* 3. traverse PDPT, get PDIR (optional, 4K and 2M only) */ - pdir_t *pdir = NULL; + struct pdir *pdir = NULL; if (!pdpt->p_entries[pdpt_index] || pdpt->p_pages[pdpt_index] & PTE_PAGESIZE) { /* entry is null, or points to a hugepage */ pdir = kzalloc(sizeof *pdir, 0); @@ -209,7 +209,7 @@ static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, vm_prot_t prot } /* 4. traverse PDIR, get PTAB (optional, 4K only) */ - ptab_t *ptab = NULL; + struct ptab *ptab = NULL; if (!pdir->p_entries[pd_index] || pdir->p_pages[pd_index] & PTE_PAGESIZE) { /* entry is null, or points to a hugepage */ ptab = kzalloc(sizeof *ptab, 0); @@ -234,7 +234,7 @@ void pmap_bootstrap(void) enable_nx(); printk("pmap: NX protection enabled"); - page_size_t hugepage = PS_2M; + enum page_size hugepage = PS_2M; if (can_use_gbpages) { hugepage = PS_1G; } @@ -255,7 +255,7 @@ void pmap_bootstrap(void) } phys_addr_t pmem_limit = 0x0; - memblock_iter_t it; + struct memblock_iter it; for_each_mem_range(&it, 0x00, UINTPTR_MAX) { if (it.it_limit > pmem_limit) { pmem_limit = it.it_limit; @@ -283,12 +283,12 @@ void pmap_destroy(pmap_t pmap) } -kern_status_t pmap_add(pmap_t pmap, void *p, pfn_t pfn, vm_prot_t prot, pmap_flags_t flags) +kern_status_t pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot prot, enum pmap_flags flags) { return KERN_OK; } -kern_status_t pmap_add_block(pmap_t pmap, void *p, pfn_t pfn, size_t len, vm_prot_t prot, pmap_flags_t flags) +kern_status_t 
pmap_add_block(pmap_t pmap, void *p, pfn_t pfn, size_t len, enum vm_prot prot, enum pmap_flags flags) { return KERN_OK; } diff --git a/arch/x86_64/vgacon.c b/arch/x86_64/vgacon.c index d511540..5c2a43b 100644 --- a/arch/x86_64/vgacon.c +++ b/arch/x86_64/vgacon.c @@ -101,14 +101,14 @@ static void vgacon_putchar(int c) move_vga_cursor(g_console_cursor_xpos, g_console_cursor_ypos); } -static void vgacon_write(console_t *con, const char *s, unsigned int len) +static void vgacon_write(struct console *con, const char *s, unsigned int len) { for (unsigned int i = 0; i < len; i++) { vgacon_putchar(s[i]); } } -static console_t vgacon = { +static struct console vgacon = { .c_name = "vgacon", .c_flags = CON_BOOT, .c_write = vgacon_write, diff --git a/dev/core.c b/dev/core.c index c82aa68..a8d34a6 100644 --- a/dev/core.c +++ b/dev/core.c @@ -3,7 +3,7 @@ #include static struct device *root_device = NULL; -static object_type_t device_type = { +static struct object_type device_type = { .ob_name = "device", .ob_size = sizeof(struct device), .ob_ops = { @@ -31,7 +31,7 @@ kern_status_t set_root_device(struct device *dev) struct device *device_alloc(void) { - object_t *dev_object = object_create(&device_type); + struct object *dev_object = object_create(&device_type); if (!dev_object) { return NULL; } diff --git a/ds/btree.c b/ds/btree.c index ede4267..f9a048e 100644 --- a/ds/btree.c +++ b/ds/btree.c @@ -50,7 +50,7 @@ this file intentionally excludes any kind of search function implementation. it is up to the programmer to implement their own tree node type - using btree_node_t, and their own search function using btree_t. + using struct btree_node, and their own search function using struct btree. this allows the programmer to define their own node types with complex non-integer key types. btree.h contains a number of macros to help define these functions. the macros do all the work, you just have to @@ -75,12 +75,12 @@ #define HEIGHT(x) ((x) ? 
(x)->b_height : 0) -static inline void update_height(btree_node_t *x) +static inline void update_height(struct btree_node *x) { x->b_height = MAX(HEIGHT(x->b_left), HEIGHT((x->b_right))) + 1; } -static inline int bf(btree_node_t *x) +static inline int bf(struct btree_node *x) { int bf = 0; @@ -125,11 +125,11 @@ static inline int bf(btree_node_t *x) note that this function does NOT update b_height for the rotated nodes. it is up to you to call update_height_to_root(). */ -static void rotate_left(btree_t *tree, btree_node_t *x) +static void rotate_left(struct btree *tree, struct btree_node *x) { - btree_node_t *y = x->b_right; + struct btree_node *y = x->b_right; - btree_node_t *p = x->b_parent; + struct btree_node *p = x->b_parent; if (y->b_left) { y->b_left->b_parent = x; @@ -150,7 +150,7 @@ static void rotate_left(btree_t *tree, btree_node_t *x) y->b_parent = p; } -static void update_height_to_root(btree_node_t *x) +static void update_height_to_root(struct btree_node *x) { while (x) { update_height(x); @@ -184,11 +184,11 @@ static void update_height_to_root(btree_node_t *x) note that this function does NOT update b_height for the rotated nodes. it is up to you to call update_height_to_root(). */ -static void rotate_right(btree_t *tree, btree_node_t *y) +static void rotate_right(struct btree *tree, struct btree_node *y) { - btree_node_t *x = y->b_left; + struct btree_node *x = y->b_left; - btree_node_t *p = y->b_parent; + struct btree_node *p = y->b_parent; if (x->b_right) { x->b_right->b_parent = y; @@ -236,10 +236,10 @@ static void rotate_right(btree_t *tree, btree_node_t *y) DOES update b_height for the rotated nodes (since it needs to be done in a certain order). 
*/ -static void rotate_double_left(btree_t *tree, btree_node_t *z) +static void rotate_double_left(struct btree *tree, struct btree_node *z) { - btree_node_t *x = z->b_right; - btree_node_t *y = x->b_left; + struct btree_node *x = z->b_right; + struct btree_node *y = x->b_left; rotate_right(tree, x); rotate_left(tree, z); @@ -280,10 +280,10 @@ static void rotate_double_left(btree_t *tree, btree_node_t *z) DOES update b_height for the rotated nodes (since it needs to be done in a certain order). */ -static void rotate_double_right(btree_t *tree, btree_node_t *z) +static void rotate_double_right(struct btree *tree, struct btree_node *z) { - btree_node_t *x = z->b_left; - btree_node_t *y = x->b_right; + struct btree_node *x = z->b_left; + struct btree_node *y = x->b_right; rotate_left(tree, x); rotate_right(tree, z); @@ -309,9 +309,9 @@ static void rotate_double_right(btree_t *tree, btree_node_t *z) @param w the node that was just inserted into the tree */ -static void insert_fixup(btree_t *tree, btree_node_t *w) +static void insert_fixup(struct btree *tree, struct btree_node *w) { - btree_node_t *z = NULL, *y = NULL, *x = NULL; + struct btree_node *z = NULL, *y = NULL, *x = NULL; z = w; while (z) { @@ -360,9 +360,9 @@ next_ancestor: - the node that replaced the node that was deleted, if the node that was deleted had one child. */ -static void delete_fixup(btree_t *tree, btree_node_t *w) +static void delete_fixup(struct btree *tree, struct btree_node *w) { - btree_node_t *z = w; + struct btree_node *z = w; while (z) { if (bf(z) > 1) { @@ -390,11 +390,11 @@ static void delete_fixup(btree_t *tree, btree_node_t *w) @param node the node that was just inserted into the tree. 
*/ -void btree_insert_fixup(btree_t *tree, btree_node_t *node) +void btree_insert_fixup(struct btree *tree, struct btree_node *node) { node->b_height = 0; - btree_node_t *cur = node; + struct btree_node *cur = node; while (cur) { update_height(cur); cur = cur->b_parent; @@ -412,10 +412,10 @@ void btree_insert_fixup(btree_t *tree, btree_node_t *node) @param node the node to delete. */ -static btree_node_t *remove_node_with_no_children(btree_t *tree, btree_node_t *node) +static struct btree_node *remove_node_with_no_children(struct btree *tree, struct btree_node *node) { - btree_node_t *w = node->b_parent; - btree_node_t *p = node->b_parent; + struct btree_node *w = node->b_parent; + struct btree_node *p = node->b_parent; node->b_parent = NULL; if (!p) { @@ -444,10 +444,10 @@ static btree_node_t *remove_node_with_no_children(btree_t *tree, btree_node_t *n @param node the node to delete. */ -static btree_node_t *replace_node_with_one_subtree(btree_t *tree, btree_node_t *node) +static struct btree_node *replace_node_with_one_subtree(struct btree *tree, struct btree_node *node) { - btree_node_t *p = node->b_parent; - btree_node_t *z = NULL; + struct btree_node *p = node->b_parent; + struct btree_node *z = NULL; if (HAS_LEFT_CHILD(node)) { z = node->b_left; @@ -455,7 +455,7 @@ static btree_node_t *replace_node_with_one_subtree(btree_t *tree, btree_node_t * z = node->b_right; } - btree_node_t *w = z; + struct btree_node *w = z; if (!p) { tree->b_root = z; } else if (IS_LEFT_CHILD(p, node)) { @@ -491,20 +491,20 @@ static btree_node_t *replace_node_with_one_subtree(btree_t *tree, btree_node_t * @param z the node to delete. 
*/ -static btree_node_t *replace_node_with_two_subtrees(btree_t *tree, btree_node_t *z) +static struct btree_node *replace_node_with_two_subtrees(struct btree *tree, struct btree_node *z) { /* x will replace z */ - btree_node_t *x = z->b_left; + struct btree_node *x = z->b_left; while (x->b_right) { x = x->b_right; } /* y is the node that will replace x (if x has a left child) */ - btree_node_t *y = x->b_left; + struct btree_node *y = x->b_left; /* w is the starting point for the height update and fixup */ - btree_node_t *w = x; + struct btree_node *w = x; if (w->b_parent != z) { w = w->b_parent; } @@ -545,7 +545,7 @@ static btree_node_t *replace_node_with_two_subtrees(btree_t *tree, btree_node_t tree->b_root = x; } - btree_node_t *cur = w; + struct btree_node *cur = w; while (cur) { update_height(cur); cur = cur->b_parent; @@ -555,9 +555,9 @@ static btree_node_t *replace_node_with_two_subtrees(btree_t *tree, btree_node_t } /* delete a node from the tree and re-balance it afterwards */ -void btree_delete(btree_t *tree, btree_node_t *node) +void btree_delete(struct btree *tree, struct btree_node *node) { - btree_node_t *w = NULL; + struct btree_node *w = NULL; if (HAS_NO_CHILDREN(node)) { w = remove_node_with_no_children(tree, node); @@ -574,11 +574,11 @@ void btree_delete(btree_t *tree, btree_node_t *node) node->b_left = node->b_right = node->b_parent = NULL; } -btree_node_t *btree_first(btree_t *tree) +struct btree_node *btree_first(struct btree *tree) { /* the first node in the tree is the node with the smallest key. we keep moving left until we can't go any further */ - btree_node_t *cur = tree->b_root; + struct btree_node *cur = tree->b_root; if (!cur) { return NULL; } @@ -590,11 +590,11 @@ btree_node_t *btree_first(btree_t *tree) return cur; } -btree_node_t *btree_last(btree_t *tree) +struct btree_node *btree_last(struct btree *tree) { /* the first node in the tree is the node with the largest key. 
we keep moving right until we can't go any further */ - btree_node_t *cur = tree->b_root; + struct btree_node *cur = tree->b_root; if (!cur) { return NULL; } @@ -606,7 +606,7 @@ btree_node_t *btree_last(btree_t *tree) return cur; } -btree_node_t *btree_next(btree_node_t *node) +struct btree_node *btree_next(struct btree_node *node) { if (!node) { return NULL; @@ -624,7 +624,7 @@ btree_node_t *btree_next(btree_node_t *node) if (node->b_right) { /* case 1: step into `node`'s right sub-tree and keep going left to find the smallest node */ - btree_node_t *cur = node->b_right; + struct btree_node *cur = node->b_right; while (cur->b_left) { cur = cur->b_left; } @@ -643,7 +643,7 @@ btree_node_t *btree_next(btree_node_t *node) return node->b_parent; } -btree_node_t *btree_prev(btree_node_t *node) +struct btree_node *btree_prev(struct btree_node *node) { if (!node) { return NULL; @@ -661,7 +661,7 @@ btree_node_t *btree_prev(btree_node_t *node) if (node->b_left) { /* case 1: step into `node`'s left sub-tree and keep going right to find the largest node */ - btree_node_t *cur = node->b_left; + struct btree_node *cur = node->b_left; while (cur->b_right) { cur = cur->b_right; } diff --git a/ds/queue.c b/ds/queue.c index 4d57bac..7e784f5 100644 --- a/ds/queue.c +++ b/ds/queue.c @@ -1,9 +1,9 @@ #include -size_t queue_length(queue_t *q) +size_t queue_length(struct queue *q) { size_t i = 0; - queue_entry_t *x = q->q_first; + struct queue_entry *x = q->q_first; while (x) { i++; x = x->qe_next; @@ -12,9 +12,9 @@ size_t queue_length(queue_t *q) return i; } -void queue_insert_before(queue_t *q, queue_entry_t *entry, queue_entry_t *before) +void queue_insert_before(struct queue *q, struct queue_entry *entry, struct queue_entry *before) { - queue_entry_t *x = before->qe_prev; + struct queue_entry *x = before->qe_prev; if (x) { x->qe_next = entry; } else { @@ -27,9 +27,9 @@ void queue_insert_before(queue_t *q, queue_entry_t *entry, queue_entry_t *before entry->qe_next = before; } -void 
queue_insert_after(queue_t *q, queue_entry_t *entry, queue_entry_t *after) +void queue_insert_after(struct queue *q, struct queue_entry *entry, struct queue_entry *after) { - queue_entry_t *x = after->qe_next; + struct queue_entry *x = after->qe_next; if (x) { x->qe_prev = entry; } else { @@ -42,7 +42,7 @@ void queue_insert_after(queue_t *q, queue_entry_t *entry, queue_entry_t *after) entry->qe_prev = after; } -void queue_push_front(queue_t *q, queue_entry_t *entry) +void queue_push_front(struct queue *q, struct queue_entry *entry) { if (q->q_first) { q->q_first->qe_prev = entry; @@ -58,7 +58,7 @@ void queue_push_front(queue_t *q, queue_entry_t *entry) } } -void queue_push_back(queue_t *q, queue_entry_t *entry) +void queue_push_back(struct queue *q, struct queue_entry *entry) { if (q->q_last) { q->q_last->qe_next = entry; @@ -74,9 +74,9 @@ void queue_push_back(queue_t *q, queue_entry_t *entry) } } -queue_entry_t *queue_pop_front(queue_t *q) +struct queue_entry *queue_pop_front(struct queue *q) { - queue_entry_t *x = q->q_first; + struct queue_entry *x = q->q_first; if (x) { queue_delete(q, x); } @@ -84,9 +84,9 @@ queue_entry_t *queue_pop_front(queue_t *q) return x; } -queue_entry_t *queue_pop_back(queue_t *q) +struct queue_entry *queue_pop_back(struct queue *q) { - queue_entry_t *x = q->q_last; + struct queue_entry *x = q->q_last; if (x) { queue_delete(q, x); } @@ -94,7 +94,7 @@ queue_entry_t *queue_pop_back(queue_t *q) return x; } -void queue_delete(queue_t *q, queue_entry_t *entry) +void queue_delete(struct queue *q, struct queue_entry *entry) { if (!entry) { return; @@ -119,11 +119,11 @@ void queue_delete(queue_t *q, queue_entry_t *entry) entry->qe_next = entry->qe_prev = NULL; } -void queue_delete_all(queue_t *q) +void queue_delete_all(struct queue *q) { - queue_entry_t *x = q->q_first; + struct queue_entry *x = q->q_first; while (x) { - queue_entry_t *next = x->qe_next; + struct queue_entry *next = x->qe_next; x->qe_next = x->qe_prev = NULL; x = next; } diff 
--git a/include/socks/btree.h b/include/socks/btree.h index 1367ca0..2593074 100644 --- a/include/socks/btree.h +++ b/include/socks/btree.h @@ -29,12 +29,12 @@ extern "C" { #endif -/* if your custom structure contains a btree_node_t (i.e. it can be part of a btree), - you can use this macro to convert a btree_node_t* to a your_type* +/* if your custom structure contains a struct btree_node (i.e. it can be part of a btree), + you can use this macro to convert a struct btree_node* to a your_type* @param t the name of your custom type (something that can be passed to offsetof) - @param m the name of the btree_node_t member variable within your custom type. - @param v the btree_node_t pointer that you wish to convert. if this is NULL, NULL will be returned. + @param m the name of the struct btree_node member variable within your custom type. + @param v the struct btree_node pointer that you wish to convert. if this is NULL, NULL will be returned. */ #define BTREE_CONTAINER(t, m, v) ((void *)((v) ? (uintptr_t)(v) - (offsetof(t, m)) : 0)) @@ -46,7 +46,7 @@ extern "C" { struct my_tree_node { int key; - btree_node_t base; + struct btree_node base; } You would use the following call to generate an insert function for a tree with this node type: @@ -55,15 +55,15 @@ extern "C" { Which would emit a function defined like: - static void my_tree_node_insert(btree_t *tree, struct my_tree_node *node); + static void my_tree_node_insert(struct btree *tree, struct my_tree_node *node); - @param node_type your custom tree node type. usually a structure that contains a btree_node_t member. - @param container_node_member the name of the btree_node_t member variable within your custom type. + @param node_type your custom tree node type. usually a structure that contains a struct btree_node member. + @param container_node_member the name of the struct btree_node member variable within your custom type. @param container_key_member the name of the key member variable within your custom type. 
@param function_name the name of the function to generate. */ #define BTREE_DEFINE_SIMPLE_INSERT(node_type, container_node_member, container_key_member, function_name) \ - static void function_name(btree_t *tree, node_type *node) \ + static void function_name(struct btree *tree, node_type *node) \ { \ if (!tree->b_root) { \ tree->b_root = &node->container_node_member; \ @@ -71,10 +71,10 @@ extern "C" { return; \ } \ \ - btree_node_t *cur = tree->b_root; \ + struct btree_node *cur = tree->b_root; \ while (1) { \ node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \ - btree_node_t *next = NULL; \ + struct btree_node *next = NULL; \ \ if (node->container_key_member > cur_node->container_key_member) { \ next = btree_right(cur); \ @@ -109,7 +109,7 @@ extern "C" { struct my_tree_node { complex_key_t key; - btree_node_t base; + struct btree_node base; } You would need to define a comparator function or macro with the following signature: @@ -128,17 +128,17 @@ extern "C" { Which would emit a function defined like: - static void my_tree_node_insert(btree_t *tree, struct my_tree_node *node); + static void my_tree_node_insert(struct btree *tree, struct my_tree_node *node); - @param node_type your custom tree node type. usually a structure that contains a btree_node_t member. - @param container_node_member the name of the btree_node_t member variable within your custom type. + @param node_type your custom tree node type. usually a structure that contains a struct btree_node member. + @param container_node_member the name of the struct btree_node member variable within your custom type. @param container_key_member the name of the key member variable within your custom type. @param function_name the name of the function to generate. @param comparator the name of a comparator function or functional-macro that conforms to the requirements listed above. 
*/ #define BTREE_DEFINE_INSERT(node_type, container_node_member, container_key_member, function_name, comparator) \ - static void function_name(btree_t *tree, node_type *node) \ + static void function_name(struct btree *tree, node_type *node) \ { \ if (!tree->b_root) { \ tree->b_root = &node->container_node_member; \ @@ -146,10 +146,10 @@ extern "C" { return; \ } \ \ - btree_node_t *cur = tree->b_root; \ + struct btree_node *cur = tree->b_root; \ while (1) { \ node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \ - btree_node_t *next = NULL; \ + struct btree_node *next = NULL; \ int cmp = comparator(node, cur_node); \ \ if (cmp == 1) { \ @@ -184,7 +184,7 @@ extern "C" { struct my_tree_node { int key; - btree_node_t base; + struct btree_node base; } You would use the following call to generate a search function for a tree with this node type: @@ -193,19 +193,19 @@ extern "C" { Which would emit a function defined like: - static struct my_tree_node *my_tree_node_get(btree_t *tree, int key); + static struct my_tree_node *my_tree_node_get(struct btree *tree, int key); - @param node_type your custom tree node type. usually a structure that contains a btree_node_t member. + @param node_type your custom tree node type. usually a structure that contains a struct btree_node member. @param key_type the type name of the key embedded in your custom tree node type. this type must be compatible with the builtin comparison operators. - @param container_node_member the name of the btree_node_t member variable within your custom type. + @param container_node_member the name of the struct btree_node member variable within your custom type. @param container_key_member the name of the key member variable within your custom type. @param function_name the name of the function to generate. 
*/ #define BTREE_DEFINE_SIMPLE_GET(node_type, key_type, container_node_member, container_key_member, function_name) \ -node_type *function_name(btree_t *tree, key_type key) \ +node_type *function_name(struct btree *tree, key_type key) \ { \ - btree_node_t *cur = tree->b_root; \ + struct btree_node *cur = tree->b_root; \ while (cur) { \ node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \ if (key > cur_node->container_key_member) { \ @@ -224,13 +224,13 @@ node_type *function_name(btree_t *tree, key_type key) \ If you have a tree defined like: - btree_t my_tree; + struct btree my_tree; with nodes defined like: struct my_tree_node { int key; - btree_node_t base; + struct btree_node base; } and you want to do something like: @@ -244,7 +244,7 @@ node_type *function_name(btree_t *tree, key_type key) \ @param iter_type the type name of the iterator variable. this should be the tree's node type, and shouldn't be a pointer. @param iter_name the name of the iterator variable. @param tree_name a pointer to the tree to traverse. - @param node_member the name of the btree_node_t member variable within the tree node type. + @param node_member the name of the struct btree_node member variable within the tree node type. */ #define btree_foreach(iter_type, iter_name, tree_name, node_member) \ for (iter_type *iter_name = BTREE_CONTAINER(iter_type, node_member, btree_first(tree_name)); \ @@ -255,13 +255,13 @@ node_type *function_name(btree_t *tree, key_type key) \ If you have a tree defined like: - btree_t my_tree; + struct btree my_tree; with nodes defined like: struct my_tree_node { int key; - btree_node_t base; + struct btree_node base; } and you want to do something like: @@ -275,7 +275,7 @@ node_type *function_name(btree_t *tree, key_type key) \ @param iter_type the type name of the iterator variable. this should be the tree's node type, and shouldn't be a pointer. @param iter_name the name of the iterator variable. 
@param tree_name a pointer to the tree to traverse. - @param node_member the name of the btree_node_t member variable within the tree node type. + @param node_member the name of the struct btree_node member variable within the tree node type. */ #define btree_foreach_r(iter_type, iter_name, tree_name, node_member) \ for (iter_type *iter_name = BTREE_CONTAINER(iter_type, node_member, btree_last(tree_name)); \ @@ -283,19 +283,19 @@ node_type *function_name(btree_t *tree, key_type key) \ iter_name = BTREE_CONTAINER(iter_type, node_member, btree_prev(&((iter_name)->node_member)))) /* binary tree nodes. this *cannot* be used directly. you need to define a custom node type - that contains a member variable of type btree_node_t. + that contains a member variable of type struct btree_node. you would then use the supplied macros to define functions to manipulate your custom binary tree. */ -typedef struct btree_node { +struct btree_node { struct btree_node *b_parent, *b_left, *b_right; unsigned short b_height; -} btree_node_t; +}; -/* binary tree. unlike btree_node_t, you can define variables of type btree_t. */ -typedef struct btree { +/* binary tree. unlike struct btree_node, you can define variables of type struct btree. */ +struct btree { struct btree_node *b_root; -} btree_t; +}; /* re-balance a binary tree after an insertion operation. @@ -305,59 +305,59 @@ typedef struct btree { @param tree the tree to re-balance. @param node the node that was just inserted into the tree. */ -extern void btree_insert_fixup(btree_t *tree, btree_node_t *node); +extern void btree_insert_fixup(struct btree *tree, struct btree_node *node); /* delete a node from a binary tree and re-balance the tree afterwards. @param tree the tree to delete from @param node the node to delete. */ -extern void btree_delete(btree_t *tree, btree_node_t *node); +extern void btree_delete(struct btree *tree, struct btree_node *node); /* get the first node in a binary tree. 
this will be the node with the smallest key (i.e. the node that is furthest-left from the root) */ -extern btree_node_t *btree_first(btree_t *tree); +extern struct btree_node *btree_first(struct btree *tree); /* get the last node in a binary tree. this will be the node with the largest key (i.e. the node that is furthest-right from the root) */ -extern btree_node_t *btree_last(btree_t *tree); +extern struct btree_node *btree_last(struct btree *tree); /* for any binary tree node, this function returns the node with the next-largest key value */ -extern btree_node_t *btree_next(btree_node_t *node); +extern struct btree_node *btree_next(struct btree_node *node); /* for any binary tree node, this function returns the node with the next-smallest key value */ -extern btree_node_t *btree_prev(btree_node_t *node); +extern struct btree_node *btree_prev(struct btree_node *node); /* sets `child` as the immediate left-child of `parent` */ -static inline void btree_put_left(btree_node_t *parent, btree_node_t *child) +static inline void btree_put_left(struct btree_node *parent, struct btree_node *child) { parent->b_left = child; child->b_parent = parent; } /* sets `child` as the immediate right-child of `parent` */ -static inline void btree_put_right(btree_node_t *parent, btree_node_t *child) +static inline void btree_put_right(struct btree_node *parent, struct btree_node *child) { parent->b_right = child; child->b_parent = parent; } /* get the immediate left-child of `node` */ -static inline btree_node_t *btree_left(btree_node_t *node) +static inline struct btree_node *btree_left(struct btree_node *node) { return node->b_left; } /* get the immediate right-child of `node` */ -static inline btree_node_t *btree_right(btree_node_t *node) +static inline struct btree_node *btree_right(struct btree_node *node) { return node->b_right; } /* get the immediate parent of `node` */ -static inline btree_node_t *btree_parent(btree_node_t *node) +static inline struct btree_node 
*btree_parent(struct btree_node *node) { return node->b_parent; } @@ -369,7 +369,7 @@ static inline btree_node_t *btree_parent(btree_node_t *node) this count includes the node itself, so the height of a leaf node will be 1. */ -static inline unsigned short btree_height(btree_node_t *node) +static inline unsigned short btree_height(struct btree_node *node) { return node->b_height; } diff --git a/include/socks/console.h b/include/socks/console.h index a16aea6..fee6ea3 100644 --- a/include/socks/console.h +++ b/include/socks/console.h @@ -6,7 +6,7 @@ Consoles are like simplified TTYs. Their purpose is to serve as an output sink for messages printed using printk. - a console_t could be used to represent a serial port, UART port, or even + a struct console could be used to represent a serial port, UART port, or even a text-based framebuffer display. Anything where the job of displaying or sending text can be abstracted to a simple write() call. @@ -22,29 +22,29 @@ extern "C" { #endif -typedef enum console_flags { +enum console_flags { /* console is only used during the boot process. 
the console will be automatically de-registered when the first non-boot console is registered */ CON_BOOT = 0x01u, -} console_flags_t; +}; -typedef struct console { +struct console { char c_name[16]; - console_flags_t c_flags; + enum console_flags c_flags; spin_lock_t c_lock; void (*c_write)(struct console *, const char *, unsigned int); int (*c_read)(struct console *, char *, unsigned int); - queue_entry_t c_list; -} console_t; + struct queue_entry c_list; +}; -extern kern_status_t console_register(console_t *con); -extern kern_status_t console_unregister(console_t *con); +extern kern_status_t console_register(struct console *con); +extern kern_status_t console_unregister(struct console *con); -extern void console_write(console_t *con, const char *s, unsigned int len); -extern int console_read(console_t *con, char *s, unsigned int len); +extern void console_write(struct console *con, const char *s, unsigned int len); +extern int console_read(struct console *con, char *s, unsigned int len); #ifdef __cplusplus } diff --git a/include/socks/cpu.h b/include/socks/cpu.h index a0e8045..33016a6 100644 --- a/include/socks/cpu.h +++ b/include/socks/cpu.h @@ -8,18 +8,18 @@ extern "C" { #endif -typedef enum cpu_flags { +enum cpu_flags { CPU_ONLINE = 0x01u, -} cpu_flags_t; +}; -typedef struct cpu_data { - cpu_flags_t c_flags; +struct cpu_data { + enum cpu_flags c_flags; unsigned int c_id; unsigned int c_preempt_count; - thread_t *c_current_thread; - runqueue_t c_rq; -} cpu_data_t; + struct thread *c_current_thread; + struct runqueue c_rq; +}; /* maximum number of processor cores that the kernel can support. 
TODO move to build config option */ @@ -27,8 +27,8 @@ typedef struct cpu_data { #define this_cpu() (ml_cpu_block_get_id(ml_this_cpu())) -extern cpu_data_t *get_this_cpu(void); -extern void put_cpu(cpu_data_t *cpu); +extern struct cpu_data *get_this_cpu(void); +extern void put_cpu(struct cpu_data *cpu); extern void cpu_set_available(unsigned int cpu_id); extern void cpu_set_online(unsigned int cpu_id); diff --git a/include/socks/device.h b/include/socks/device.h index 314a2ef..2bcd03c 100644 --- a/include/socks/device.h +++ b/include/socks/device.h @@ -69,8 +69,8 @@ struct bus_device { struct device { enum device_type dev_type; struct device *dev_parent; - queue_t dev_children; - queue_entry_t dev_childent; + struct queue dev_children; + struct queue_entry dev_childent; void *dev_priv; diff --git a/include/socks/kext.h b/include/socks/kext.h index 5b0e42b..1d8a332 100644 --- a/include/socks/kext.h +++ b/include/socks/kext.h @@ -72,7 +72,7 @@ struct kext { enum kext_flags k_flags; char k_ident[KEXT_IDENT_MAX]; uint64_t k_ident_hash; - btree_node_t k_node; + struct btree_node k_node; kern_status_t(*k_online)(struct kext *); kern_status_t(*k_offline)(struct kext *); diff --git a/include/socks/memblock.h b/include/socks/memblock.h index d4a77a1..c87e7f1 100644 --- a/include/socks/memblock.h +++ b/include/socks/memblock.h @@ -45,7 +45,7 @@ extern "C" { this iteration can be optionally constrained to a given region. - @param i the iterator. this should be a pointer of type memblock_iter_t. + @param i the iterator. this should be a pointer of type struct memblock_iter. for each iteration, this structure will be filled with details about the current memory region. @param p_start the lower bound of the memory region to iterate through. @@ -55,14 +55,14 @@ extern "C" { EXAMPLE: to iterate through all memory regions (with no bounds): - memblock_iter_t it; + struct memblock_iter it; for_each_mem_region (&it, 0x0, UINTPTR_MAX) { ... 
} EXAMPLE: to iterate through all memory regions between physical addresses 0x40000 and 0x80000: - memblock_iter_t it; + struct memblock_iter it; for_each_mem_region (&it, 0x40000, 0x80000) { ... } */ #define for_each_mem_range(i, p_start, p_end) \ @@ -75,7 +75,7 @@ extern "C" { this iteration can be optionally constrained to a given region. - @param i the iterator. this should be a pointer of type memblock_iter_t. + @param i the iterator. this should be a pointer of type struct memblock_iter. for each iteration, this structure will be filled with details about the current memory region. @param p_start the lower bound of the memory region to iterate through. @@ -85,14 +85,14 @@ extern "C" { EXAMPLE: to iterate through all reserved memory regions (with no bounds): - memblock_iter_t it; + struct memblock_iter it; for_each_reserved_mem_region (&it, 0x0, UINTPTR_MAX) { ... } EXAMPLE: to iterate through all reserved memory regions between physical addresses 0x40000 and 0x80000: - memblock_iter_t it; + struct memblock_iter it; for_each_reserved_mem_region (&it, 0x40000, 0x80000) { ... } */ #define for_each_reserved_mem_range(i, p_start, p_end) \ @@ -106,7 +106,7 @@ extern "C" { this iteration can be optionally constrained to a given region. - @param i the iterator. this should be a pointer of type memblock_iter_t. + @param i the iterator. this should be a pointer of type struct memblock_iter. for each iteration, this structure will be filled with details about the current memory region. @param p_start the lower bound of the memory region to iterate through. @@ -128,7 +128,7 @@ extern "C" { the following call: - memblock_iter_t it; + struct memblock_iter it; for_each_free_mem_range (&it, 0x0, UINTPTR_MAX) { ... 
} would iterate through the following sequence of free memory ranges: @@ -143,7 +143,7 @@ extern "C" { typedef uint64_t memblock_index_t; -typedef enum memblock_region_status { +enum memblock_region_status { /* Used in memblock.memory regions, indicates that the memory region exists */ MEMBLOCK_MEMORY = 0, /* Used in memblock.reserved regions, indicates that the memory region was reserved @@ -152,27 +152,27 @@ typedef enum memblock_region_status { /* Used in memblock.reserved regions, indicates that the memory region was reserved * by a call to memblock_reserve() */ MEMBLOCK_RESERVED, -} memblock_region_status_t; +}; -typedef struct memblock_region { +struct memblock_region { /* the status of the memory region (free, reserved, allocated, etc) */ - memblock_region_status_t status; + enum memblock_region_status status; /* the address of the first byte that makes up the region */ phys_addr_t base; /* the address of the last byte that makes up the region */ phys_addr_t limit; -} memblock_region_t; +}; /* buffer of memblock regions, all of which are the same type (memory, reserved, etc) */ -typedef struct memblock_type { +struct memblock_type { struct memblock_region *regions; unsigned int count; unsigned int max; const char *name; -} memblock_type_t; +}; -typedef struct memblock { +struct memblock { /* bounds of the memory region that can be used by memblock_alloc() both of these are virtual addresses */ uintptr_t m_alloc_start, m_alloc_end; @@ -183,19 +183,19 @@ typedef struct memblock { struct memblock_type memory; struct memblock_type reserved; -} memblock_t; +}; -typedef struct memblock_iter { +struct memblock_iter { memblock_index_t __idx; phys_addr_t it_base; phys_addr_t it_limit; - memblock_region_status_t it_status; -} memblock_iter_t; + enum memblock_region_status it_status; +}; /* global memblock state. 
*/ -extern memblock_t memblock; +extern struct memblock memblock; -extern int __next_mem_range(memblock_iter_t *it); +extern int __next_mem_range(struct memblock_iter *it); /* initialise the global memblock state. this function must be called before any other memblock functions can be used. @@ -319,8 +319,8 @@ extern phys_addr_t memblock_virt_to_phys(void *p); */ extern void *memblock_phys_to_virt(phys_addr_t p); -extern void __next_memory_region(memblock_iter_t *it, \ - memblock_type_t *type_a, memblock_type_t *type_b, +extern void __next_memory_region(struct memblock_iter *it, \ + struct memblock_type *type_a, struct memblock_type *type_b, phys_addr_t start, phys_addr_t end); #ifdef __cplusplus diff --git a/include/socks/object.h b/include/socks/object.h index fa41bc1..33d67a8 100644 --- a/include/socks/object.h +++ b/include/socks/object.h @@ -16,11 +16,11 @@ extern "C" { struct object; struct object_attrib; -typedef enum object_type_flags { +enum object_type_flags { OBJTYPE_INIT = 0x01u, -} object_type_flags_t; +}; -typedef struct object_ops { +struct object_ops { kern_status_t(*open)(struct object *obj); kern_status_t(*close)(struct object *obj); kern_status_t(*destroy)(struct object *obj); @@ -30,64 +30,62 @@ typedef struct object_ops { kern_status_t(*get_at)(struct object *obj, size_t at, struct object **out); kern_status_t(*read_attrib)(struct object *obj, struct object_attrib *attrib, char *out, size_t max, size_t *r); kern_status_t(*write_attrib)(struct object *obj, struct object_attrib *attrib, const char *s, size_t len, size_t *r); -} object_ops_t; +}; -typedef struct object_attrib { +struct object_attrib { char *a_name; - queue_entry_t a_list; -} object_attrib_t; + struct queue_entry a_list; +}; -typedef struct object_type { - object_type_flags_t ob_flags; +struct object_type { + enum object_type_flags ob_flags; char ob_name[32]; unsigned int ob_size; - vm_cache_t ob_cache; - queue_entry_t ob_list; - queue_t ob_attrib; - object_ops_t ob_ops; -} 
object_type_t; + struct vm_cache ob_cache; + struct queue_entry ob_list; + struct queue ob_attrib; + struct object_ops ob_ops; +}; -typedef struct object { +struct object { uint32_t ob_magic; - object_type_t *ob_type; + struct object_type *ob_type; spin_lock_t ob_lock; unsigned int ob_refcount; unsigned int ob_handles; - queue_t ob_attrib; - queue_entry_t ob_list; -} __aligned(sizeof(long)) object_t; - -typedef struct object_namespace object_namespace_t; + struct queue ob_attrib; + struct queue_entry ob_list; +} __aligned(sizeof(long)); extern kern_status_t object_bootstrap(void); -extern kern_status_t object_type_register(object_type_t *p); -extern kern_status_t object_type_unregister(object_type_t *p); +extern kern_status_t object_type_register(struct object_type *p); +extern kern_status_t object_type_unregister(struct object_type *p); -extern object_namespace_t *global_namespace(void); -extern object_namespace_t *object_namespace_create(void); -extern kern_status_t object_namespace_get_object(object_namespace_t *ns, const char *path, object_t **out); -extern kern_status_t object_publish(object_namespace_t *ns, const char *path, object_t *obj); -extern kern_status_t object_unpublish(object_namespace_t *ns, object_t *obj); +extern struct object_namespace *global_namespace(void); +extern struct object_namespace *object_namespace_create(void); +extern kern_status_t object_namespace_get_object(struct object_namespace *ns, const char *path, struct object **out); +extern kern_status_t object_publish(struct object_namespace *ns, const char *path, struct object *obj); +extern kern_status_t object_unpublish(struct object_namespace *ns, struct object *obj); -extern object_t *object_create(object_type_t *type); -extern object_t *object_ref(object_t *obj); -extern void object_deref(object_t *obj); -extern void object_lock(object_t *obj, unsigned long *flags); -extern void object_unlock(object_t *obj, unsigned long flags); -extern void *object_data(object_t *obj); -extern 
object_t *object_header(void *p); -static inline kern_status_t object_get(const char *path, object_t **out) +extern struct object *object_create(struct object_type *type); +extern struct object *object_ref(struct object *obj); +extern void object_deref(struct object *obj); +extern void object_lock(struct object *obj, unsigned long *flags); +extern void object_unlock(struct object *obj, unsigned long flags); +extern void *object_data(struct object *obj); +extern struct object *object_header(void *p); +static inline kern_status_t object_get(const char *path, struct object **out) { return object_namespace_get_object(global_namespace(), path, out); } -extern kern_status_t object_get_child_named(object_t *obj, const char *name, object_t **out); -extern kern_status_t object_get_child_at(object_t *obj, size_t at, object_t **out); -extern kern_status_t object_query_name(object_t *obj, char name[OBJECT_NAME_MAX]); +extern kern_status_t object_get_child_named(struct object *obj, const char *name, struct object **out); +extern kern_status_t object_get_child_at(struct object *obj, size_t at, struct object **out); +extern kern_status_t object_query_name(struct object *obj, char name[OBJECT_NAME_MAX]); -extern object_t *set_create(const char *name); -extern kern_status_t set_add_object(object_t *set, object_t *obj); -extern kern_status_t set_remove_object(object_t *set, object_t *obj); -extern bool object_is_set(object_t *obj); +extern struct object *set_create(const char *name); +extern kern_status_t set_add_object(struct object *set, struct object *obj); +extern kern_status_t set_remove_object(struct object *set, struct object *obj); +extern bool object_is_set(struct object *obj); extern void init_set_objects(void); extern void init_global_namespace(void); diff --git a/include/socks/pmap.h b/include/socks/pmap.h index 9eac14c..f79046a 100644 --- a/include/socks/pmap.h +++ b/include/socks/pmap.h @@ -17,9 +17,9 @@ extern "C" { typedef ml_pmap_t pmap_t; typedef ml_pfn_t pfn_t; 
-typedef enum pmap_flags { +enum pmap_flags { PMAP_HUGEPAGE = 0x01u, -} pmap_flags_t; +}; extern void pmap_bootstrap(void); extern pmap_t get_kernel_pmap(void); @@ -28,8 +28,8 @@ extern pmap_t pmap_create(void); extern void pmap_destroy(pmap_t pmap); extern void pmap_switch(pmap_t pmap); -extern kern_status_t pmap_add(pmap_t pmap, void *p, pfn_t pfn, vm_prot_t prot, pmap_flags_t flags); -extern kern_status_t pmap_add_block(pmap_t pmap, void *p, pfn_t pfn, size_t len, vm_prot_t prot, pmap_flags_t flags); +extern kern_status_t pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot prot, enum pmap_flags flags); +extern kern_status_t pmap_add_block(pmap_t pmap, void *p, pfn_t pfn, size_t len, enum vm_prot prot, enum pmap_flags flags); extern kern_status_t pmap_remove(pmap_t pmap, void *p); extern kern_status_t pmap_remove_range(pmap_t pmap, void *p, size_t len); diff --git a/include/socks/printk.h b/include/socks/printk.h index 1714e1d..8e140a3 100644 --- a/include/socks/printk.h +++ b/include/socks/printk.h @@ -7,7 +7,7 @@ extern "C" { #endif -extern void early_printk_init(console_t *con); +extern void early_printk_init(struct console *con); extern int printk(const char *format, ...); #ifdef __cplusplus diff --git a/include/socks/queue.h b/include/socks/queue.h index 36fe9da..2f25dfa 100644 --- a/include/socks/queue.h +++ b/include/socks/queue.h @@ -10,8 +10,8 @@ extern "C" { #define QUEUE_CONTAINER(t, m, v) ((void *)((v) ? 
(uintptr_t)(v) - (offsetof(t, m)) : 0)) -#define QUEUE_INIT ((queue_t){ .q_first = NULL, .q_last = NULL }) -#define QUEUE_ENTRY_INIT ((queue_entry_t){ .qe_next = NULL, .qe_prev = NULL }) +#define QUEUE_INIT ((struct queue){ .q_first = NULL, .q_last = NULL }) +#define QUEUE_ENTRY_INIT ((struct queue_entry){ .qe_next = NULL, .qe_prev = NULL }) #define queue_foreach(iter_type, iter_name, queue_name, node_member) \ for (iter_type *iter_name = (iter_type *)QUEUE_CONTAINER(iter_type, node_member, queue_first(queue_name)); \ @@ -23,37 +23,37 @@ extern "C" { iter_name; \ iter_name = (iter_type *)QUEUE_CONTAINER(iter_type, node_member, queue_prev(&((iter_name)->node_member)))) -typedef struct queue_entry { +struct queue_entry { struct queue_entry *qe_next; struct queue_entry *qe_prev; -} queue_entry_t; +}; -typedef struct queue { - queue_entry_t *q_first; - queue_entry_t *q_last; -} queue_t; +struct queue { + struct queue_entry *q_first; + struct queue_entry *q_last; +}; -static inline void queue_init(queue_t *q) { memset(q, 0x00, sizeof *q); } -static inline bool queue_empty(queue_t *q) { return q->q_first == NULL; } +static inline void queue_init(struct queue *q) { memset(q, 0x00, sizeof *q); } +static inline bool queue_empty(struct queue *q) { return q->q_first == NULL; } -static inline queue_entry_t *queue_first(queue_t *q) { return q->q_first; } -static inline queue_entry_t *queue_last(queue_t *q) { return q->q_last; } -static inline queue_entry_t *queue_next(queue_entry_t *entry) { return entry->qe_next; } -static inline queue_entry_t *queue_prev(queue_entry_t *entry) { return entry->qe_prev; } +static inline struct queue_entry *queue_first(struct queue *q) { return q->q_first; } +static inline struct queue_entry *queue_last(struct queue *q) { return q->q_last; } +static inline struct queue_entry *queue_next(struct queue_entry *entry) { return entry->qe_next; } +static inline struct queue_entry *queue_prev(struct queue_entry *entry) { return entry->qe_prev; } -extern 
size_t queue_length(queue_t *q); +extern size_t queue_length(struct queue *q); -extern void queue_insert_before(queue_t *q, queue_entry_t *entry, queue_entry_t *before); -extern void queue_insert_after(queue_t *q, queue_entry_t *entry, queue_entry_t *after); +extern void queue_insert_before(struct queue *q, struct queue_entry *entry, struct queue_entry *before); +extern void queue_insert_after(struct queue *q, struct queue_entry *entry, struct queue_entry *after); -extern void queue_push_front(queue_t *q, queue_entry_t *entry); -extern void queue_push_back(queue_t *q, queue_entry_t *entry); +extern void queue_push_front(struct queue *q, struct queue_entry *entry); +extern void queue_push_back(struct queue *q, struct queue_entry *entry); -extern queue_entry_t *queue_pop_front(queue_t *q); -extern queue_entry_t *queue_pop_back(queue_t *q); +extern struct queue_entry *queue_pop_front(struct queue *q); +extern struct queue_entry *queue_pop_back(struct queue *q); -extern void queue_delete(queue_t *q, queue_entry_t *entry); -extern void queue_delete_all(queue_t *q); +extern void queue_delete(struct queue *q, struct queue_entry *entry); +extern void queue_delete_all(struct queue *q); #ifdef __cplusplus } diff --git a/include/socks/sched.h b/include/socks/sched.h index a2294c6..6f9ce0f 100644 --- a/include/socks/sched.h +++ b/include/socks/sched.h @@ -15,91 +15,91 @@ extern "C" { #endif -typedef enum task_state { +enum task_state { TASK_RUNNING, TASK_STOPPED, -} task_state_t; +}; -typedef enum thread_state { +enum thread_state { THREAD_READY = 1, THREAD_SLEEPING = 2, THREAD_STOPPED = 3, -} thread_state_t; +}; -typedef enum thread_flags { +enum thread_flags { THREAD_F_NEED_RESCHED = 0x01u, THREAD_F_NO_PREEMPT = 0x02u, -} thread_flags_t; +}; -typedef enum sched_priority { - PRIO_IDLE = 4, - PRIO_SUBNORMAL = 6, - PRIO_NORMAL = 10, - PRIO_SUPERNORMAL = 14, - PRIO_HIGH = 18, - PRIO_REALTIME = 24, -} sched_priority_t; +enum sched_priority { + PRIO_IDLE = 4, + PRIO_SUBNORMAL = 6, 
+ PRIO_NORMAL = 10, + PRIO_SUPERNORMAL = 14, + PRIO_HIGH = 18, + PRIO_REALTIME = 24, +}; -typedef struct task { +struct task { struct task *t_parent; unsigned int t_id; - task_state_t t_state; + enum task_state t_state; char t_name[TASK_NAME_MAX]; pmap_t t_pmap; - btree_node_t t_tasklist; - queue_t t_threads; - queue_t t_children; -} task_t; + struct btree_node t_tasklist; + struct queue t_threads; + struct queue t_children; +}; -typedef struct thread { - thread_state_t tr_state : 8; - thread_flags_t tr_flags : 8; - task_t *tr_parent; +struct thread { + enum thread_state tr_state : 8; + enum thread_flags tr_flags : 8; + struct task *tr_parent; unsigned int tr_id; unsigned int tr_prio; - queue_entry_t tr_threads; - queue_entry_t tr_rqentry; + struct queue_entry tr_threads; + struct queue_entry tr_rqentry; void *tr_kstack; -} thread_t; +}; -typedef struct runqueue { - queue_t rq_queues[PRIO_MAX]; +struct runqueue { + struct queue rq_queues[PRIO_MAX]; uint32_t rq_readybits; spin_lock_t rq_lock; -} runqueue_t; +}; extern kern_status_t sched_init(void); extern void schedule(void); extern void preempt_disable(void); extern void preempt_enable(void); -extern void runqueue_init(runqueue_t *rq); +extern void runqueue_init(struct runqueue *rq); -extern task_t *task_alloc(void); -static inline task_t *task_ref(task_t *task) { return (task_t *)object_data(object_ref(object_header(task))); } -static inline void task_deref(task_t *task) { object_deref(object_header(task)); } -extern task_t *task_from_pid(unsigned int pid); -extern task_t *kernel_task(void); +extern struct task *task_alloc(void); +static inline struct task *task_ref(struct task *task) { return (struct task *)object_data(object_ref(object_header(task))); } +static inline void task_deref(struct task *task) { object_deref(object_header(task)); } +extern struct task *task_from_pid(unsigned int pid); +extern struct task *kernel_task(void); extern bool need_resched(void); -extern task_t *current_task(void); -extern 
thread_t *current_thread(void); +extern struct task *current_task(void); +extern struct thread *current_thread(void); -static inline void task_lock_irqsave(task_t *task, unsigned long *flags) +static inline void task_lock_irqsave(struct task *task, unsigned long *flags) { object_lock(object_header(task), flags); } -static inline void task_unlock_irqrestore(task_t *task, unsigned long flags) +static inline void task_unlock_irqrestore(struct task *task, unsigned long flags) { object_unlock(object_header(task), flags); } -extern thread_t *thread_alloc(void); +extern struct thread *thread_alloc(void); #ifdef __cplusplus } diff --git a/include/socks/tty.h b/include/socks/tty.h index 2ab6b5e..29993d5 100644 --- a/include/socks/tty.h +++ b/include/socks/tty.h @@ -13,9 +13,9 @@ buffered user input. A TTY object is split into 2 parts: - - tty_t: This represents the terminal session, and tracks things like the cursor + - struct tty: This represents the terminal session, and tracks things like the cursor position, input buffer, flags, etc. - - tty_driver_t: This is a set of function callbacks that the TTY can use to + - struct tty_driver: This is a set of function callbacks that the TTY can use to manipulate the output device. This could represent a char-based framebuffer device, a serial port, etc. */ @@ -27,66 +27,66 @@ extern "C" { /* opaque context pointer for use by the tty driver */ typedef void *tty_driver_ctx_t; -typedef enum tty_driver_type { +enum tty_driver_type { /* For TTYs operating on simple IO devices like serial ports. Allows writing characters, receiving characters, and not much else. */ TTY_DRIVER_SIMPLE, /* For TTYs operating on more capable display interfaces. Allows putting characters at arbitrary locations, scrolling, etc */ TTY_DRIVER_FULL, -} tty_driver_type_t; +}; /* TTY cursor status. The extra cursor styles are just for completeness, the important one to support (if possible), is TTY_CURSOR_NONE. 
The others can be interpreted as "just turn on a cursor of any style". */ -typedef enum tty_cursor { +enum tty_cursor { TTY_CURSOR_ULINE, TTY_CURSOR_BLOCK, TTY_CURSOR_NONE, -} tty_cursor_t; +}; /* direction to use for scrolling. The important one to support is TTY_SCROLL_DOWN for when output overflows the display */ -typedef enum tty_scroll_dir { +enum tty_scroll_dir { TTY_SCROLL_DOWN, TTY_SCROLL_UP, -} tty_scroll_dir_t; +}; /* character attribute. this could be as simple as VGA's 16-colour palette plus an extra bit for bright, or a full 24-bit RGB value with bold and underline support, depending on what the driver supports. */ typedef uint64_t tty_attrib_t; -typedef struct tty_driver { +struct tty_driver { char tty_name[16]; - tty_driver_type_t tty_type; - queue_entry_t tty_list; + enum tty_driver_type tty_type; + struct queue_entry tty_list; void (*tty_init)(tty_driver_ctx_t *ctx); void (*tty_deinit)(tty_driver_ctx_t ctx); void (*tty_clear)(tty_driver_ctx_t ctx, int x, int y, int width, int height); void (*tty_putc)(tty_driver_ctx_t ctx, int c, int xpos, int ypos, tty_attrib_t attrib); - void (*tty_set_cursor)(tty_driver_ctx_t ctx, tty_cursor_t cur); + void (*tty_set_cursor)(tty_driver_ctx_t ctx, enum tty_cursor cur); void (*tty_move_cursor)(tty_driver_ctx_t ctx, int x, int y); - void (*tty_scroll)(tty_driver_ctx_t ctx, tty_scroll_dir_t dir, int lines); -} tty_driver_t; + void (*tty_scroll)(tty_driver_ctx_t ctx, enum tty_scroll_dir dir, int lines); +}; -typedef struct tty { +struct tty { int tty_xcur, tty_ycur; unsigned int tty_iflag, tty_oflag, tty_lflag; tty_driver_ctx_t tty_dctx; - const tty_driver_t *tty_driver; -} tty_t; + const struct tty_driver *tty_driver; +}; -extern kern_status_t tty_driver_register(tty_driver_t *drv); -extern kern_status_t tty_driver_unregister(tty_driver_t *drv); +extern kern_status_t tty_driver_register(struct tty_driver *drv); +extern kern_status_t tty_driver_unregister(struct tty_driver *drv); -extern tty_t *tty_create(void); 
-extern void tty_destroy(tty_t *tty); +extern struct tty *tty_create(void); +extern void tty_destroy(struct tty *tty); -extern int tty_read(tty_t *tty, char *s, unsigned long len); -extern int tty_write(tty_t *tty, const char *s, unsigned long len); +extern int tty_read(struct tty *tty, char *s, unsigned long len); +extern int tty_write(struct tty *tty, const char *s, unsigned long len); #ifdef __cplusplus } diff --git a/include/socks/vm.h b/include/socks/vm.h index 8db5903..74c8461 100644 --- a/include/socks/vm.h +++ b/include/socks/vm.h @@ -27,34 +27,34 @@ extern "C" { #define VM_PAGE_IS_FREE(pg) (((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0) #define vm_page_foreach(pg, i) \ - for (vm_page_t *i = (pg); i; i = vm_page_get_next_tail(i)) + for (struct vm_page *i = (pg); i; i = vm_page_get_next_tail(i)) typedef phys_addr_t vm_alignment_t; typedef unsigned int vm_node_id_t; -typedef struct vm_object { +struct vm_object { unsigned int reserved; -} vm_object_t; +}; -typedef enum vm_model { +enum vm_model { VM_MODEL_FLAT = 1, VM_MODEL_SPARSE, -} vm_model_t; +}; -typedef enum vm_prot { +enum vm_prot { VM_PROT_READ = 0x01u, VM_PROT_WRITE = 0x02u, VM_PROT_EXEC = 0x04u, VM_PROT_USER = 0x08u, VM_PROT_SVR = 0x10u, -} vm_prot_t; +}; -typedef enum vm_flags { +enum vm_flags { VM_NORMAL = 0x00u, VM_GET_DMA = 0x01u, -} vm_flags_t; +}; -typedef enum vm_zone_id { +enum vm_zone_id { /* NOTE that these are used as indices into the node_zones array in vm/zone.c they need to be continuous, and must start at 0! */ VM_ZONE_DMA = 0u, @@ -62,9 +62,9 @@ typedef enum vm_zone_id { VM_ZONE_HIGHMEM = 2u, VM_ZONE_MIN = VM_ZONE_DMA, VM_ZONE_MAX = VM_ZONE_HIGHMEM, -} vm_zone_id_t; +}; -typedef enum vm_page_order { +enum vm_page_order { VM_PAGE_4K = 0u, VM_PAGE_8K, VM_PAGE_16K, @@ -82,7 +82,7 @@ typedef enum vm_page_order { VM_PAGE_64M, VM_PAGE_128M, - /* vm_page_t only has 4 bits to store the page order with. + /* struct vm_page only has 4 bits to store the page order with. 
the maximum order that can be stored in 4 bits is 15 (VM_PAGE_128M) to use any of the page orders listed here, this field will have to be expanded. */ @@ -95,9 +95,9 @@ typedef enum vm_page_order { VM_PAGE_16G, VM_PAGE_32G, VM_PAGE_64G, -} vm_page_order_t; +}; -typedef enum vm_page_flags { +enum vm_page_flags { /* page is reserved (probably by a call to memblock_reserve()) and cannot be returned by any allocation function */ VM_PAGE_RESERVED = 0x01u, @@ -107,52 +107,52 @@ typedef enum vm_page_flags { VM_PAGE_HEAD = 0x04u, /* page is part of a huge-page */ VM_PAGE_HUGE = 0x08u, -} vm_page_flags_t; +}; -typedef enum vm_memory_region_status { +enum vm_memory_region_status { VM_REGION_FREE = 0x01u, VM_REGION_RESERVED = 0x02u, -} vm_memory_region_status_t; +}; -typedef enum vm_cache_flags { +enum vm_cache_flags { VM_CACHE_OFFSLAB = 0x01u, VM_CACHE_DMA = 0x02u -} vm_cache_flags_t; +}; -typedef struct vm_zone_descriptor { - vm_zone_id_t zd_id; +struct vm_zone_descriptor { + enum vm_zone_id zd_id; vm_node_id_t zd_node; const char zd_name[32]; phys_addr_t zd_base; phys_addr_t zd_limit; -} vm_zone_descriptor_t; +}; -typedef struct vm_zone { - vm_zone_descriptor_t z_info; +struct vm_zone { + struct vm_zone_descriptor z_info; spin_lock_t z_lock; - queue_t z_free_pages[VM_MAX_PAGE_ORDERS]; + struct queue z_free_pages[VM_MAX_PAGE_ORDERS]; unsigned long z_size; -} vm_zone_t; +}; -typedef struct vm_pg_data { - vm_zone_t pg_zones[VM_MAX_ZONES]; -} vm_pg_data_t; +struct vm_pg_data { + struct vm_zone pg_zones[VM_MAX_ZONES]; +}; -typedef struct vm_region { - vm_memory_region_status_t r_status; +struct vm_region { + enum vm_memory_region_status r_status; phys_addr_t r_base; phys_addr_t r_limit; -} vm_region_t; +}; -typedef struct vm_cache { +struct vm_cache { const char *c_name; - vm_cache_flags_t c_flags; - queue_entry_t c_list; + enum vm_cache_flags c_flags; + struct queue_entry c_list; - queue_t c_slabs_full; - queue_t c_slabs_partial; - queue_t c_slabs_empty; + struct queue 
c_slabs_full; + struct queue c_slabs_partial; + struct queue c_slabs_empty; spin_lock_t c_lock; @@ -160,7 +160,7 @@ typedef struct vm_cache { unsigned int c_obj_count; /* the size of object kept in the cache */ unsigned int c_obj_size; - /* combined size of vm_slab_t and the freelist */ + /* combined size of struct vm_slab and the freelist */ unsigned int c_hdr_size; /* power of 2 alignment for objects returned from the cache */ unsigned int c_align; @@ -170,12 +170,12 @@ typedef struct vm_cache { unsigned int c_stride; /* size of page used for slabs */ unsigned int c_page_order; -} vm_cache_t; +}; -typedef struct vm_slab { - vm_cache_t *s_cache; - /* queue entry for vm_cache_t.c_slabs_* */ - queue_entry_t s_list; +struct vm_slab { + struct vm_cache *s_cache; + /* queue entry for struct vm_cache.c_slabs_* */ + struct queue_entry s_list; /* pointer to the first object slot. */ void *s_objects; /* the number of objects allocated on the slab. */ @@ -193,9 +193,9 @@ typedef struct vm_slab { this is commented as it as flexible arrays are not supported in c++. */ //unsigned int s_freelist[]; -} vm_slab_t; +}; -typedef struct vm_page { +struct vm_page { /* order of the page block that this page belongs too */ uint32_t p_order : 4; /* the id of the NUMA node that this page belongs to */ @@ -214,82 +214,82 @@ typedef struct vm_page { some examples: - the buddy allocator uses this to maintain its per-zone free-page lists. */ - queue_entry_t p_list; + struct queue_entry p_list; /* owner-specific data */ union { - vm_slab_t *p_slab; + struct vm_slab *p_slab; }; -} __attribute__((aligned(2 * sizeof(unsigned long)))) vm_page_t; +} __attribute__((aligned(2 * sizeof(unsigned long)))); /* represents a sector of memory, containing its own array of vm_pages. this struct is used under the sparse memory model, instead of the global vm_page array */ -typedef struct vm_sector { +struct vm_sector { /* sector size. this must be a power of 2. all sectors in the system have the same size. 
*/ - vm_page_order_t s_size; + enum vm_page_order s_size; /* PFN of the first page contained in s_pages. to find the PFN of any page contained within s_pages, simply add its offset within the array to s_first_pfn */ size_t s_first_pfn; /* array of pages contained in this sector */ - vm_page_t *s_pages; -} vm_sector_t; + struct vm_page *s_pages; +}; -extern kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones); -extern vm_model_t vm_memory_model(void); -extern void vm_set_memory_model(vm_model_t model); +extern kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zones); +extern enum vm_model vm_memory_model(void); +extern void vm_set_memory_model(enum vm_model model); -extern vm_pg_data_t *vm_pg_data_get(vm_node_id_t node); +extern struct vm_pg_data *vm_pg_data_get(vm_node_id_t node); extern phys_addr_t vm_virt_to_phys(void *p); extern void *vm_phys_to_virt(phys_addr_t p); extern void vm_page_init_array(); -extern vm_page_t *vm_page_get(phys_addr_t addr); -extern phys_addr_t vm_page_get_paddr(vm_page_t *pg); -extern vm_zone_t *vm_page_get_zone(vm_page_t *pg); -extern void *vm_page_get_vaddr(vm_page_t *pg); -extern size_t vm_page_get_pfn(vm_page_t *pg); -extern size_t vm_page_order_to_bytes(vm_page_order_t order); -extern size_t vm_page_order_to_pages(vm_page_order_t order); -extern vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order); -extern vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags); -extern void vm_page_free(vm_page_t *pg); +extern struct vm_page *vm_page_get(phys_addr_t addr); +extern phys_addr_t vm_page_get_paddr(struct vm_page *pg); +extern struct vm_zone *vm_page_get_zone(struct vm_page *pg); +extern void *vm_page_get_vaddr(struct vm_page *pg); +extern size_t vm_page_get_pfn(struct vm_page *pg); +extern size_t vm_page_order_to_bytes(enum vm_page_order order); +extern size_t vm_page_order_to_pages(enum vm_page_order order); +extern vm_alignment_t 
vm_page_order_to_alignment(enum vm_page_order order); +extern struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags); +extern void vm_page_free(struct vm_page *pg); -extern int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b); -extern vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b); -extern vm_page_t *vm_page_get_buddy(vm_page_t *pg); -extern vm_page_t *vm_page_get_next_tail(vm_page_t *pg); +extern int vm_page_split(struct vm_page *pg, struct vm_page **a, struct vm_page **b); +extern struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b); +extern struct vm_page *vm_page_get_buddy(struct vm_page *pg); +extern struct vm_page *vm_page_get_next_tail(struct vm_page *pg); extern size_t vm_bytes_to_pages(size_t bytes); -extern void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info); -extern vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags); -extern void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg); +extern void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info); +extern struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags); +extern void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg); -extern vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags); -extern void vm_cache_init(vm_cache_t *cache); -extern void vm_cache_destroy(vm_cache_t *cache); -extern void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags); -extern void vm_cache_free(vm_cache_t *cache, void *p); +extern struct vm_cache *vm_cache_create(const char *name, size_t objsz, enum vm_cache_flags flags); +extern void vm_cache_init(struct vm_cache *cache); +extern void vm_cache_destroy(struct vm_cache *cache); +extern void *vm_cache_alloc(struct vm_cache *cache, enum vm_flags flags); +extern void vm_cache_free(struct vm_cache *cache, void *p); extern void kmalloc_init(void); -extern void *kmalloc(size_t 
count, vm_flags_t flags); -extern void *kzalloc(size_t count, vm_flags_t flags); +extern void *kmalloc(size_t count, enum vm_flags flags); +extern void *kzalloc(size_t count, enum vm_flags flags); extern void kfree(void *p); /* Flat memory model functions */ extern void vm_flat_init(void); -extern vm_page_t *vm_page_get_flat(phys_addr_t addr); -extern size_t vm_page_get_pfn_flat(vm_page_t *pg); +extern struct vm_page *vm_page_get_flat(phys_addr_t addr); +extern size_t vm_page_get_pfn_flat(struct vm_page *pg); /* Sparse memory model functions */ extern void vm_sparse_init(void); -extern vm_page_t *vm_page_get_sparse(phys_addr_t addr); -extern size_t vm_page_get_pfn_sparse(vm_page_t *pg); +extern struct vm_page *vm_page_get_sparse(phys_addr_t addr); +extern size_t vm_page_get_pfn_sparse(struct vm_page *pg); #ifdef __cplusplus } diff --git a/kernel/console.c b/kernel/console.c index eb521f9..0dd1a1c 100644 --- a/kernel/console.c +++ b/kernel/console.c @@ -3,15 +3,15 @@ #include #include -static queue_t consoles; +static struct queue consoles; static spin_lock_t consoles_lock = SPIN_LOCK_INIT; -kern_status_t console_register(console_t *con) +kern_status_t console_register(struct console *con) { unsigned long flags; spin_lock_irqsave(&consoles_lock, &flags); - queue_foreach (console_t, cur, &consoles, c_list) { + queue_foreach (struct console, cur, &consoles, c_list) { if (!strcmp(cur->c_name, con->c_name)) { spin_unlock_irqrestore(&consoles_lock, flags); return KERN_NAME_EXISTS; @@ -23,7 +23,7 @@ kern_status_t console_register(console_t *con) return KERN_OK; } -kern_status_t console_unregister(console_t *con) +kern_status_t console_unregister(struct console *con) { unsigned long flags; spin_lock_irqsave(&consoles_lock, &flags); @@ -34,14 +34,14 @@ kern_status_t console_unregister(console_t *con) return KERN_OK; } -void console_write(console_t *con, const char *s, unsigned int len) +void console_write(struct console *con, const char *s, unsigned int len) { if 
(con->c_write) { con->c_write(con, s, len); } } -int console_read(console_t *con, char *s, unsigned int len) +int console_read(struct console *con, char *s, unsigned int len) { int ret = -1; if (con->c_read) { diff --git a/kernel/cpu.c b/kernel/cpu.c index ceed9da..0942ab9 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -5,14 +5,14 @@ DECLARE_BITMAP(cpu_available, CPU_MAX); DECLARE_BITMAP(cpu_online, CPU_MAX); -DEFINE_PERCPU_VAR(cpu_data_t, cpu_data); +DEFINE_PERCPU_VAR(struct cpu_data, cpu_data); -cpu_data_t *get_this_cpu(void) +struct cpu_data *get_this_cpu(void) { return percpu_get(&cpu_data); } -void put_cpu(cpu_data_t *cpu) +void put_cpu(struct cpu_data *cpu) { percpu_put(cpu); } @@ -38,7 +38,7 @@ void cpu_set_online(unsigned int cpu_id) void preempt_disable(void) { ml_cpu_block *ml_cpu = ml_this_cpu(); - cpu_data_t *cpu_data = ml_cpu_block_get_data(ml_cpu); + struct cpu_data *cpu_data = ml_cpu_block_get_data(ml_cpu); if (!cpu_data) { return; } @@ -51,7 +51,7 @@ void preempt_disable(void) void preempt_enable(void) { ml_cpu_block *ml_cpu = ml_this_cpu(); - cpu_data_t *cpu_data = ml_cpu_block_get_data(ml_cpu); + struct cpu_data *cpu_data = ml_cpu_block_get_data(ml_cpu); if (!cpu_data) { return; } diff --git a/kernel/panic.c b/kernel/panic.c index 36154db..badc4b1 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -18,8 +18,8 @@ void panic(const char *fmt, ...) 
printk("---[ kernel panic: %s", buf); printk("kernel: " BUILD_ID ", compiler version: " __VERSION__); - task_t *task = current_task(); - thread_t *thr = current_thread(); + struct task *task = current_task(); + struct thread *thr = current_thread(); if (task && thr) { printk("task: %s (id: %d, thread: %d)", task->t_name, task->t_id, thr->tr_id); diff --git a/kernel/printk.c b/kernel/printk.c index bd7df12..88e46ce 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -7,7 +7,7 @@ #define LOG_BUFFER_SIZE 0x40000 #define LOG_MSG_SIZE 0x100 -static console_t *early_console = NULL; +static struct console *early_console = NULL; static spin_lock_t log_buffer_lock = SPIN_LOCK_INIT; @@ -46,7 +46,7 @@ static void save_log_message(const char *msg) } } -void early_printk_init(console_t *con) +void early_printk_init(struct console *con) { early_console = con; } diff --git a/kernel/tty.c b/kernel/tty.c index 761c5bc..669f81b 100644 --- a/kernel/tty.c +++ b/kernel/tty.c @@ -2,12 +2,12 @@ #include #include -int tty_read(tty_t *tty, char *s, unsigned long len) +int tty_read(struct tty *tty, char *s, unsigned long len) { return 0; } -int tty_write(tty_t *tty, const char *s, unsigned long len) +int tty_write(struct tty *tty, const char *s, unsigned long len) { return 0; } diff --git a/kxld/internal.c b/kxld/internal.c index 6f553ec..4cd47f0 100644 --- a/kxld/internal.c +++ b/kxld/internal.c @@ -6,7 +6,7 @@ static struct kext *self = NULL; -extern btree_t kext_tree; +extern struct btree kext_tree; extern char __kexts_start[]; extern char __kexts_end[]; @@ -149,7 +149,7 @@ kern_status_t scan_internal_kexts(void) kern_status_t bring_internal_kexts_online(void) { - btree_node_t *cur = btree_first(&kext_tree); + struct btree_node *cur = btree_first(&kext_tree); while (cur) { struct kext *kext = BTREE_CONTAINER(struct kext, k_node, cur); diff --git a/kxld/kext.c b/kxld/kext.c index 2db1859..b1df5a9 100644 --- a/kxld/kext.c +++ b/kxld/kext.c @@ -6,10 +6,10 @@ #include static spin_lock_t 
kext_tree_lock = SPIN_LOCK_INIT; -static object_t *kext_set; -btree_t kext_tree; +static struct object *kext_set; +struct btree kext_tree; -static kern_status_t kext_query_name(object_t *obj, char out[OBJECT_NAME_MAX]) +static kern_status_t kext_query_name(struct object *obj, char out[OBJECT_NAME_MAX]) { struct kext *kext = object_data(obj); strncpy(out, kext->k_ident, OBJECT_NAME_MAX - 1); @@ -17,7 +17,7 @@ static kern_status_t kext_query_name(object_t *obj, char out[OBJECT_NAME_MAX]) return KERN_OK; } -static kern_status_t kext_destroy(object_t *obj) +static kern_status_t kext_destroy(struct object *obj) { struct kext *kext = object_data(obj); if (kext->k_dependencies) { @@ -27,7 +27,7 @@ static kern_status_t kext_destroy(object_t *obj) return KERN_OK; } -static object_type_t kext_type = { +static struct object_type kext_type = { .ob_name = "kext", .ob_size = sizeof(struct kext), .ob_ops = { @@ -38,7 +38,7 @@ static object_type_t kext_type = { static struct kext *kext_get(const char *ident) { uint64_t ident_hash = hash_string(ident); - btree_node_t *cur = kext_tree.b_root; + struct btree_node *cur = kext_tree.b_root; while (cur) { struct kext *cur_node = BTREE_CONTAINER(struct kext, k_node, cur); @@ -62,10 +62,10 @@ static void kext_add(struct kext *kext) return; } - btree_node_t *cur = kext_tree.b_root; + struct btree_node *cur = kext_tree.b_root; while (1) { struct kext *cur_node = BTREE_CONTAINER(struct kext, k_node, cur); - btree_node_t *next = NULL; + struct btree_node *next = NULL; if (kext->k_ident_hash > cur_node->k_ident_hash) { next = btree_right(cur); @@ -103,7 +103,7 @@ struct kext *kext_get_by_id(const char *ident) struct kext *kext = kext_get(ident); if (kext) { - object_t *kext_obj = object_header(kext); + struct object *kext_obj = object_header(kext); object_ref(kext_obj); } @@ -121,7 +121,7 @@ kern_status_t kext_cache_init(void) struct kext *kext_alloc(void) { - object_t *kext_obj = object_create(&kext_type); + struct object *kext_obj = 
object_create(&kext_type); if (!kext_obj) { return NULL; } @@ -145,7 +145,7 @@ kern_status_t kext_register(struct kext *kext) return KERN_NAME_EXISTS; } - object_t *kext_obj = object_header(kext); + struct object *kext_obj = object_header(kext); object_ref(object_header(kext)); kext_add(kext); diff --git a/obj/namespace.c b/obj/namespace.c index 989e7f2..f14efe8 100644 --- a/obj/namespace.c +++ b/obj/namespace.c @@ -1,34 +1,34 @@ #include -static object_namespace_t *global_ns; +static struct object_namespace *global_ns; struct object_namespace { /* root directory set object */ - object_t *ns_root; + struct object *ns_root; }; -static kern_status_t ns_query_name(object_t *obj, char out[OBJECT_NAME_MAX]) +static kern_status_t ns_query_name(struct object *obj, char out[OBJECT_NAME_MAX]) { out[0] = '/'; out[1] = 0; return KERN_OK; } -static kern_status_t ns_get_child_at(object_t *obj, size_t at, object_t **out) +static kern_status_t ns_get_child_at(struct object *obj, size_t at, struct object **out) { - object_namespace_t *ns = object_data(obj); + struct object_namespace *ns = object_data(obj); return object_get_child_at(ns->ns_root, at, out); } -static kern_status_t ns_get_child_named(object_t *obj, const char *name, object_t **out) +static kern_status_t ns_get_child_named(struct object *obj, const char *name, struct object **out) { - object_namespace_t *ns = object_data(obj); + struct object_namespace *ns = object_data(obj); return object_get_child_named(ns->ns_root, name, out); } -static object_type_t ns_type = { +static struct object_type ns_type = { .ob_name = "namespace", - .ob_size = sizeof(object_namespace_t), + .ob_size = sizeof(struct object_namespace), .ob_ops = { .query_name = ns_query_name, .get_named = ns_get_child_named, @@ -43,20 +43,20 @@ void init_global_namespace(void) global_ns = object_namespace_create(); } -object_namespace_t *global_namespace(void) +struct object_namespace *global_namespace(void) { return global_ns; } -object_namespace_t 
*object_namespace_create(void) +struct object_namespace *object_namespace_create(void) { - object_t *ns_object = object_create(&ns_type); - object_namespace_t *ns = object_data(ns_object); + struct object *ns_object = object_create(&ns_type); + struct object_namespace *ns = object_data(ns_object); ns->ns_root = set_create("/"); return ns; } -kern_status_t object_namespace_get_object(object_namespace_t *ns, const char *path, object_t **out) +kern_status_t object_namespace_get_object(struct object_namespace *ns, const char *path, struct object **out) { return KERN_OK; } @@ -93,7 +93,7 @@ static void cleanup_object_path(char *path, size_t len, size_t *parts) path[final_len] = 0; } -kern_status_t object_publish(object_namespace_t *ns, const char *path, object_t *obj) +kern_status_t object_publish(struct object_namespace *ns, const char *path, struct object *obj) { if (*path != '/') { return KERN_INVALID_ARGUMENT; @@ -119,13 +119,13 @@ kern_status_t object_publish(object_namespace_t *ns, const char *path, object_t char *sp; char *tok = strtok_r(rpath, "/", &sp); - object_t *cur = ns->ns_root; + struct object *cur = ns->ns_root; unsigned long flags; while (tok) { object_lock(cur, &flags); - object_t *next; + struct object *next; kern_status_t status = object_get_child_named(cur, tok, &next); if (status == KERN_NO_ENTRY) { next = set_create(tok); @@ -154,7 +154,7 @@ kern_status_t object_publish(object_namespace_t *ns, const char *path, object_t return set_add_object(cur, obj); } -kern_status_t object_unpublish(object_namespace_t *ns, object_t *obj) +kern_status_t object_unpublish(struct object_namespace *ns, struct object *obj) { return KERN_OK; } diff --git a/obj/object.c b/obj/object.c index bd5be76..8b40f18 100644 --- a/obj/object.c +++ b/obj/object.c @@ -4,7 +4,7 @@ #define HAS_OP(obj, opname) ((obj)->ob_type->ob_ops.opname) -static queue_t object_types; +static struct queue object_types; static spin_lock_t object_types_lock = SPIN_LOCK_INIT; kern_status_t 
object_bootstrap(void) @@ -14,7 +14,7 @@ kern_status_t object_bootstrap(void) return KERN_OK; } -kern_status_t object_type_register(object_type_t *p) +kern_status_t object_type_register(struct object_type *p) { unsigned long flags; spin_lock_irqsave(&object_types_lock, &flags); @@ -22,7 +22,7 @@ kern_status_t object_type_register(object_type_t *p) spin_unlock_irqrestore(&object_types_lock, flags); p->ob_cache.c_name = p->ob_name; - p->ob_cache.c_obj_size = sizeof(object_t) + p->ob_size; + p->ob_cache.c_obj_size = sizeof(struct object) + p->ob_size; p->ob_cache.c_page_order = VM_PAGE_16K; vm_cache_init(&p->ob_cache); @@ -31,7 +31,7 @@ kern_status_t object_type_register(object_type_t *p) return KERN_OK; } -kern_status_t object_type_unregister(object_type_t *p) +kern_status_t object_type_unregister(struct object_type *p) { unsigned long flags; spin_lock_irqsave(&object_types_lock, &flags); @@ -41,14 +41,14 @@ kern_status_t object_type_unregister(object_type_t *p) return KERN_OK; } -object_t *object_create(object_type_t *type) +struct object *object_create(struct object_type *type) { if (!(type->ob_flags & OBJTYPE_INIT)) { return NULL; } - vm_cache_t *cache = &type->ob_cache; - object_t *obj = vm_cache_alloc(cache, 0); + struct vm_cache *cache = &type->ob_cache; + struct object *obj = vm_cache_alloc(cache, 0); if (!obj) { return NULL; } @@ -62,13 +62,13 @@ object_t *object_create(object_type_t *type) return obj; } -object_t *object_ref(object_t *obj) +struct object *object_ref(struct object *obj) { obj->ob_refcount++; return obj; } -void object_deref(object_t *obj) +void object_deref(struct object *obj) { unsigned long flags; spin_lock_irqsave(&obj->ob_lock, &flags); @@ -92,24 +92,24 @@ void object_deref(object_t *obj) vm_cache_free(&obj->ob_type->ob_cache, obj); } -void object_lock(object_t *obj, unsigned long *flags) +void object_lock(struct object *obj, unsigned long *flags) { spin_lock_irqsave(&obj->ob_lock, flags); } -void object_unlock(object_t *obj, unsigned 
long flags) +void object_unlock(struct object *obj, unsigned long flags) { spin_unlock_irqrestore(&obj->ob_lock, flags); } -void *object_data(object_t *obj) +void *object_data(struct object *obj) { return (char *)obj + sizeof *obj; } -object_t *object_header(void *p) +struct object *object_header(void *p) { - object_t *obj = (object_t *)((char *)p - sizeof *obj); + struct object *obj = (struct object *)((char *)p - sizeof *obj); if (obj->ob_magic != OBJECT_MAGIC) { return NULL; } @@ -117,7 +117,7 @@ object_t *object_header(void *p) return obj; } -kern_status_t object_get_child_named(object_t *obj, const char *name, object_t **out) +kern_status_t object_get_child_named(struct object *obj, const char *name, struct object **out) { kern_status_t status = KERN_UNSUPPORTED; @@ -128,7 +128,7 @@ kern_status_t object_get_child_named(object_t *obj, const char *name, object_t * return status; } -kern_status_t object_get_child_at(object_t *obj, size_t at, object_t **out) +kern_status_t object_get_child_at(struct object *obj, size_t at, struct object **out) { kern_status_t status = KERN_UNSUPPORTED; @@ -139,7 +139,7 @@ kern_status_t object_get_child_at(object_t *obj, size_t at, object_t **out) return status; } -kern_status_t object_query_name(object_t *obj, char name[OBJECT_NAME_MAX]) +kern_status_t object_query_name(struct object *obj, char name[OBJECT_NAME_MAX]) { if (HAS_OP(obj, query_name)) { return obj->ob_type->ob_ops.query_name(obj, name); diff --git a/obj/set.c b/obj/set.c index da9778b..acb5732 100644 --- a/obj/set.c +++ b/obj/set.c @@ -1,11 +1,11 @@ #include struct set { - queue_t s_list; + struct queue s_list; char s_name[OBJECT_NAME_MAX]; }; -static kern_status_t set_query_name(object_t *obj, char out[OBJECT_NAME_MAX]) +static kern_status_t set_query_name(struct object *obj, char out[OBJECT_NAME_MAX]) { struct set *set = object_data(obj); strncpy(out, set->s_name, OBJECT_NAME_MAX - 1); @@ -14,11 +14,11 @@ static kern_status_t set_query_name(object_t *obj, char 
out[OBJECT_NAME_MAX]) return KERN_OK; } -static kern_status_t set_get_child_at(object_t *obj, size_t at, object_t **out) +static kern_status_t set_get_child_at(struct object *obj, size_t at, struct object **out) { struct set *set = object_data(obj); size_t i = 0; - queue_foreach(object_t, child, &set->s_list, ob_list) { + queue_foreach(struct object, child, &set->s_list, ob_list) { if (i == at) { *out = object_ref(child); return KERN_OK; @@ -30,12 +30,12 @@ static kern_status_t set_get_child_at(object_t *obj, size_t at, object_t **out) return KERN_NO_ENTRY; } -static kern_status_t set_get_child_named(object_t *obj, const char *name, object_t **out) +static kern_status_t set_get_child_named(struct object *obj, const char *name, struct object **out) { struct set *set = object_data(obj); char child_name[OBJECT_NAME_MAX]; - queue_foreach(object_t, child, &set->s_list, ob_list) { + queue_foreach(struct object, child, &set->s_list, ob_list) { kern_status_t status = object_query_name(child, child_name); if (status != KERN_OK) { continue; @@ -50,7 +50,7 @@ static kern_status_t set_get_child_named(object_t *obj, const char *name, object return KERN_NO_ENTRY; } -static object_type_t set_type = { +static struct object_type set_type = { .ob_name = "set", .ob_size = sizeof(struct set), .ob_ops = { @@ -65,9 +65,9 @@ void init_set_objects(void) object_type_register(&set_type); } -object_t *set_create(const char *name) +struct object *set_create(const char *name) { - object_t *set_obj = object_create(&set_type); + struct object *set_obj = object_create(&set_type); if (!set_obj) { return NULL; } @@ -80,7 +80,7 @@ object_t *set_create(const char *name) return set_obj; } -kern_status_t set_add_object(object_t *set_obj, object_t *obj) +kern_status_t set_add_object(struct object *set_obj, struct object *obj) { if (!object_is_set(set_obj)) { return KERN_INVALID_ARGUMENT; @@ -96,7 +96,7 @@ kern_status_t set_add_object(object_t *set_obj, object_t *obj) return status; } - queue_foreach 
(object_t, child, &set->s_list, ob_list) { + queue_foreach (struct object, child, &set->s_list, ob_list) { object_query_name(child, child_name); if (!strcmp(child_name, obj_name)) { @@ -109,7 +109,7 @@ kern_status_t set_add_object(object_t *set_obj, object_t *obj) return KERN_OK; } -kern_status_t set_remove_object(object_t *set_obj, object_t *obj) +kern_status_t set_remove_object(struct object *set_obj, struct object *obj) { if (!object_is_set(set_obj)) { return KERN_INVALID_ARGUMENT; @@ -122,7 +122,7 @@ kern_status_t set_remove_object(object_t *set_obj, object_t *obj) return KERN_OK; } -bool object_is_set(object_t *obj) +bool object_is_set(struct object *obj) { return obj->ob_type == &set_type; } diff --git a/sched/core.c b/sched/core.c index ab95a0c..ccedb98 100644 --- a/sched/core.c +++ b/sched/core.c @@ -26,9 +26,9 @@ kern_status_t sched_init(void) return status; } - thread_t *this_thread = QUEUE_CONTAINER(thread_t, tr_threads, queue_first(&kernel_task()->t_threads)); + struct thread *this_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&kernel_task()->t_threads)); - cpu_data_t *this_cpu = get_this_cpu(); + struct cpu_data *this_cpu = get_this_cpu(); runqueue_init(&this_cpu->c_rq); this_cpu->c_current_thread = this_thread; put_cpu(this_cpu); diff --git a/sched/runqueue.c b/sched/runqueue.c index a4635fb..a121b9d 100644 --- a/sched/runqueue.c +++ b/sched/runqueue.c @@ -1,7 +1,7 @@ #include #include -void runqueue_init(runqueue_t *rq) +void runqueue_init(struct runqueue *rq) { memset(rq, 0x00, sizeof *rq); rq->rq_lock = SPIN_LOCK_INIT; diff --git a/sched/task.c b/sched/task.c index ce3c950..a7764a9 100644 --- a/sched/task.c +++ b/sched/task.c @@ -4,20 +4,20 @@ #include #include -static object_type_t task_type = { +static struct object_type task_type = { .ob_name = "task", - .ob_size = sizeof(task_t), + .ob_size = sizeof(struct task), }; -static task_t *__kernel_task; +static struct task *__kernel_task; static spin_lock_t task_list_lock; -static 
btree_t task_list; +static struct btree task_list; -BTREE_DEFINE_SIMPLE_GET(task_t, unsigned int, t_tasklist, t_id, task_list_get) -BTREE_DEFINE_SIMPLE_INSERT(task_t, t_tasklist, t_id, task_list_insert) +BTREE_DEFINE_SIMPLE_GET(struct task, unsigned int, t_tasklist, t_id, task_list_get) +BTREE_DEFINE_SIMPLE_INSERT(struct task, t_tasklist, t_id, task_list_insert) -task_t *kernel_task(void) +struct task *kernel_task(void) { return __kernel_task; } @@ -35,7 +35,7 @@ kern_status_t setup_kernel_task(void) snprintf(__kernel_task->t_name, sizeof __kernel_task->t_name, "kernel_task"); - thread_t *kernel_thread = thread_alloc(); + struct thread *kernel_thread = thread_alloc(); kernel_thread->tr_id = 0; kernel_thread->tr_prio = PRIO_NORMAL; kernel_thread->tr_state = THREAD_READY; @@ -58,29 +58,29 @@ kern_status_t task_object_type_init(void) return object_type_register(&task_type); } -task_t *task_alloc(void) +struct task *task_alloc(void) { - object_t *task_obj = object_create(&task_type); + struct object *task_obj = object_create(&task_type); if (!task_obj) { return NULL; } - task_t *t = object_data(task_obj); + struct task *t = object_data(task_obj); memset(t, 0x00, sizeof *t); return t; } -task_t *task_from_pid(unsigned int pid) +struct task *task_from_pid(unsigned int pid) { unsigned long flags; spin_lock_irqsave(&task_list_lock, &flags); - task_t *t = task_list_get(&task_list, pid); + struct task *t = task_list_get(&task_list, pid); spin_unlock_irqrestore(&task_list_lock, flags); return t; } -task_t *current_task(void) +struct task *current_task(void) { - thread_t *thr = current_thread(); + struct thread *thr = current_thread(); return thr ? 
thr->tr_parent : NULL; } diff --git a/sched/thread.c b/sched/thread.c index 342c3c6..d761a6d 100644 --- a/sched/thread.c +++ b/sched/thread.c @@ -2,9 +2,9 @@ #include #include -static object_type_t thread_type = { +static struct object_type thread_type = { .ob_name = "thread", - .ob_size = sizeof(thread_t), + .ob_size = sizeof(struct thread), }; kern_status_t thread_object_type_init(void) @@ -12,31 +12,31 @@ kern_status_t thread_object_type_init(void) return object_type_register(&thread_type); } -thread_t *thread_alloc(void) +struct thread *thread_alloc(void) { - object_t *thread_obj = object_create(&thread_type); + struct object *thread_obj = object_create(&thread_type); if (!thread_obj) { return NULL; } - thread_t *t = object_data(thread_obj); + struct thread *t = object_data(thread_obj); memset(t, 0x00, sizeof *t); return t; } -void thread_free(thread_t *thr) +void thread_free(struct thread *thr) { } -thread_t *current_thread(void) +struct thread *current_thread(void) { - cpu_data_t *cpu = get_this_cpu(); + struct cpu_data *cpu = get_this_cpu(); if (!cpu) { return NULL; } - thread_t *out = cpu->c_current_thread; + struct thread *out = cpu->c_current_thread; put_cpu(cpu); return out; } diff --git a/test/obj.c b/test/obj.c index 90eab44..c8aa450 100644 --- a/test/obj.c +++ b/test/obj.c @@ -7,7 +7,7 @@ struct test_object { char name[OBJECT_NAME_MAX]; }; -static kern_status_t test_query_name(object_t *obj, char out[OBJECT_NAME_MAX]) +static kern_status_t test_query_name(struct object *obj, char out[OBJECT_NAME_MAX]) { struct test_object *test = object_data(obj); strncpy(out, test->name, OBJECT_NAME_MAX); @@ -15,7 +15,7 @@ static kern_status_t test_query_name(object_t *obj, char out[OBJECT_NAME_MAX]) return KERN_OK; } -static object_type_t test_type = { +static struct object_type test_type = { .ob_name = "test", .ob_size = sizeof(struct test_object), .ob_ops = { @@ -23,7 +23,7 @@ static object_type_t test_type = { }, }; -static void print_object_tree(object_t *obj, 
int depth) +static void print_object_tree(struct object *obj, int depth) { char msg[256] = {0}; int len = 0; @@ -38,7 +38,7 @@ static void print_object_tree(object_t *obj, int depth) len += snprintf(msg + len, sizeof msg - len, "%s", name); printk(msg); - object_t *child = NULL; + struct object *child = NULL; size_t i = 0; while (1) { @@ -57,7 +57,7 @@ static int run_obj_tests(void) { object_type_register(&test_type); - object_t *test_obj = object_create(&test_type); + struct object *test_obj = object_create(&test_type); struct test_object *test = object_data(test_obj); snprintf(test->name, sizeof test->name, "object1"); kern_status_t status = object_publish(global_namespace(), "/misc/objects", test_obj); diff --git a/vm/bootstrap.c b/vm/bootstrap.c index d1feaae..eda1bf2 100644 --- a/vm/bootstrap.c +++ b/vm/bootstrap.c @@ -8,15 +8,15 @@ #include #include -/* One vm_pg_data_t per NUMA node. */ -static vm_pg_data_t *node_data = NULL; +/* One struct vm_pg_data per NUMA node. */ +static struct vm_pg_data *node_data = NULL; -kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones) +kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zones) { int numa_count = 1; /* we're only worrying about UMA systems for now */ - node_data = memblock_alloc(sizeof(vm_pg_data_t) * numa_count, 8); + node_data = memblock_alloc(sizeof(struct vm_pg_data) * numa_count, 8); /* TODO select which memory model to use automatically, and add a kernel boot parameter to override the choice */ @@ -41,7 +41,7 @@ kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones) return KERN_OK; } -vm_pg_data_t *vm_pg_data_get(vm_node_id_t node) +struct vm_pg_data *vm_pg_data_get(vm_node_id_t node) { if (node == 0) { return node_data; diff --git a/vm/cache.c b/vm/cache.c index 72dd8b2..9bdacf5 100644 --- a/vm/cache.c +++ b/vm/cache.c @@ -6,15 +6,15 @@ #define FREELIST_END ((unsigned int)-1) -static vm_cache_t cache_cache = { .c_name = 
"vm_cache", .c_obj_size = sizeof(vm_cache_t) }; +static struct vm_cache cache_cache = { .c_name = "vm_cache", .c_obj_size = sizeof(struct vm_cache) }; -vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags) +struct vm_cache *vm_cache_create(const char *name, size_t objsz, enum vm_cache_flags flags) { if (!VM_CACHE_INITIALISED(&cache_cache)) { vm_cache_init(&cache_cache); } - vm_cache_t *new_cache = vm_cache_alloc(&cache_cache, 0); + struct vm_cache *new_cache = vm_cache_alloc(&cache_cache, 0); new_cache->c_name = name; new_cache->c_obj_size = objsz; @@ -25,7 +25,7 @@ vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t fla return new_cache; } -void vm_cache_init(vm_cache_t *cache) +void vm_cache_init(struct vm_cache *cache) { cache->c_page_order = VM_PAGE_16K; if (cache->c_obj_size >= 512) { @@ -50,7 +50,7 @@ void vm_cache_init(vm_cache_t *cache) cache->c_stride = space_per_item; if (!(cache->c_flags & VM_CACHE_OFFSLAB)) { - available -= sizeof(vm_slab_t); + available -= sizeof(struct vm_slab); } /* one entry in the freelist per object slot */ @@ -61,7 +61,7 @@ void vm_cache_init(vm_cache_t *cache) cache->c_slabs_partial = QUEUE_INIT; cache->c_slabs_empty = QUEUE_INIT; - cache->c_hdr_size = sizeof(vm_slab_t) + (sizeof(unsigned int) * cache->c_obj_count); + cache->c_hdr_size = sizeof(struct vm_slab) + (sizeof(unsigned int) * cache->c_obj_count); /* for on-slab caches, c_hdr_size is added to the slab pointer to get the object buffer pointer. 
by aligning c_hdr_size to the @@ -73,15 +73,15 @@ void vm_cache_init(vm_cache_t *cache) } } -void vm_cache_destroy(vm_cache_t *cache) +void vm_cache_destroy(struct vm_cache *cache) { /* TODO */ } -static vm_slab_t *alloc_slab(vm_cache_t *cache, vm_flags_t flags) +static struct vm_slab *alloc_slab(struct vm_cache *cache, enum vm_flags flags) { - vm_page_t *slab_page = vm_page_alloc(cache->c_page_order, flags); - vm_slab_t *slab_hdr = NULL; + struct vm_page *slab_page = vm_page_alloc(cache->c_page_order, flags); + struct vm_slab *slab_hdr = NULL; void *slab_data = vm_page_get_vaddr(slab_page); if (cache->c_flags & VM_CACHE_OFFSLAB) { @@ -120,12 +120,12 @@ static vm_slab_t *alloc_slab(vm_cache_t *cache, vm_flags_t flags) return slab_hdr; } -static void __used destroy_slab(vm_slab_t *slab) +static void __used destroy_slab(struct vm_slab *slab) { } -static unsigned int slab_allocate_slot(vm_slab_t *slab) +static unsigned int slab_allocate_slot(struct vm_slab *slab) { if (slab->s_free == FREELIST_END) { return FREELIST_END; @@ -139,7 +139,7 @@ static unsigned int slab_allocate_slot(vm_slab_t *slab) return slot; } -static void slab_free_slot(vm_slab_t *slab, unsigned int slot) +static void slab_free_slot(struct vm_slab *slab, unsigned int slot) { unsigned int next = slab->s_free; unsigned int *freelist = (unsigned int *)(slab + 1); @@ -149,30 +149,30 @@ static void slab_free_slot(vm_slab_t *slab, unsigned int slot) slab->s_obj_allocated--; } -static void *slot_to_pointer(vm_slab_t *slab, unsigned int slot) +static void *slot_to_pointer(struct vm_slab *slab, unsigned int slot) { return (void *)((char *)slab->s_objects + (slot * slab->s_cache->c_stride)); } -static unsigned int pointer_to_slot(vm_slab_t *slab, void *p) +static unsigned int pointer_to_slot(struct vm_slab *slab, void *p) { size_t offset = (uintptr_t)p - (uintptr_t)slab->s_objects; return offset / slab->s_cache->c_stride; } -void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags) +void *vm_cache_alloc(struct 
vm_cache *cache, enum vm_flags flags) { unsigned long irq_flags; spin_lock_irqsave(&cache->c_lock, &irq_flags); - vm_slab_t *slab = NULL; + struct vm_slab *slab = NULL; if (!queue_empty(&cache->c_slabs_partial)) { /* prefer using up partially-full slabs before taking a fresh one */ - queue_entry_t *slab_entry = queue_pop_front(&cache->c_slabs_partial); - slab = QUEUE_CONTAINER(vm_slab_t, s_list, slab_entry); + struct queue_entry *slab_entry = queue_pop_front(&cache->c_slabs_partial); + slab = QUEUE_CONTAINER(struct vm_slab, s_list, slab_entry); } else if (!queue_empty(&cache->c_slabs_empty)) { - queue_entry_t *slab_entry = queue_pop_front(&cache->c_slabs_empty); - slab = QUEUE_CONTAINER(vm_slab_t, s_list, slab_entry); + struct queue_entry *slab_entry = queue_pop_front(&cache->c_slabs_empty); + slab = QUEUE_CONTAINER(struct vm_slab, s_list, slab_entry); } else { /* we've run out of slabs. create a new one */ slab = alloc_slab(cache, flags); @@ -196,20 +196,20 @@ void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags) return p; } -void vm_cache_free(vm_cache_t *cache, void *p) +void vm_cache_free(struct vm_cache *cache, void *p) { unsigned long irq_flags; spin_lock_irqsave(&cache->c_lock, &irq_flags); phys_addr_t phys = vm_virt_to_phys(p); - vm_page_t *pg = vm_page_get(phys); + struct vm_page *pg = vm_page_get(phys); if (!pg || !pg->p_slab) { spin_unlock_irqrestore(&cache->c_lock, irq_flags); return; } - vm_slab_t *slab = pg->p_slab; + struct vm_slab *slab = pg->p_slab; if (slab->s_cache != cache) { spin_unlock_irqrestore(&cache->c_lock, irq_flags); diff --git a/vm/flat.c b/vm/flat.c index 64be9ca..afaa114 100644 --- a/vm/flat.c +++ b/vm/flat.c @@ -20,7 +20,7 @@ #include /* array of pages, one for each physical page frame present in RAM */ -static vm_page_t *page_array = NULL; +static struct vm_page *page_array = NULL; /* number of pages stored in page_array */ static size_t page_array_count = 0; @@ -30,7 +30,7 @@ void vm_flat_init(void) printk("vm: using flat 
memory model"); size_t pmem_size = 0; - memblock_iter_t it; + struct memblock_iter it; for_each_free_mem_range (&it, 0x0, UINTPTR_MAX) { if (pmem_size < it.it_limit + 1) { pmem_size = it.it_limit + 1; @@ -42,7 +42,7 @@ void vm_flat_init(void) nr_pages++; } - page_array = memblock_alloc(sizeof(vm_page_t) * nr_pages, 8); + page_array = memblock_alloc(sizeof(struct vm_page) * nr_pages, 8); page_array_count = nr_pages; size_t nr_reserved = nr_pages; @@ -63,13 +63,13 @@ void vm_flat_init(void) printk("vm: page array has %zu pages, %zu reserved", nr_pages, nr_reserved); } -vm_page_t *vm_page_get_flat(phys_addr_t addr) +struct vm_page *vm_page_get_flat(phys_addr_t addr) { size_t pfn = addr / VM_PAGE_SIZE; return pfn < page_array_count ? &page_array[pfn] : NULL; } -size_t vm_page_get_pfn_flat(vm_page_t *pg) +size_t vm_page_get_pfn_flat(struct vm_page *pg) { return ((uintptr_t)pg - (uintptr_t)page_array) / sizeof *pg; } diff --git a/vm/kmalloc.c b/vm/kmalloc.c index fac5593..f534859 100644 --- a/vm/kmalloc.c +++ b/vm/kmalloc.c @@ -10,7 +10,7 @@ static int kmalloc_initialised = 0; /* reserve space for the size-N caches: */ -static vm_cache_t size_n_caches[] = { +static struct vm_cache size_n_caches[] = { SIZE_N_CACHE(16), SIZE_N_CACHE(32), SIZE_N_CACHE(48), @@ -40,7 +40,7 @@ void kmalloc_init(void) kmalloc_initialised = 1; } -void *kmalloc(size_t count, vm_flags_t flags) +void *kmalloc(size_t count, enum vm_flags flags) { if (!count) { return NULL; @@ -58,7 +58,7 @@ void *kmalloc(size_t count, vm_flags_t flags) return memblock_alloc(count, align); } - vm_cache_t *best_fit = NULL; + struct vm_cache *best_fit = NULL; for (unsigned int i = 0; i < nr_size_n_caches; i++) { if (size_n_caches[i].c_obj_size >= count) { best_fit = &size_n_caches[i]; @@ -73,7 +73,7 @@ void *kmalloc(size_t count, vm_flags_t flags) return vm_cache_alloc(best_fit, flags); } -void *kzalloc(size_t count, vm_flags_t flags) +void *kzalloc(size_t count, enum vm_flags flags) { void *p = kmalloc(count, flags); 
if (p) { @@ -92,7 +92,7 @@ void kfree(void *p) } phys_addr_t phys = vm_virt_to_phys(p); - vm_page_t *pg = vm_page_get(phys); + struct vm_page *pg = vm_page_get(phys); if (!pg || !pg->p_slab) { return; } diff --git a/vm/memblock.c b/vm/memblock.c index 14f33ba..e92a4b7 100644 --- a/vm/memblock.c +++ b/vm/memblock.c @@ -38,12 +38,12 @@ be bounded by the defined memory regions, and not by this constant. */ #define ADDR_MAX (~(uintptr_t)0) -static memblock_region_t init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT]; -static memblock_region_t init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT]; +static struct memblock_region init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT]; +static struct memblock_region init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT]; static phys_addr_t do_alloc(size_t size, phys_addr_t align); -memblock_t memblock = { +struct memblock memblock = { .memory.regions = init_memory_regions, .memory.count = 0, .memory.max = MEMBLOCK_INIT_MEMORY_REGION_COUNT, @@ -55,33 +55,33 @@ memblock_t memblock = { .reserved.name = "reserved", }; -static void memblock_double_capacity(memblock_type_t *type) +static void memblock_double_capacity(struct memblock_type *type) { size_t new_max = type->max * 2; - phys_addr_t new_regions_p = do_alloc(new_max * sizeof(memblock_region_t), 8); + phys_addr_t new_regions_p = do_alloc(new_max * sizeof(struct memblock_region), 8); void *new_regions = (void *)(new_regions_p + memblock.m_voffset); - memcpy(new_regions, type->regions, type->count * sizeof(memblock_region_t)); + memcpy(new_regions, type->regions, type->count * sizeof(struct memblock_region)); type->regions = new_regions; type->max = new_max; } -static int memblock_insert_region(memblock_type_t *type, memblock_region_t *to_add) +static int memblock_insert_region(struct memblock_type *type, struct memblock_region *to_add) { unsigned int i = 0; for (i = 0; i < type->count; i++) { - const memblock_region_t *cur = &type->regions[i]; + const struct 
memblock_region *cur = &type->regions[i]; if (cur->base >= to_add->limit) { break; } } - memblock_region_t *src = &type->regions[i]; - memblock_region_t *dst = &type->regions[i + 1]; + struct memblock_region *src = &type->regions[i]; + struct memblock_region *dst = &type->regions[i + 1]; unsigned int count = type->count - i; memmove(dst, src, count * sizeof *src); @@ -92,14 +92,14 @@ static int memblock_insert_region(memblock_type_t *type, memblock_region_t *to_a return 0; } -static int memblock_remove_region(memblock_type_t *type, unsigned int i) +static int memblock_remove_region(struct memblock_type *type, unsigned int i) { if (i >= type->count) { return -1; } - memblock_region_t *src = &type->regions[i + 1]; - memblock_region_t *dst = &type->regions[i]; + struct memblock_region *src = &type->regions[i + 1]; + struct memblock_region *dst = &type->regions[i]; unsigned int count = type->count - i; memmove(dst, src, count * sizeof *src); @@ -116,7 +116,7 @@ int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset) return 0; } -int memblock_add_range(memblock_type_t *type, uintptr_t base, size_t size, memblock_region_status_t status) +int memblock_add_range(struct memblock_type *type, uintptr_t base, size_t size, enum memblock_region_status status) { if (size == 0) { return 0; @@ -131,12 +131,12 @@ int memblock_add_range(memblock_type_t *type, uintptr_t base, size_t size, membl return 0; } - memblock_region_t new_region = { .base = base, .limit = limit, .status = status }; + struct memblock_region new_region = { .base = base, .limit = limit, .status = status }; /* two regions with different statuses CANNOT intersect. we first need to check to make sure the region being added doesn't violate this rule. 
*/ for (unsigned int i = 0; i < type->count; i++) { - memblock_region_t *cur_region = &type->regions[i]; + struct memblock_region *cur_region = &type->regions[i]; if (new_region.base > cur_region->limit || new_region.limit < cur_region->base) { continue; @@ -152,7 +152,7 @@ int memblock_add_range(memblock_type_t *type, uintptr_t base, size_t size, membl bool add_new = true; for (unsigned int i = 0; i < type->count; i++) { - memblock_region_t *cur_region = &type->regions[i]; + struct memblock_region *cur_region = &type->regions[i]; /* case 1: the region being added and the current region have no connection what-so-ever (no overlaps) */ if (cur_region->limit + 1 < new_region.base || cur_region->base > new_region.limit) { @@ -244,7 +244,7 @@ static phys_addr_t do_alloc(size_t size, phys_addr_t align) phys_addr_t region_start = memblock.m_alloc_start - memblock.m_voffset; phys_addr_t region_end = memblock.m_alloc_end - memblock.m_voffset; - memblock_iter_t it; + struct memblock_iter it; for_each_free_mem_range (&it, region_start, region_end) { phys_addr_t base = it.it_base; if (base & (align - 1)) { @@ -306,13 +306,13 @@ int memblock_free_phys(phys_addr_t addr, size_t size) return 0; } -void __next_memory_region(memblock_iter_t *it, memblock_type_t *type_a, memblock_type_t *type_b, uintptr_t start, uintptr_t end) +void __next_memory_region(struct memblock_iter *it, struct memblock_type *type_a, struct memblock_type *type_b, uintptr_t start, uintptr_t end) { unsigned int idx_a = IDX_A(it->__idx); unsigned int idx_b = IDX_B(it->__idx); for (; idx_a < type_a->count; idx_a++) { - memblock_region_t *m = &type_a->regions[idx_a]; + struct memblock_region *m = &type_a->regions[idx_a]; uintptr_t m_start = m->base; uintptr_t m_end = m->limit; @@ -337,7 +337,7 @@ void __next_memory_region(memblock_iter_t *it, memblock_type_t *type_a, memblock } for (; idx_b < type_b->count + 1; idx_b++) { - memblock_region_t *r = &type_b->regions[idx_b]; + struct memblock_region *r = 
&type_b->regions[idx_b]; /* r_start and r_end delimit the region of memory between the current and previous reserved regions. if we have gone past the last reserved region, these variables delimit the range between the end diff --git a/vm/model.c b/vm/model.c index ed379dc..282d704 100644 --- a/vm/model.c +++ b/vm/model.c @@ -1,13 +1,13 @@ #include -static vm_model_t model; +static enum vm_model model; -vm_model_t vm_memory_model(void) +enum vm_model vm_memory_model(void) { return model; } -void vm_set_memory_model(vm_model_t m) +void vm_set_memory_model(enum vm_model m) { model = m; } diff --git a/vm/page.c b/vm/page.c index be884a5..fd33542 100644 --- a/vm/page.c +++ b/vm/page.c @@ -24,7 +24,7 @@ static size_t page_order_bytes[] = { [VM_PAGE_128M] = 0x8000000, /* vm can support pages of this size, but - vm_page_t only has 4 bits with which to store + struct vm_page only has 4 bits with which to store the page order, which cannot accomodate these larger order numbers */ [VM_PAGE_256M] = 0x10000000, @@ -63,7 +63,7 @@ void *vm_phys_to_virt(phys_addr_t p) return (void *)(VM_PAGEMAP_BASE + p); } -vm_page_t *vm_page_get(phys_addr_t addr) +struct vm_page *vm_page_get(phys_addr_t addr) { switch (vm_memory_model()) { case VM_MODEL_FLAT: @@ -75,17 +75,17 @@ vm_page_t *vm_page_get(phys_addr_t addr) } } -phys_addr_t vm_page_get_paddr(vm_page_t *pg) +phys_addr_t vm_page_get_paddr(struct vm_page *pg) { return vm_page_get_pfn(pg) * VM_PAGE_SIZE; } -void *vm_page_get_vaddr(vm_page_t *pg) +void *vm_page_get_vaddr(struct vm_page *pg) { return (void *)(vm_phys_to_virt(vm_page_get_pfn(pg) * VM_PAGE_SIZE)); } -size_t vm_page_get_pfn(vm_page_t *pg) +size_t vm_page_get_pfn(struct vm_page *pg) { switch (vm_memory_model()) { case VM_MODEL_FLAT: @@ -97,7 +97,7 @@ size_t vm_page_get_pfn(vm_page_t *pg) } } -size_t vm_page_order_to_bytes(vm_page_order_t order) +size_t vm_page_order_to_bytes(enum vm_page_order order) { if (order < VM_PAGE_4K || order > VM_PAGE_64G) { return 0; @@ -106,7 
+106,7 @@ size_t vm_page_order_to_bytes(vm_page_order_t order) return page_order_bytes[order]; } -phys_addr_t vm_page_order_to_pages(vm_page_order_t order) +phys_addr_t vm_page_order_to_pages(enum vm_page_order order) { if (order < VM_PAGE_4K || order > VM_PAGE_64G) { return 0; @@ -115,7 +115,7 @@ phys_addr_t vm_page_order_to_pages(vm_page_order_t order) return page_order_bytes[order] >> VM_PAGE_SHIFT; } -vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order) +vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order) { if (order < 0 || order > VM_PAGE_MAX_ORDER) { return 0; @@ -136,9 +136,9 @@ size_t vm_bytes_to_pages(size_t bytes) return bytes; } -vm_zone_t *vm_page_get_zone(vm_page_t *pg) +struct vm_zone *vm_page_get_zone(struct vm_page *pg) { - vm_pg_data_t *node = vm_pg_data_get(pg->p_node); + struct vm_pg_data *node = vm_pg_data_get(pg->p_node); if (!node) { return 0; } @@ -151,19 +151,19 @@ vm_zone_t *vm_page_get_zone(vm_page_t *pg) } -vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags) +struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags) { /* TODO prefer nodes closer to us */ - vm_pg_data_t *node = vm_pg_data_get(0); - vm_zone_id_t zone_id = VM_ZONE_HIGHMEM; + struct vm_pg_data *node = vm_pg_data_get(0); + enum vm_zone_id zone_id = VM_ZONE_HIGHMEM; if (flags & VM_GET_DMA) { zone_id = VM_ZONE_DMA; } while (1) { - vm_zone_t *z = &node->pg_zones[zone_id]; + struct vm_zone *z = &node->pg_zones[zone_id]; - vm_page_t *pg = vm_zone_alloc_page(z, order, flags); + struct vm_page *pg = vm_zone_alloc_page(z, order, flags); if (pg) { return pg; } @@ -178,9 +178,9 @@ vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags) return NULL; } -void vm_page_free(vm_page_t *pg) +void vm_page_free(struct vm_page *pg) { - vm_zone_t *z = vm_page_get_zone(pg); + struct vm_zone *z = vm_page_get_zone(pg); if (!z) { return; } @@ -188,7 +188,7 @@ void vm_page_free(vm_page_t *pg) vm_zone_free_page(z, pg); } -int 
vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b) +int vm_page_split(struct vm_page *pg, struct vm_page **a, struct vm_page **b) { if (pg->p_order == VM_PAGE_MIN_ORDER) { return -1; @@ -202,7 +202,7 @@ int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b) pg[i].p_order--; } - vm_page_t *buddy = vm_page_get_buddy(pg); + struct vm_page *buddy = vm_page_get_buddy(pg); if (pg->p_order == VM_PAGE_MIN_ORDER) { pg->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD); @@ -218,7 +218,7 @@ int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b) return 0; } -vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b) +struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b) { if (a->p_order != b->p_order) { return NULL; @@ -238,7 +238,7 @@ vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b) /* make sure that a comes before b */ if (a > b) { - vm_page_t *tmp = a; + struct vm_page *tmp = a; a = b; b = tmp; } @@ -260,16 +260,16 @@ vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b) return a; } -vm_page_t *vm_page_get_buddy(vm_page_t *pg) +struct vm_page *vm_page_get_buddy(struct vm_page *pg) { phys_addr_t paddr = vm_page_get_paddr(pg); paddr = paddr ^ vm_page_order_to_bytes(pg->p_order); return vm_page_get(paddr); } -vm_page_t *vm_page_get_next_tail(vm_page_t *pg) +struct vm_page *vm_page_get_next_tail(struct vm_page *pg) { - vm_page_t *next = pg + 1; + struct vm_page *next = pg + 1; if (next->p_flags & VM_PAGE_HEAD || !(next->p_flags & VM_PAGE_HUGE)) { return NULL; } diff --git a/vm/sparse.c b/vm/sparse.c index 00c32ea..1944a95 100644 --- a/vm/sparse.c +++ b/vm/sparse.c @@ -28,10 +28,10 @@ #include #include -static vm_sector_t *sector_array = NULL; +static struct vm_sector *sector_array = NULL; static size_t sector_array_count = 0; -static vm_sector_t *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sector_id, size_t *index) +static struct vm_sector *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sector_id, size_t *index) { /* all sectors 
have the same size */ size_t step = vm_page_order_to_bytes(sector_array[0].s_size); @@ -52,16 +52,16 @@ static vm_sector_t *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sect return &sector_array[sector]; } -static vm_page_t *get_or_create_page(phys_addr_t addr) +static struct vm_page *get_or_create_page(phys_addr_t addr) { size_t sector_number, page_number; phys_addr_to_sector_and_index(addr, &sector_number, &page_number); - vm_sector_t *sector = &sector_array[sector_number]; + struct vm_sector *sector = &sector_array[sector_number]; if (!sector->s_pages) { size_t nr_pages = vm_page_order_to_pages(sector->s_size); - sector->s_pages = kzalloc(nr_pages * sizeof(vm_page_t), 0); + sector->s_pages = kzalloc(nr_pages * sizeof(struct vm_page), 0); for (size_t i = 0; i < nr_pages; i++) { sector->s_pages[i].p_flags = VM_PAGE_RESERVED; @@ -73,9 +73,9 @@ static vm_page_t *get_or_create_page(phys_addr_t addr) return &sector->s_pages[page_number]; } -static vm_page_order_t find_minimum_sector_size(size_t pmem_size) +static enum vm_page_order find_minimum_sector_size(size_t pmem_size) { - for (vm_page_order_t i = VM_PAGE_4K; i < VM_PAGE_64G; i++) { + for (enum vm_page_order i = VM_PAGE_4K; i < VM_PAGE_64G; i++) { size_t order_bytes = vm_page_order_to_bytes(i); if (order_bytes * VM_MAX_SECTORS >= pmem_size) { return i; @@ -93,12 +93,12 @@ static vm_page_order_t find_minimum_sector_size(size_t pmem_size) this function uses some heuristics and thresholds that are untested and are in need of improvement to ensure that sparse works well on a wide range of systems. */ -static void calculate_sector_size_and_count(size_t pmem_size, size_t reserved_size, unsigned int *out_sector_count, vm_page_order_t *out_sector_size) +static void calculate_sector_size_and_count(size_t pmem_size, size_t reserved_size, unsigned int *out_sector_count, enum vm_page_order *out_sector_size) { /* we can support up to VM_MAX_SECTORS memory sectors. 
the minimum sector size is what ever is required to cover all of physical memory in the maximum number of sectors */ - vm_page_order_t sector_size = find_minimum_sector_size(pmem_size); + enum vm_page_order sector_size = find_minimum_sector_size(pmem_size); if (sector_size <= VM_PAGE_2M) { /* override really small sector sizes with something @@ -148,7 +148,7 @@ void vm_sparse_init(void) { size_t pmem_size = 0, reserved_size = 0; - memblock_iter_t it; + struct memblock_iter it; for_each_mem_range (&it, 0x0, UINTPTR_MAX) { if (pmem_size < it.it_limit + 1) { pmem_size = it.it_limit + 1; @@ -159,7 +159,7 @@ void vm_sparse_init(void) reserved_size += it.it_limit - it.it_base + 1; } - vm_page_order_t sector_size; + enum vm_page_order sector_size; size_t sector_bytes = 0; unsigned int nr_sectors = 0; calculate_sector_size_and_count(pmem_size, reserved_size, &nr_sectors, &sector_size); @@ -168,7 +168,7 @@ void vm_sparse_init(void) char sector_size_str[64]; data_size_to_string(sector_bytes, sector_size_str, sizeof sector_size_str); - sector_array = kzalloc(sizeof(vm_sector_t) * nr_sectors, 0); + sector_array = kzalloc(sizeof(struct vm_sector) * nr_sectors, 0); sector_array_count = nr_sectors; for (unsigned int i = 0; i < nr_sectors; i++) { @@ -186,7 +186,7 @@ void vm_sparse_init(void) } for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) { - vm_page_t *pg = get_or_create_page(i); + struct vm_page *pg = get_or_create_page(i); pg->p_flags = 0; } } @@ -198,7 +198,7 @@ void vm_sparse_init(void) } for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) { - vm_page_t *pg = vm_page_get(i); + struct vm_page *pg = vm_page_get(i); if (!pg) { /* if the page doesn't exist, it is part of a sector @@ -214,7 +214,7 @@ void vm_sparse_init(void) printk("vm: [sparse] initialised %zu sectors of size %s", nr_sectors, sector_size_str); } -vm_page_t *vm_page_get_sparse(phys_addr_t addr) +struct vm_page *vm_page_get_sparse(phys_addr_t addr) { size_t sector_number, 
page_number; phys_addr_to_sector_and_index(addr, &sector_number, &page_number); @@ -222,7 +222,7 @@ vm_page_t *vm_page_get_sparse(phys_addr_t addr) return NULL; } - vm_sector_t *sector = &sector_array[sector_number]; + struct vm_sector *sector = &sector_array[sector_number]; if (!sector->s_pages || page_number >= vm_page_order_to_pages(sector->s_size)) { return NULL; @@ -231,8 +231,8 @@ vm_page_t *vm_page_get_sparse(phys_addr_t addr) return &sector->s_pages[page_number]; } -size_t vm_page_get_pfn_sparse(vm_page_t *pg) +size_t vm_page_get_pfn_sparse(struct vm_page *pg) { - vm_sector_t *sector = &sector_array[pg->p_sector]; + struct vm_sector *sector = &sector_array[pg->p_sector]; return sector->s_first_pfn + (((uintptr_t)pg - (uintptr_t)sector->s_pages) / sizeof *pg); } diff --git a/vm/zone.c b/vm/zone.c index 54679fc..3722bd8 100644 --- a/vm/zone.c +++ b/vm/zone.c @@ -8,11 +8,11 @@ #include #include -static vm_page_t *group_pages_into_block(vm_zone_t *z, phys_addr_t base, phys_addr_t limit, int order) +static struct vm_page *group_pages_into_block(struct vm_zone *z, phys_addr_t base, phys_addr_t limit, int order) { - vm_page_t *first_page = NULL; + struct vm_page *first_page = NULL; for (phys_addr_t i = base; i < limit; i += VM_PAGE_SIZE) { - vm_page_t *pg = vm_page_get(i); + struct vm_page *pg = vm_page_get(i); if (!pg) { continue; } @@ -37,7 +37,7 @@ static vm_page_t *group_pages_into_block(vm_zone_t *z, phys_ad return first_page; } -static void convert_region_to_blocks(vm_zone_t *zone, +static void convert_region_to_blocks(struct vm_zone *zone, phys_addr_t base, phys_addr_t limit, int reserved) { @@ -60,7 +60,7 @@ static void convert_region_to_blocks(vm_zone_t *zone, } phys_addr_t block_limit = base + (order_frames * VM_PAGE_SIZE) - 1; - vm_page_t *block_page = group_pages_into_block(zone, base, block_limit, order); + struct vm_page *block_page = group_pages_into_block(zone, base, block_limit, order); if (reserved == 0) { queue_push_back(&zone->z_free_pages[order], 
&block_page->p_list); @@ -80,13 +80,13 @@ static void convert_region_to_blocks(vm_zone_t *zone, } } -static size_t zone_free_bytes(vm_zone_t *z) +static size_t zone_free_bytes(struct vm_zone *z) { size_t free_bytes = 0; - for (vm_page_order_t i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER; i++) { + for (enum vm_page_order i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER; i++) { size_t page_bytes = vm_page_order_to_bytes(i); size_t nr_pages = 0; - queue_foreach (vm_page_t, pg, &z->z_free_pages[i], p_list) { + queue_foreach (struct vm_page, pg, &z->z_free_pages[i], p_list) { free_bytes += page_bytes; nr_pages++; } @@ -95,7 +95,7 @@ static size_t zone_free_bytes(vm_zone_t *z) return free_bytes; } -void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info) +void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info) { memset(z, 0x0, sizeof *z); memcpy(&z->z_info, zone_info, sizeof *zone_info); @@ -108,7 +108,7 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info) int this_page_reserved = 0, last_page_reserved = -1; phys_addr_t plimit = 0; - memblock_iter_t it; + struct memblock_iter it; for_each_mem_range (&it, 0x00, UINTPTR_MAX) { if (it.it_limit + 1 > plimit) { plimit = it.it_limit + 1; @@ -121,7 +121,7 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info) size_t nr_pages_found = 0; for (uintptr_t i = z->z_info.zd_base; i < z->z_info.zd_limit; i += VM_PAGE_SIZE) { - vm_page_t *pg = vm_page_get(i); + struct vm_page *pg = vm_page_get(i); if (pg) { nr_pages_found++; @@ -162,7 +162,7 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info) printk("vm: zone %u/%s: %s of memory online.", z->z_info.zd_node, z->z_info.zd_name, free_bytes_str); } -static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order) +static int replenish_free_page_list(struct vm_zone *z, enum vm_page_order order) { if (!queue_empty(&z->z_free_pages[order])) { /* we already have pages available. 
*/ @@ -175,9 +175,9 @@ static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order) } /* the lowest page order that is >= `order` and still has pages available */ - vm_page_order_t first_order_with_free = VM_MAX_PAGE_ORDERS; + enum vm_page_order first_order_with_free = VM_MAX_PAGE_ORDERS; - for (vm_page_order_t i = order; i <= VM_PAGE_MAX_ORDER; i++) { + for (enum vm_page_order i = order; i <= VM_PAGE_MAX_ORDER; i++) { if (!queue_empty(&z->z_free_pages[i])) { first_order_with_free = i; break; @@ -197,11 +197,11 @@ static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order) /* starting from the first page list with free pages, take a page, split it in half, and add the sub-pages to the next order's free list. */ - for (vm_page_order_t i = first_order_with_free; i > order; i--) { - queue_entry_t *pg_entry = queue_pop_front(&z->z_free_pages[i]); - vm_page_t *pg = QUEUE_CONTAINER(vm_page_t, p_list, pg_entry); + for (enum vm_page_order i = first_order_with_free; i > order; i--) { + struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[i]); + struct vm_page *pg = QUEUE_CONTAINER(struct vm_page, p_list, pg_entry); - vm_page_t *a, *b; + struct vm_page *a, *b; vm_page_split(pg, &a, &b); queue_push_back(&z->z_free_pages[i - 1], &a->p_list); @@ -211,7 +211,7 @@ static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order) return 0; } -vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags) +struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags) { unsigned long irq_flags; spin_lock_irqsave(&z->z_lock, &irq_flags); @@ -222,8 +222,8 @@ vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t fl return NULL; } - queue_entry_t *pg_entry = queue_pop_front(&z->z_free_pages[order]); - vm_page_t *pg = QUEUE_CONTAINER(vm_page_t, p_list, pg_entry); + struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[order]); + struct vm_page *pg = 
QUEUE_CONTAINER(struct vm_page, p_list, pg_entry); vm_page_foreach (pg, i) { i->p_flags |= VM_PAGE_ALLOC; } @@ -232,7 +232,7 @@ vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t fl return pg; } -void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg) +void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg) { unsigned long irq_flags; spin_lock_irqsave(&z->z_lock, &irq_flags); @@ -241,8 +241,8 @@ void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg) queue_push_back(&z->z_free_pages[pg->p_order], &pg->p_list); while (1) { - vm_page_t *buddy = vm_page_get_buddy(pg); - vm_page_t *huge = vm_page_merge(pg, buddy); + struct vm_page *buddy = vm_page_get_buddy(pg); + struct vm_page *huge = vm_page_merge(pg, buddy); if (!huge) { break; }