kernel: don't use typedef for enums or non-opaque structs

This commit is contained in:
2023-04-12 20:17:11 +01:00
parent 0d75e347e9
commit b6f8c1ccaa
51 changed files with 663 additions and 665 deletions

View File

@@ -27,34 +27,34 @@ extern "C" {
#define VM_PAGE_IS_FREE(pg) (((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0)
#define vm_page_foreach(pg, i) \
for (vm_page_t *i = (pg); i; i = vm_page_get_next_tail(i))
for (struct vm_page *i = (pg); i; i = vm_page_get_next_tail(i))
typedef phys_addr_t vm_alignment_t;
typedef unsigned int vm_node_id_t;
typedef struct vm_object {
struct vm_object {
unsigned int reserved;
} vm_object_t;
};
typedef enum vm_model {
enum vm_model {
VM_MODEL_FLAT = 1,
VM_MODEL_SPARSE,
} vm_model_t;
};
typedef enum vm_prot {
enum vm_prot {
VM_PROT_READ = 0x01u,
VM_PROT_WRITE = 0x02u,
VM_PROT_EXEC = 0x04u,
VM_PROT_USER = 0x08u,
VM_PROT_SVR = 0x10u,
} vm_prot_t;
};
typedef enum vm_flags {
enum vm_flags {
VM_NORMAL = 0x00u,
VM_GET_DMA = 0x01u,
} vm_flags_t;
};
typedef enum vm_zone_id {
enum vm_zone_id {
/* NOTE that these are used as indices into the node_zones array in vm/zone.c
they need to be contiguous, and must start at 0! */
VM_ZONE_DMA = 0u,
@@ -62,9 +62,9 @@ typedef enum vm_zone_id {
VM_ZONE_HIGHMEM = 2u,
VM_ZONE_MIN = VM_ZONE_DMA,
VM_ZONE_MAX = VM_ZONE_HIGHMEM,
} vm_zone_id_t;
};
typedef enum vm_page_order {
enum vm_page_order {
VM_PAGE_4K = 0u,
VM_PAGE_8K,
VM_PAGE_16K,
@@ -82,7 +82,7 @@ typedef enum vm_page_order {
VM_PAGE_64M,
VM_PAGE_128M,
/* vm_page_t only has 4 bits to store the page order with.
/* struct vm_page only has 4 bits to store the page order with.
the maximum order that can be stored in 4 bits is 15 (VM_PAGE_128M)
to use any of the page orders listed here, this field
will have to be expanded. */
@@ -95,9 +95,9 @@ typedef enum vm_page_order {
VM_PAGE_16G,
VM_PAGE_32G,
VM_PAGE_64G,
} vm_page_order_t;
};
typedef enum vm_page_flags {
enum vm_page_flags {
/* page is reserved (probably by a call to memblock_reserve()) and cannot be
returned by any allocation function */
VM_PAGE_RESERVED = 0x01u,
@@ -107,52 +107,52 @@ typedef enum vm_page_flags {
VM_PAGE_HEAD = 0x04u,
/* page is part of a huge-page */
VM_PAGE_HUGE = 0x08u,
} vm_page_flags_t;
};
typedef enum vm_memory_region_status {
enum vm_memory_region_status {
VM_REGION_FREE = 0x01u,
VM_REGION_RESERVED = 0x02u,
} vm_memory_region_status_t;
};
typedef enum vm_cache_flags {
enum vm_cache_flags {
VM_CACHE_OFFSLAB = 0x01u,
VM_CACHE_DMA = 0x02u
} vm_cache_flags_t;
};
typedef struct vm_zone_descriptor {
vm_zone_id_t zd_id;
struct vm_zone_descriptor {
enum vm_zone_id zd_id;
vm_node_id_t zd_node;
const char zd_name[32];
phys_addr_t zd_base;
phys_addr_t zd_limit;
} vm_zone_descriptor_t;
};
typedef struct vm_zone {
vm_zone_descriptor_t z_info;
struct vm_zone {
struct vm_zone_descriptor z_info;
spin_lock_t z_lock;
queue_t z_free_pages[VM_MAX_PAGE_ORDERS];
struct queue z_free_pages[VM_MAX_PAGE_ORDERS];
unsigned long z_size;
} vm_zone_t;
};
typedef struct vm_pg_data {
vm_zone_t pg_zones[VM_MAX_ZONES];
} vm_pg_data_t;
struct vm_pg_data {
struct vm_zone pg_zones[VM_MAX_ZONES];
};
typedef struct vm_region {
vm_memory_region_status_t r_status;
struct vm_region {
enum vm_memory_region_status r_status;
phys_addr_t r_base;
phys_addr_t r_limit;
} vm_region_t;
};
typedef struct vm_cache {
struct vm_cache {
const char *c_name;
vm_cache_flags_t c_flags;
queue_entry_t c_list;
enum vm_cache_flags c_flags;
struct queue_entry c_list;
queue_t c_slabs_full;
queue_t c_slabs_partial;
queue_t c_slabs_empty;
struct queue c_slabs_full;
struct queue c_slabs_partial;
struct queue c_slabs_empty;
spin_lock_t c_lock;
@@ -160,7 +160,7 @@ typedef struct vm_cache {
unsigned int c_obj_count;
/* the size of object kept in the cache */
unsigned int c_obj_size;
/* combined size of vm_slab_t and the freelist */
/* combined size of struct vm_slab and the freelist */
unsigned int c_hdr_size;
/* power of 2 alignment for objects returned from the cache */
unsigned int c_align;
@@ -170,12 +170,12 @@ typedef struct vm_cache {
unsigned int c_stride;
/* size of page used for slabs */
unsigned int c_page_order;
} vm_cache_t;
};
typedef struct vm_slab {
vm_cache_t *s_cache;
/* queue entry for vm_cache_t.c_slabs_* */
queue_entry_t s_list;
struct vm_slab {
struct vm_cache *s_cache;
/* queue entry for struct vm_cache.c_slabs_* */
struct queue_entry s_list;
/* pointer to the first object slot. */
void *s_objects;
/* the number of objects allocated on the slab. */
@@ -193,9 +193,9 @@ typedef struct vm_slab {
this is commented out, as flexible array members are not supported in c++.
*/
//unsigned int s_freelist[];
} vm_slab_t;
};
typedef struct vm_page {
struct vm_page {
/* order of the page block that this page belongs too */
uint32_t p_order : 4;
/* the id of the NUMA node that this page belongs to */
@@ -214,82 +214,82 @@ typedef struct vm_page {
some examples:
- the buddy allocator uses this to maintain its per-zone free-page lists.
*/
queue_entry_t p_list;
struct queue_entry p_list;
/* owner-specific data */
union {
vm_slab_t *p_slab;
struct vm_slab *p_slab;
};
} __attribute__((aligned(2 * sizeof(unsigned long)))) vm_page_t;
} __attribute__((aligned(2 * sizeof(unsigned long))));
/* represents a sector of memory, containing its own array of vm_pages.
this struct is used under the sparse memory model, instead of the
global vm_page array */
typedef struct vm_sector {
struct vm_sector {
/* sector size. this must be a power of 2.
all sectors in the system have the same size. */
vm_page_order_t s_size;
enum vm_page_order s_size;
/* PFN of the first page contained in s_pages.
to find the PFN of any page contained within s_pages,
simply add its offset within the array to s_first_pfn */
size_t s_first_pfn;
/* array of pages contained in this sector */
vm_page_t *s_pages;
} vm_sector_t;
struct vm_page *s_pages;
};
extern kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones);
extern vm_model_t vm_memory_model(void);
extern void vm_set_memory_model(vm_model_t model);
extern kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zones);
extern enum vm_model vm_memory_model(void);
extern void vm_set_memory_model(enum vm_model model);
extern vm_pg_data_t *vm_pg_data_get(vm_node_id_t node);
extern struct vm_pg_data *vm_pg_data_get(vm_node_id_t node);
extern phys_addr_t vm_virt_to_phys(void *p);
extern void *vm_phys_to_virt(phys_addr_t p);
extern void vm_page_init_array();
extern vm_page_t *vm_page_get(phys_addr_t addr);
extern phys_addr_t vm_page_get_paddr(vm_page_t *pg);
extern vm_zone_t *vm_page_get_zone(vm_page_t *pg);
extern void *vm_page_get_vaddr(vm_page_t *pg);
extern size_t vm_page_get_pfn(vm_page_t *pg);
extern size_t vm_page_order_to_bytes(vm_page_order_t order);
extern size_t vm_page_order_to_pages(vm_page_order_t order);
extern vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order);
extern vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags);
extern void vm_page_free(vm_page_t *pg);
extern struct vm_page *vm_page_get(phys_addr_t addr);
extern phys_addr_t vm_page_get_paddr(struct vm_page *pg);
extern struct vm_zone *vm_page_get_zone(struct vm_page *pg);
extern void *vm_page_get_vaddr(struct vm_page *pg);
extern size_t vm_page_get_pfn(struct vm_page *pg);
extern size_t vm_page_order_to_bytes(enum vm_page_order order);
extern size_t vm_page_order_to_pages(enum vm_page_order order);
extern vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order);
extern struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags);
extern void vm_page_free(struct vm_page *pg);
extern int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b);
extern vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b);
extern vm_page_t *vm_page_get_buddy(vm_page_t *pg);
extern vm_page_t *vm_page_get_next_tail(vm_page_t *pg);
extern int vm_page_split(struct vm_page *pg, struct vm_page **a, struct vm_page **b);
extern struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b);
extern struct vm_page *vm_page_get_buddy(struct vm_page *pg);
extern struct vm_page *vm_page_get_next_tail(struct vm_page *pg);
extern size_t vm_bytes_to_pages(size_t bytes);
extern void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info);
extern vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags);
extern void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg);
extern void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info);
extern struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags);
extern void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg);
extern vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags);
extern void vm_cache_init(vm_cache_t *cache);
extern void vm_cache_destroy(vm_cache_t *cache);
extern void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags);
extern void vm_cache_free(vm_cache_t *cache, void *p);
extern struct vm_cache *vm_cache_create(const char *name, size_t objsz, enum vm_cache_flags flags);
extern void vm_cache_init(struct vm_cache *cache);
extern void vm_cache_destroy(struct vm_cache *cache);
extern void *vm_cache_alloc(struct vm_cache *cache, enum vm_flags flags);
extern void vm_cache_free(struct vm_cache *cache, void *p);
extern void kmalloc_init(void);
extern void *kmalloc(size_t count, vm_flags_t flags);
extern void *kzalloc(size_t count, vm_flags_t flags);
extern void *kmalloc(size_t count, enum vm_flags flags);
extern void *kzalloc(size_t count, enum vm_flags flags);
extern void kfree(void *p);
/* Flat memory model functions */
extern void vm_flat_init(void);
extern vm_page_t *vm_page_get_flat(phys_addr_t addr);
extern size_t vm_page_get_pfn_flat(vm_page_t *pg);
extern struct vm_page *vm_page_get_flat(phys_addr_t addr);
extern size_t vm_page_get_pfn_flat(struct vm_page *pg);
/* Sparse memory model functions */
extern void vm_sparse_init(void);
extern vm_page_t *vm_page_get_sparse(phys_addr_t addr);
extern size_t vm_page_get_pfn_sparse(vm_page_t *pg);
extern struct vm_page *vm_page_get_sparse(phys_addr_t addr);
extern size_t vm_page_get_pfn_sparse(struct vm_page *pg);
#ifdef __cplusplus
}