diff --git a/arch/user/include/mango/machine/init.h b/arch/user/include/mango/machine/init.h index fef3e9e..cf16b97 100644 --- a/arch/user/include/mango/machine/init.h +++ b/arch/user/include/mango/machine/init.h @@ -8,16 +8,18 @@ extern "C" { #endif #define __X2(x) #x -#define __X(x) __X2(x) +#define __X(x) __X2(x) #ifdef __APPLE__ -#define __define_initcall(fn, id) \ - static initcall_t __initcall_##fn##id __used \ - __section("__DATA,__initcall" __X(id) ".init") = (fn) +#define __define_initcall(fn, id) \ + static initcall_t __initcall_##fn##id __used __section( \ + "__DATA,__initcall" __X(id) ".init") \ + = (fn) #else -#define __define_initcall(fn, id) \ - static initcall_t __initcall_##fn##id __used \ - __section("initcall" __X(id) "_init") = (fn) +#define __define_initcall(fn, id) \ + static initcall_t __initcall_##fn##id __used __section( \ + "initcall" __X(id) "_init") \ + = (fn) #endif extern int ml_init(uintptr_t arg); diff --git a/ds/bitmap.c b/ds/bitmap.c index e1ffe46..064d10c 100644 --- a/ds/bitmap.c +++ b/ds/bitmap.c @@ -1,5 +1,5 @@ -#include #include +#include void bitmap_zero(unsigned long *map, unsigned long nbits) { @@ -38,7 +38,6 @@ bool bitmap_check(unsigned long *map, unsigned long bit) unsigned long mask = 1ul << offset; return (map[index] & mask) != 0 ? 
true : false; - } unsigned int bitmap_count_set(unsigned long *map, unsigned long nbits) diff --git a/include/mango/memblock.h b/include/mango/memblock.h index a840b4b..68b7a13 100644 --- a/include/mango/memblock.h +++ b/include/mango/memblock.h @@ -22,21 +22,22 @@ #ifndef MANGO_MEMBLOCK_H_ #define MANGO_MEMBLOCK_H_ -#include #include #include +#include #ifdef __cplusplus extern "C" { #endif -#define MEMBLOCK_INIT_MEMORY_REGION_COUNT 128 +#define MEMBLOCK_INIT_MEMORY_REGION_COUNT 128 #define MEMBLOCK_INIT_RESERVED_REGION_COUNT 128 -#define __for_each_mem_range(i, type_a, type_b, p_start, p_end) \ - for ((i)->__idx = 0, __next_memory_region(i, type_a, type_b, p_start, p_end); \ - (i)->__idx != ULLONG_MAX; \ - __next_memory_region(i, type_a, type_b, p_start, p_end)) +#define __for_each_mem_range(i, type_a, type_b, p_start, p_end) \ + for ((i)->__idx = 0, \ + __next_memory_region(i, type_a, type_b, p_start, p_end); \ + (i)->__idx != ULLONG_MAX; \ + __next_memory_region(i, type_a, type_b, p_start, p_end)) /* iterate through all memory regions known to memblock. @@ -47,7 +48,7 @@ extern "C" { @param i the iterator. this should be a pointer of type struct memblock_iter. for each iteration, this structure will be filled with details about - the current memory region. + the current memory region. @param p_start the lower bound of the memory region to iterate through. if you don't want to use a lower bound, pass 0. @param p_end the upper bound of the memory region to iterate through. @@ -65,7 +66,7 @@ extern "C" { struct memblock_iter it; for_each_mem_region (&it, 0x40000, 0x80000) { ... } */ -#define for_each_mem_range(i, p_start, p_end) \ +#define for_each_mem_range(i, p_start, p_end) \ __for_each_mem_range(i, &memblock.memory, NULL, p_start, p_end) /* iterate through all memory regions reserved using memblock. @@ -77,7 +78,7 @@ extern "C" { @param i the iterator. this should be a pointer of type struct memblock_iter. 
for each iteration, this structure will be filled with details about - the current memory region. + the current memory region. @param p_start the lower bound of the memory region to iterate through. if you don't want to use a lower bound, pass 0. @param p_end the upper bound of the memory region to iterate through. @@ -95,7 +96,7 @@ extern "C" { struct memblock_iter it; for_each_reserved_mem_region (&it, 0x40000, 0x80000) { ... } */ -#define for_each_reserved_mem_range(i, p_start, p_end) \ +#define for_each_reserved_mem_range(i, p_start, p_end) \ __for_each_mem_range(i, &memblock.reserved, NULL, p_start, p_end) /* iterate through all memory regions known by memblock to be free. @@ -108,7 +109,7 @@ extern "C" { @param i the iterator. this should be a pointer of type struct memblock_iter. for each iteration, this structure will be filled with details about - the current memory region. + the current memory region. @param p_start the lower bound of the memory region to iterate through. if you don't want to use a lower bound, pass 0. @param p_end the upper bound of the memory region to iterate through. 
@@ -138,19 +139,25 @@ extern "C" { - 0x08000 -> 0x08fff - 0x10000 -> 0x1ffff */ -#define for_each_free_mem_range(i, p_start, p_end) \ - __for_each_mem_range(i, &memblock.memory, &memblock.reserved, p_start, p_end) +#define for_each_free_mem_range(i, p_start, p_end) \ + __for_each_mem_range( \ + i, \ + &memblock.memory, \ + &memblock.reserved, \ + p_start, \ + p_end) typedef uint64_t memblock_index_t; enum memblock_region_status { - /* Used in memblock.memory regions, indicates that the memory region exists */ + /* Used in memblock.memory regions, indicates that the memory region + * exists */ MEMBLOCK_MEMORY = 0, - /* Used in memblock.reserved regions, indicates that the memory region was reserved - * by a call to memblock_alloc() */ + /* Used in memblock.reserved regions, indicates that the memory region + * was reserved by a call to memblock_alloc() */ MEMBLOCK_ALLOC, - /* Used in memblock.reserved regions, indicates that the memory region was reserved - * by a call to memblock_reserve() */ + /* Used in memblock.reserved regions, indicates that the memory region + * was reserved by a call to memblock_reserve() */ MEMBLOCK_RESERVED, }; @@ -176,9 +183,10 @@ struct memblock { /* bounds of the memory region that can be used by memblock_alloc() both of these are virtual addresses */ uintptr_t m_alloc_start, m_alloc_end; - /* memblock assumes that all memory in the alloc zone is contiguously mapped - (if paging is enabled). m_voffset is the offset that needs to be added to - a given physical address to get the corresponding virtual address */ + /* memblock assumes that all memory in the alloc zone is contiguously + mapped (if paging is enabled). 
m_voffset is the offset that needs to + be added to a given physical address to get the corresponding virtual + address */ uintptr_t m_voffset; struct memblock_type memory; @@ -212,7 +220,10 @@ extern int __next_mem_range(struct memblock_iter *it); @param voffset the offset between the physical address of a given page and its corresponding virtual address. */ -extern int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset); +extern int memblock_init( + uintptr_t alloc_start, + uintptr_t alloc_end, + uintptr_t voffset); /* add a region of memory to memblock. @@ -234,7 +245,8 @@ extern int memblock_add(phys_addr_t base, size_t size); reserved memory will not be used by memblock_alloc(), and will remain reserved when the vm_page memory map is initialised. - @param base the physical address of the start of the memory region to reserve. + @param base the physical address of the start of the memory region to + reserve. @oaram size the size of the memory region to reserve in bytes. */ extern int memblock_reserve(phys_addr_t base, size_t size); @@ -257,7 +269,7 @@ extern int memblock_reserve(phys_addr_t base, size_t size); @param size the size of the buffer to allocate in bytes. @param align the alignment to use. for example, an alignment of 4096 will result in the returned pointer being a multiple - of 4096. this must be a power of 2. + of 4096. this must be a power of 2. */ extern void *memblock_alloc(size_t size, phys_addr_t align); @@ -279,7 +291,7 @@ extern void *memblock_alloc(size_t size, phys_addr_t align); @param size the size of the buffer to allocate in bytes. @param align the alignment to use. for example, an alignment of 4096 will result in the returned pointer being a multiple - of 4096. this must be a power of 2. + of 4096. this must be a power of 2. 
*/ extern phys_addr_t memblock_alloc_phys(size_t size, phys_addr_t align); @@ -319,9 +331,12 @@ extern phys_addr_t memblock_virt_to_phys(void *p); */ extern void *memblock_phys_to_virt(phys_addr_t p); -extern void __next_memory_region(struct memblock_iter *it, \ - struct memblock_type *type_a, struct memblock_type *type_b, - phys_addr_t start, phys_addr_t end); +extern void __next_memory_region( + struct memblock_iter *it, + struct memblock_type *type_a, + struct memblock_type *type_b, + phys_addr_t start, + phys_addr_t end); #ifdef __cplusplus } diff --git a/include/mango/panic.h b/include/mango/panic.h index 7019526..cfce017 100644 --- a/include/mango/panic.h +++ b/include/mango/panic.h @@ -3,10 +3,11 @@ #include -struct cpu_context; +struct ml_cpu_context; #define panic(...) panic_irq(NULL, __VA_ARGS__) -extern void __noreturn panic_irq(struct cpu_context *ctx, const char *fmt, ...); +extern void __noreturn +panic_irq(struct ml_cpu_context *ctx, const char *fmt, ...); #endif diff --git a/include/mango/util.h b/include/mango/util.h index 89cfb8c..c2b550e 100644 --- a/include/mango/util.h +++ b/include/mango/util.h @@ -1,27 +1,34 @@ #ifndef MANGO_UTIL_H_ #define MANGO_UTIL_H_ +#include #include #include -#include #ifdef __cplusplus extern "C" { #endif -#define MIN(x, y) ((x) < (y) ? (x) : (y)) -#define MAX(x, y) ((x) > (y) ? (x) : (y)) +#define MIN(x, y) ((x) < (y) ? (x) : (y)) +#define MAX(x, y) ((x) > (y) ? 
(x) : (y)) #define CLAMP(x, lo, hi) (MIN(MAX(x, lo), hi)) extern uint64_t hash_string(const char *s); extern void data_size_to_string(size_t value, char *out, size_t outsz); -static inline bool power_of_2(size_t x) { return (x > 0 && (x & (x - 1)) == 0); } -static inline unsigned long long div64_pow2(unsigned long long x, unsigned long long y) +static inline bool power_of_2(size_t x) +{ + return (x > 0 && (x & (x - 1)) == 0); +} +static inline unsigned long long div64_pow2( + unsigned long long x, + unsigned long long y) { return x >> (__builtin_ctz(y)); } -static inline unsigned long long absdiff64(unsigned long long x, unsigned long long y) +static inline unsigned long long absdiff64( + unsigned long long x, + unsigned long long y) { return x < y ? y - x : x - y; } diff --git a/include/mango/vm.h b/include/mango/vm.h index 35d3972..9694a34 100644 --- a/include/mango/vm.h +++ b/include/mango/vm.h @@ -1,14 +1,14 @@ #ifndef MANGO_VM_H_ #define MANGO_VM_H_ -#include -#include -#include -#include -#include #include +#include #include #include +#include +#include +#include +#include #ifdef __cplusplus extern "C" { @@ -17,13 +17,13 @@ extern "C" { struct bcache; /* maximum number of NUMA nodes */ -#define VM_MAX_NODES 64 +#define VM_MAX_NODES 64 /* maximum number of memory zones per node */ -#define VM_MAX_ZONES (VM_ZONE_MAX + 1) +#define VM_MAX_ZONES (VM_ZONE_MAX + 1) /* maximum number of supported page orders */ -#define VM_MAX_PAGE_ORDERS (VM_PAGE_MAX_ORDER + 1) +#define VM_MAX_PAGE_ORDERS (VM_PAGE_MAX_ORDER + 1) /* maximum number of sparse memory sectors */ -#define VM_MAX_SECTORS 8192 +#define VM_MAX_SECTORS 8192 /* maximum number of disk sectors that can be stored in a single page. AKA the number of bits in the sector bitmap. @@ -33,44 +33,41 @@ struct bcache; #define VM_CHECK_ALIGN(p, mask) ((((p) & (mask)) == (p)) ? 
1 : 0) #define VM_CACHE_INITIALISED(c) ((c)->c_obj_count != 0) -#define VM_PAGE_IS_FREE(pg) (((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0) +#define VM_PAGE_IS_FREE(pg) \ + (((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0) -#define vm_page_foreach(pg, i) \ +#define vm_page_foreach(pg, i) \ for (struct vm_page *i = (pg); i; i = vm_page_get_next_tail(i)) typedef phys_addr_t vm_alignment_t; typedef unsigned int vm_node_id_t; -struct vm_object { - unsigned int reserved; -}; - enum vm_model { VM_MODEL_FLAT = 1, VM_MODEL_SPARSE, }; enum vm_prot { - VM_PROT_READ = 0x01u, - VM_PROT_WRITE = 0x02u, - VM_PROT_EXEC = 0x04u, - VM_PROT_USER = 0x08u, - VM_PROT_SVR = 0x10u, + VM_PROT_READ = 0x01u, + VM_PROT_WRITE = 0x02u, + VM_PROT_EXEC = 0x04u, + VM_PROT_USER = 0x08u, + VM_PROT_SVR = 0x10u, VM_PROT_NOCACHE = 0x20u, }; enum vm_flags { - VM_NORMAL = 0x00u, + VM_NORMAL = 0x00u, VM_GET_DMA = 0x01u, }; enum vm_zone_id { - /* NOTE that these are used as indices into the node_zones array in vm/zone.c - they need to be continuous, and must start at 0! + /* NOTE that these are used as indices into the node_zones array in + vm/zone.c they need to be continuous, and must start at 0! not all of these zones are implemented for every architecture. 
*/ - VM_ZONE_DMA = 0u, - VM_ZONE_NORMAL = 1u, + VM_ZONE_DMA = 0u, + VM_ZONE_NORMAL = 1u, VM_ZONE_HIGHMEM = 2u, }; @@ -108,27 +105,28 @@ enum vm_page_order { }; enum vm_page_flags { - /* page is reserved (probably by a call to memblock_reserve()) and cannot be - returned by any allocation function */ - VM_PAGE_RESERVED = 0x01u, + /* page is reserved (probably by a call to memblock_reserve()) and + cannot be returned by any allocation function */ + VM_PAGE_RESERVED = 0x01u, /* page has been allocated by a zone's buddy allocator, and is in-use */ - VM_PAGE_ALLOC = 0x02u, + VM_PAGE_ALLOC = 0x02u, /* page is the first page of a huge-page */ - VM_PAGE_HEAD = 0x04u, + VM_PAGE_HEAD = 0x04u, /* page is part of a huge-page */ - VM_PAGE_HUGE = 0x08u, - /* page is holding cached data from secondary storage, and can be freed if necessary (and not dirty). */ - VM_PAGE_CACHE = 0x10u, + VM_PAGE_HUGE = 0x08u, + /* page is holding cached data from secondary storage, and can be freed + * if necessary (and not dirty). */ + VM_PAGE_CACHE = 0x10u, }; enum vm_memory_region_status { - VM_REGION_FREE = 0x01u, - VM_REGION_RESERVED = 0x02u, + VM_REGION_FREE = 0x01u, + VM_REGION_RESERVED = 0x02u, }; enum vm_cache_flags { VM_CACHE_OFFSLAB = 0x01u, - VM_CACHE_DMA = 0x02u + VM_CACHE_DMA = 0x02u }; struct vm_zone_descriptor { @@ -204,7 +202,7 @@ struct vm_slab { - s_freelist[s_free] should be set to the previous value of s_free. this is commented as it as flexible arrays are not supported in c++. */ - //unsigned int s_freelist[]; + // unsigned int s_freelist[]; }; struct vm_page { @@ -238,13 +236,14 @@ struct vm_page { struct queue_entry p_list; struct btree_node p_bnode; - /* btree_node contains three pointers, so provide three pointer-sized integers for - use if p_bnode isn't needed. */ + /* btree_node contains three pointers, so provide three + pointer-sized integers for use if p_bnode isn't needed. */ uintptr_t priv1[3]; }; union { - /* used by bcache when sector size is < page size. 
bitmap of present/missing sectors */ + /* used by bcache when sector size is < page size. bitmap of + * present/missing sectors */ DECLARE_BITMAP(p_blockbits, VM_MAX_SECTORS_PER_PAGE); uint32_t p_priv2; }; @@ -252,10 +251,12 @@ struct vm_page { union { /* sector address, used by bcache */ sectors_t p_blockid; + /* offset of this page within the vm_object it is a part of */ + off_t p_vmo_offset; uint32_t p_priv3[2]; }; -} __attribute__((aligned(2 * sizeof(unsigned long)))); +} __aligned(2 * sizeof(unsigned long)); /* represents a sector of memory, containing its own array of vm_pages. this struct is used under the sparse memory model, instead of the @@ -272,39 +273,54 @@ struct vm_sector { struct vm_page *s_pages; }; -extern kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zones); +extern kern_status_t vm_bootstrap( + const struct vm_zone_descriptor *zones, + size_t nr_zones); extern enum vm_model vm_memory_model(void); extern void vm_set_memory_model(enum vm_model model); extern struct vm_pg_data *vm_pg_data_get(vm_node_id_t node); -extern phys_addr_t vm_virt_to_phys(void *p); +extern phys_addr_t vm_virt_to_phys(const void *p); extern void *vm_phys_to_virt(phys_addr_t p); -extern void vm_page_init_array(); +extern size_t vm_page_order_to_bytes(enum vm_page_order order); +extern size_t vm_page_order_to_pages(enum vm_page_order order); +extern vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order); +extern void vm_page_init_array(void); extern struct vm_page *vm_page_get(phys_addr_t addr); extern phys_addr_t vm_page_get_paddr(struct vm_page *pg); extern struct vm_zone *vm_page_get_zone(struct vm_page *pg); extern void *vm_page_get_vaddr(struct vm_page *pg); extern size_t vm_page_get_pfn(struct vm_page *pg); -extern size_t vm_page_order_to_bytes(enum vm_page_order order); -extern size_t vm_page_order_to_pages(enum vm_page_order order); -extern vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order); -extern struct 
vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags); +extern struct vm_page *vm_page_alloc( + enum vm_page_order order, + enum vm_flags flags); extern void vm_page_free(struct vm_page *pg); -extern int vm_page_split(struct vm_page *pg, struct vm_page **a, struct vm_page **b); +extern int vm_page_split( + struct vm_page *pg, + struct vm_page **a, + struct vm_page **b); extern struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b); extern struct vm_page *vm_page_get_buddy(struct vm_page *pg); extern struct vm_page *vm_page_get_next_tail(struct vm_page *pg); extern size_t vm_bytes_to_pages(size_t bytes); -extern void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info); -extern struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags); +extern void vm_zone_init( + struct vm_zone *z, + const struct vm_zone_descriptor *zone_info); +extern struct vm_page *vm_zone_alloc_page( + struct vm_zone *z, + enum vm_page_order order, + enum vm_flags flags); extern void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg); -extern struct vm_cache *vm_cache_create(const char *name, size_t objsz, enum vm_cache_flags flags); +extern struct vm_cache *vm_cache_create( + const char *name, + size_t objsz, + enum vm_cache_flags flags); extern void vm_cache_init(struct vm_cache *cache); extern void vm_cache_destroy(struct vm_cache *cache); extern void *vm_cache_alloc(struct vm_cache *cache, enum vm_flags flags); @@ -330,15 +346,18 @@ extern size_t vm_page_get_pfn_sparse(struct vm_page *pg); #endif #ifdef __cplusplus -inline void *operator new(size_t count, void *p) { return p; } +inline void *operator new(size_t count, void *p) +{ + return p; +} -#define kmalloc_object(objtype, flags, ...) \ - __extension__({ \ - void *p = kmalloc(sizeof(objtype), flags); \ - if (p) { \ - new (p) objtype(__VA_ARGS__); \ - } \ - (objtype *)p; \ +#define kmalloc_object(objtype, flags, ...) 
\ + __extension__({ \ + void *p = kmalloc(sizeof(objtype), flags); \ + if (p) { \ + new (p) objtype(__VA_ARGS__); \ + } \ + (objtype *)p; \ }) #endif diff --git a/kernel/panic.c b/kernel/panic.c index 8c31dbe..7d5e2cc 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -1,13 +1,13 @@ -#include -#include +#include #include +#include #include #include -#include +#include static int has_panicked = 0; -void panic_irq(struct cpu_context *ctx, const char *fmt, ...) +void panic_irq(struct ml_cpu_context *ctx, const char *fmt, ...) { char buf[512]; va_list args; @@ -22,7 +22,10 @@ void panic_irq(struct cpu_context *ctx, const char *fmt, ...) struct thread *thr = current_thread(); if (task && thr) { - printk("task: %s (id: %d, thread: %d)", task->t_name, task->t_id, thr->tr_id); + printk("task: %s (id: %d, thread: %d)", + task->t_name, + task->t_id, + thr->tr_id); } else { printk("task: [bootstrap]"); } diff --git a/libc/ctype/ctype.c b/libc/ctype/ctype.c index 0b5d06e..d0995ee 100644 --- a/libc/ctype/ctype.c +++ b/libc/ctype/ctype.c @@ -18,77 +18,76 @@ #include #include -static unsigned int random_seed = 53455346; - -int isupper(int c) { return (c >= 65 && c <= 90); } - -int islower(int c) { return (c >= 97 && c <= 122); } - -int toupper(int c) { - if (!islower(c)) { - return c; - } - - return c - 32; +int isupper(int c) +{ + return (c >= 65 && c <= 90); } -int tolower(int c) { - if (!isupper(c)) { - return c; - } - - return c + 32; +int islower(int c) +{ + return (c >= 97 && c <= 122); } -int isdigit(int c) { return (c >= 48 && c <= 57); } +int toupper(int c) +{ + if (!islower(c)) { + return c; + } -int isalpha(int c) { return (c >= 65 && c <= 90) || (c >= 97 && c <= 122); } - -int isalnum(int c) { return isalpha(c) | isdigit(c); } - -int iscntrl(int c) { return (c <= 31) || (c == 127); } - -int isprint(int c) { return (c >= 32 && c <= 126) || (c >= 128 && c <= 254); } - -int isgraph(int c) { return isprint(c) && c != 32; } - -int ispunct(int c) { return isgraph(c) && 
!isalnum(c); } - -int isspace(int c) { - return (c == ' ') || (c == '\t') || (c == '\n') || (c == '\v') || - (c == '\f') || (c == '\r'); + return c - 32; } -int isxdigit(int c) { - return isdigit(c) || (c >= 65 && c <= 70) || (c >= 97 && c <= 102); +int tolower(int c) +{ + if (!isupper(c)) { + return c; + } + + return c + 32; } -bool fill_random(unsigned char *buffer, unsigned int size) { - if (!buffer || !size) { - return false; - } - - for (uint32_t i = 0; i < size; i++) { - uint32_t next = random_seed; - uint32_t result; - - next *= 1103515245; - next += 12345; - result = (uint32_t)(next / 65536) % 2048; - - next *= 1103515245; - next += 12345; - result <<= 10; - result ^= (uint32_t)(next / 65536) % 1024; - - next *= 1103515245; - next += 12345; - result <<= 10; - result ^= (uint32_t)(next / 65536) % 1024; - random_seed = next; - - buffer[i] = (uint8_t)(result % 256); - } - - return true; +int isdigit(int c) +{ + return (c >= 48 && c <= 57); +} + +int isalpha(int c) +{ + return (c >= 65 && c <= 90) || (c >= 97 && c <= 122); +} + +int isalnum(int c) +{ + return isalpha(c) | isdigit(c); +} + +int iscntrl(int c) +{ + return (c <= 31) || (c == 127); +} + +int isprint(int c) +{ + return (c >= 32 && c <= 126) || (c >= 128 && c <= 254); +} + +int isgraph(int c) +{ + return isprint(c) && c != 32; +} + +int ispunct(int c) +{ + return isgraph(c) && !isalnum(c); +} + +int isspace(int c) +{ + return (c == ' ') || (c == '\t') || (c == '\n') || (c == '\v') + || (c == '\f') || (c == '\r'); +} + +int isxdigit(int c) +{ + return isdigit(c) || (c >= 65 && c <= 70) || (c >= 97 && c <= 102); } diff --git a/sched/core.c b/sched/core.c index d44d083..e29a652 100644 --- a/sched/core.c +++ b/sched/core.c @@ -1,9 +1,9 @@ -#include -#include #include #include -#include #include +#include +#include +#include extern kern_status_t setup_kernel_task(void); extern kern_status_t setup_idle_task(void); @@ -37,8 +37,14 @@ kern_status_t sched_init(void) return status; } - struct thread 
*this_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&kernel_task()->t_threads)); - struct thread *idle_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&idle_task()->t_threads)); + struct thread *this_thread = QUEUE_CONTAINER( + struct thread, + tr_threads, + queue_first(&kernel_task()->t_threads)); + struct thread *idle_thread = QUEUE_CONTAINER( + struct thread, + tr_threads, + queue_first(&idle_task()->t_threads)); struct cpu_data *this_cpu = get_this_cpu(); rq_init(&this_cpu->c_rq); @@ -55,7 +61,8 @@ kern_status_t sched_init(void) static void expire_timers(struct cpu_data *cpu) { - queue_foreach(struct timer, timer, &cpu->c_timers, t_entry) { + queue_foreach(struct timer, timer, &cpu->c_timers, t_entry) + { if (timer->t_expiry <= clock_ticks) { timer->t_callback(timer); } @@ -102,7 +109,8 @@ void __schedule(enum sched_mode mode) enum thread_state prev_state = READ_ONCE(prev->tr_state); - if ((mode == SCHED_IRQ || prev_state == THREAD_READY) && prev != rq->rq_idle) { + if ((mode == SCHED_IRQ || prev_state == THREAD_READY) + && prev != rq->rq_idle) { rq_enqueue(rq, prev); } @@ -213,7 +221,8 @@ void end_charge_period(void) self->tr_charge_period_start = 0; - //printk("%llu cycles charged to %s/%u", charge, self->tr_parent->t_name, self->tr_parent->t_id); + // printk("%llu cycles charged to %s/%u", charge, + // self->tr_parent->t_name, self->tr_parent->t_id); } cycles_t default_quantum(void) diff --git a/sched/task.c b/sched/task.c index 211117b..92011ad 100644 --- a/sched/task.c +++ b/sched/task.c @@ -1,10 +1,10 @@ -#include -#include #include -#include -#include #include #include +#include +#include +#include +#include #define TASK_CAST(p) OBJECT_C_CAST(struct task, t_base, &task_type, p) @@ -20,7 +20,12 @@ static struct task *__idle_task; static spin_lock_t task_list_lock; static struct btree task_list; -BTREE_DEFINE_SIMPLE_GET(struct task, unsigned int, t_tasklist, t_id, task_list_get) +BTREE_DEFINE_SIMPLE_GET( + struct task, + 
unsigned int, + t_tasklist, + t_id, + task_list_get) BTREE_DEFINE_SIMPLE_INSERT(struct task, t_tasklist, t_id, task_list_insert) struct task *kernel_task(void) @@ -51,7 +56,10 @@ kern_status_t setup_kernel_task(void) __kernel_task->t_pmap = get_kernel_pmap(); __kernel_task->t_state = TASK_RUNNING; - snprintf(__kernel_task->t_name, sizeof __kernel_task->t_name, "kernel_task"); + snprintf( + __kernel_task->t_name, + sizeof __kernel_task->t_name, + "kernel_task"); struct thread *kernel_thread = thread_alloc(); kernel_thread->tr_id = 0; @@ -62,7 +70,9 @@ kern_status_t setup_kernel_task(void) unsigned long flags; task_lock_irqsave(__kernel_task, &flags); - queue_push_back(&__kernel_task->t_threads, &kernel_thread->tr_threads); + queue_push_back( + &__kernel_task->t_threads, + &kernel_thread->tr_threads); task_unlock_irqrestore(__kernel_task, flags); spin_lock_irqsave(&task_list_lock, &flags); diff --git a/vm/bootstrap.c b/vm/bootstrap.c index 8851cac..37888b9 100644 --- a/vm/bootstrap.c +++ b/vm/bootstrap.c @@ -1,17 +1,18 @@ -#include #include -#include +#include #include #include -#include +#include +#include #include -#include #include /* One struct vm_pg_data per NUMA node. */ static struct vm_pg_data *node_data = NULL; -kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zones) +kern_status_t vm_bootstrap( + const struct vm_zone_descriptor *zones, + size_t nr_zones) { int numa_count = 1; @@ -34,7 +35,7 @@ kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zon } for (size_t i = 0; i < nr_zones; i++) { - vm_zone_init(&node_data->pg_zones[zones[i].zd_id], &zones[i]); + vm_zone_init(&node_data->pg_zones[zones[i].zd_id], &zones[i]); } kmalloc_init(); diff --git a/vm/memblock.c b/vm/memblock.c index 929452d..3ab9e44 100644 --- a/vm/memblock.c +++ b/vm/memblock.c @@ -19,27 +19,29 @@ contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
*/ -#include #include -#include #include #include +#include +#include -#define MIN(a, b) ((a) < (b) ? (a) : (b)) -#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) #define ITER(a, b) ((uint64_t)(a) | ((uint64_t)(b) << 32)) -#define ITER_END ULLONG_MAX +#define ITER_END ULLONG_MAX #define IDX_A(idx) ((idx) & 0xFFFFFFFF) #define IDX_B(idx) (((idx) >> 32) & 0xFFFFFFFF) /* the maximum possible value for a pointer type. Note that any pointers returned by the memblock API will still be bounded by the defined memory regions, and not by this constant. */ -#define ADDR_MAX (~(uintptr_t)0) +#define ADDR_MAX (~(uintptr_t)0) -static struct memblock_region init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT]; -static struct memblock_region init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT]; +static struct memblock_region + init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT]; +static struct memblock_region + init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT]; static phys_addr_t do_alloc(size_t size, phys_addr_t align); @@ -59,16 +61,21 @@ static void memblock_double_capacity(struct memblock_type *type) { size_t new_max = type->max * 2; - phys_addr_t new_regions_p = do_alloc(new_max * sizeof(struct memblock_region), 8); + phys_addr_t new_regions_p + = do_alloc(new_max * sizeof(struct memblock_region), 8); void *new_regions = (void *)(new_regions_p + memblock.m_voffset); - memcpy(new_regions, type->regions, type->count * sizeof(struct memblock_region)); + memcpy(new_regions, + type->regions, + type->count * sizeof(struct memblock_region)); type->regions = new_regions; type->max = new_max; } -static int memblock_insert_region(struct memblock_type *type, struct memblock_region *to_add) +static int memblock_insert_region( + struct memblock_type *type, + struct memblock_region *to_add) { unsigned int i = 0; @@ -110,13 +117,17 @@ static int memblock_remove_region(struct memblock_type 
*type, unsigned int i) int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset) { memblock.m_alloc_start = alloc_start; - memblock.m_alloc_end =alloc_end; + memblock.m_alloc_end = alloc_end; memblock.m_voffset = voffset; return 0; } -int memblock_add_range(struct memblock_type *type, uintptr_t base, size_t size, enum memblock_region_status status) +int memblock_add_range( + struct memblock_type *type, + uintptr_t base, + size_t size, + enum memblock_region_status status) { if (size == 0) { return 0; @@ -131,14 +142,17 @@ int memblock_add_range(struct memblock_type *type, uintptr_t base, size_t size, return 0; } - struct memblock_region new_region = { .base = base, .limit = limit, .status = status }; + struct memblock_region new_region + = {.base = base, .limit = limit, .status = status}; - /* two regions with different statuses CANNOT intersect. we first need to check - to make sure the region being added doesn't violate this rule. */ + /* two regions with different statuses CANNOT intersect. we first need + to check to make sure the region being added doesn't violate this + rule. 
*/ for (unsigned int i = 0; i < type->count; i++) { struct memblock_region *cur_region = &type->regions[i]; - if (new_region.base > cur_region->limit || new_region.limit < cur_region->base) { + if (new_region.base > cur_region->limit + || new_region.limit < cur_region->base) { continue; } @@ -154,47 +168,67 @@ int memblock_add_range(struct memblock_type *type, uintptr_t base, size_t size, for (unsigned int i = 0; i < type->count; i++) { struct memblock_region *cur_region = &type->regions[i]; - /* case 1: the region being added and the current region have no connection what-so-ever (no overlaps) */ - if (cur_region->limit + 1 < new_region.base || cur_region->base > new_region.limit) { + /* case 1: the region being added and the current region have no + * connection what-so-ever (no overlaps) */ + if (cur_region->limit + 1 < new_region.base + || cur_region->base > new_region.limit) { continue; } - /* case 2: the region being added matches a region already in the list. */ - if (cur_region->base == new_region.base && cur_region->limit == new_region.limit) { + /* case 2: the region being added matches a region already in + * the list. */ + if (cur_region->base == new_region.base + && cur_region->limit == new_region.limit) { /* nothing needs to be done */ add_new = false; break; } - - /* case 3: the region being added completely contains a region already in the list. */ - if (cur_region->base > new_region.base && cur_region->limit <= new_region.limit) { + /* case 3: the region being added completely contains a region + * already in the list. */ + if (cur_region->base > new_region.base + && cur_region->limit <= new_region.limit) { memblock_remove_region(type, i); - /* after memblock_remove_region(), a different region will have moved into the array slot referenced by i. - decrementing i means we'll stay at the current index and process this region. */ + /* after memblock_remove_region(), a different region + will have moved into the array slot referenced by i. 
+ decrementing i means we'll stay at the current index + and process this region. */ i--; continue; } + /* case 4: the region being added meets or partially overlaps a + * region already in the list. */ - /* case 4: the region being added meets or partially overlaps a region already in the list. */ - - /* there can be an overlap at the beginning and the end of the region being added, - anything else is either a full overlap (case 3) or not within the region being added at all. - to handle this, remove the region that's already in the list and extend the region being added to cover it. - the two regions may overlap and have incompatible statuses, but this case was handled earlier in this function. */ - if ((new_region.base > cur_region->base || new_region.base == cur_region->limit - 1) && new_region.status == cur_region->status) { - /* the new region overlaps the END of the current region, change the base of the new region to match that of the current region. */ + /* there can be an overlap at the beginning and the end of the + region being added, anything else is either a full overlap + (case 3) or not within the region being added at all. to + handle this, remove the region that's already in the list and + extend the region being added to cover it. the two regions + may overlap and have incompatible statuses, but this case was + handled earlier in this function. */ + if ((new_region.base > cur_region->base + || new_region.base == cur_region->limit - 1) + && new_region.status == cur_region->status) { + /* the new region overlaps the END of the current + * region, change the base of the new region to match + * that of the current region. */ new_region.base = cur_region->base; - } else if ((new_region.base < cur_region->base || new_region.limit + 1 == cur_region->base) && new_region.status == cur_region->status) { - /* the new region overlaps the BEGINNING of the current region, change the limit of the new region to match that of the current region. 
*/ + } else if ( + (new_region.base < cur_region->base + || new_region.limit + 1 == cur_region->base) + && new_region.status == cur_region->status) { + /* the new region overlaps the BEGINNING of the current + * region, change the limit of the new region to match + * that of the current region. */ new_region.limit = cur_region->limit; } else { continue; } - /* with the new region updated to include the current region, we can remove the current region from the list */ + /* with the new region updated to include the current region, we + * can remove the current region from the list */ memblock_remove_region(type, i); i--; } @@ -216,7 +250,11 @@ int memblock_add(uintptr_t base, size_t size) memblock_double_capacity(&memblock.memory); } - return memblock_add_range(&memblock.memory, base, size, MEMBLOCK_MEMORY); + return memblock_add_range( + &memblock.memory, + base, + size, + MEMBLOCK_MEMORY); } int memblock_reserve(uintptr_t base, size_t size) @@ -225,7 +263,11 @@ int memblock_reserve(uintptr_t base, size_t size) memblock_double_capacity(&memblock.reserved); } - return memblock_add_range(&memblock.reserved, base, size, MEMBLOCK_RESERVED); + return memblock_add_range( + &memblock.reserved, + base, + size, + MEMBLOCK_RESERVED); } static phys_addr_t do_alloc(size_t size, phys_addr_t align) @@ -245,7 +287,8 @@ static phys_addr_t do_alloc(size_t size, phys_addr_t align) phys_addr_t region_end = memblock.m_alloc_end - memblock.m_voffset; struct memblock_iter it; - for_each_free_mem_range (&it, region_start, region_end) { + for_each_free_mem_range(&it, region_start, region_end) + { phys_addr_t base = it.it_base; if (base & (align - 1)) { base &= ~(align - 1); @@ -270,7 +313,11 @@ static phys_addr_t do_alloc(size_t size, phys_addr_t align) return 0; } - int status = memblock_add_range(&memblock.reserved, allocated_base, allocated_limit - allocated_base, MEMBLOCK_ALLOC); + int status = memblock_add_range( + &memblock.reserved, + allocated_base, + allocated_limit - 
allocated_base, + MEMBLOCK_ALLOC); if (status != 0) { return 0; } @@ -312,9 +359,11 @@ int memblock_free_phys(phys_addr_t addr, size_t size) } void __next_memory_region( - struct memblock_iter *it, - struct memblock_type *type_a, struct memblock_type *type_b, - uintptr_t start, uintptr_t end) + struct memblock_iter *it, + struct memblock_type *type_a, + struct memblock_type *type_b, + uintptr_t start, + uintptr_t end) { unsigned int idx_a = IDX_A(it->__idx); unsigned int idx_b = IDX_B(it->__idx); @@ -344,70 +393,85 @@ void __next_memory_region( } if (m_start > end) { - /* we have gone past the requested memory range and can now stop */ + /* we have gone past the requested memory range and can + * now stop */ break; } for (; idx_b < type_b->count + 1; idx_b++) { struct memblock_region *r = &type_b->regions[idx_b]; - /* r_start and r_end delimit the region of memory between the current and previous reserved regions. - if we have gone past the last reserved region, these variables delimit the range between the end - of the last reserved region and the end of memory. */ + /* r_start and r_end delimit the region of memory + between the current and previous reserved regions. if + we have gone past the last reserved region, these + variables delimit the range between the end of the + last reserved region and the end of memory. */ uintptr_t r_start = idx_b > 0 ? r[-1].limit + 1 : 0; uintptr_t r_end; if (idx_b < type_b->count) { r_end = r->base; - /* we decrement r_end to get the address of the last byte of the free region. - if r_end is already zero, there is a reserved region starting at address 0x0. - as long as r_end == r_start == 0x00000, we will skip this region. */ + /* we decrement r_end to get the address of the + last byte of the free region. if r_end is + already zero, there is a reserved region + starting at address 0x0. as long as r_end == + r_start == 0x00000, we will skip this region. 
+ */ if (r_end) { r_end--; } } else { - /* this maximum value will be clamped to the bounds of memblock.memory - before being returned to the caller */ + /* this maximum value will be clamped to the + bounds of memblock.memory before being + returned to the caller */ r_end = ADDR_MAX; } if (r_start >= r_end) { - /* this free region has a length of zero, move to the next one */ + /* this free region has a length of zero, move + * to the next one */ continue; } if (r_start >= m_end) { - /* we've gone past the end of the current memory region, and need to go to the next one */ + /* we've gone past the end of the current memory + * region, and need to go to the next one */ break; } - /* we've already gone past this free memory region. move to the next one */ + /* we've already gone past this free memory region. move + * to the next one */ if (m_start >= r_end) { continue; } /* we want the area that is overlapped by both - region M (m_start - m_end) : The region defined as system memory. - region R (r_start - r_end) : The region defined as free / outside of any reserved regions. + region M (m_start - m_end) : The region defined + as system memory. region R (r_start - r_end) : The + region defined as free / outside of any reserved + regions. */ it->it_base = MAX(m_start, r_start); it->it_limit = MIN(m_end, r_end); - /* further limit the region to the intersection between the region itself and the - specified iteration bounds */ + /* further limit the region to the intersection between + the region itself and the specified iteration bounds + */ it->it_base = MAX(it->it_base, start); it->it_limit = MIN(it->it_limit, end); if (it->it_limit <= it->it_base) { - /* this region is not part of the specified bounds, skip it. */ + /* this region is not part of the specified + * bounds, skip it. 
*/ continue; } it->it_status = MEMBLOCK_MEMORY; - /* whichever region is smaller, increment the pointer for that type, so we can - compare the larger region with the next region of the incremented type. */ + /* whichever region is smaller, increment the pointer + for that type, so we can compare the larger region + with the next region of the incremented type. */ if (m_end <= r_end) { idx_a++; } else { diff --git a/vm/page.c b/vm/page.c index c5eef91..6384207 100644 --- a/vm/page.c +++ b/vm/page.c @@ -1,41 +1,41 @@ -#include +#include #include #include +#include #include -#include /* Pre-calculated page order -> size conversion table */ static size_t page_order_bytes[] = { - [VM_PAGE_4K] = 0x1000, - [VM_PAGE_8K] = 0x2000, - [VM_PAGE_16K] = 0x4000, - [VM_PAGE_32K] = 0x8000, - [VM_PAGE_64K] = 0x10000, + [VM_PAGE_4K] = 0x1000, + [VM_PAGE_8K] = 0x2000, + [VM_PAGE_16K] = 0x4000, + [VM_PAGE_32K] = 0x8000, + [VM_PAGE_64K] = 0x10000, [VM_PAGE_128K] = 0x20000, [VM_PAGE_256K] = 0x40000, [VM_PAGE_512K] = 0x80000, - [VM_PAGE_1M] = 0x100000, - [VM_PAGE_2M] = 0x200000, - [VM_PAGE_4M] = 0x400000, - [VM_PAGE_8M] = 0x800000, - [VM_PAGE_16M] = 0x1000000, - [VM_PAGE_32M] = 0x2000000, - [VM_PAGE_64M] = 0x4000000, + [VM_PAGE_1M] = 0x100000, + [VM_PAGE_2M] = 0x200000, + [VM_PAGE_4M] = 0x400000, + [VM_PAGE_8M] = 0x800000, + [VM_PAGE_16M] = 0x1000000, + [VM_PAGE_32M] = 0x2000000, + [VM_PAGE_64M] = 0x4000000, [VM_PAGE_128M] = 0x8000000, /* vm can support pages of this size, but - struct vm_page only has 4 bits with which to store - the page order, which cannot accomodate these - larger order numbers */ + struct vm_page only has 4 bits with which to store + the page order, which cannot accomodate these + larger order numbers */ [VM_PAGE_256M] = 0x10000000, [VM_PAGE_512M] = 0x20000000, - [VM_PAGE_1G] = 0x40000000, - [VM_PAGE_2G] = 0x80000000, - [VM_PAGE_4G] = 0x100000000, - [VM_PAGE_8G] = 0x200000000, - [VM_PAGE_16G] = 0x400000000, - [VM_PAGE_32G] = 0x800000000, - [VM_PAGE_64G] = 
0x1000000000, + [VM_PAGE_1G] = 0x40000000, + [VM_PAGE_2G] = 0x80000000, + [VM_PAGE_4G] = 0x100000000, + [VM_PAGE_8G] = 0x200000000, + [VM_PAGE_16G] = 0x400000000, + [VM_PAGE_32G] = 0x800000000, + [VM_PAGE_64G] = 0x1000000000, }; phys_addr_t vm_virt_to_phys(void *p) @@ -56,7 +56,8 @@ phys_addr_t vm_virt_to_phys(void *p) void *vm_phys_to_virt(phys_addr_t p) { - if (p >= (memblock.m_alloc_start - memblock.m_voffset) && p < (memblock.m_alloc_end - memblock.m_voffset)) { + if (p >= (memblock.m_alloc_start - memblock.m_voffset) + && p < (memblock.m_alloc_end - memblock.m_voffset)) { return memblock_phys_to_virt(p); } @@ -124,11 +125,10 @@ vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order) return ~(page_order_bytes[order] - 1); } - size_t vm_bytes_to_pages(size_t bytes) { - if (bytes & (VM_PAGE_SIZE-1)) { - bytes &= ~(VM_PAGE_SIZE-1); + if (bytes & (VM_PAGE_SIZE - 1)) { + bytes &= ~(VM_PAGE_SIZE - 1); bytes += VM_PAGE_SIZE; } @@ -150,7 +150,6 @@ struct vm_zone *vm_page_get_zone(struct vm_page *pg) return &node->pg_zones[pg->p_zone]; } - struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags) { /* TODO prefer nodes closer to us */ @@ -232,7 +231,8 @@ struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b) return NULL; } - if ((a->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED)) != (b->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED))) { + if ((a->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED)) + != (b->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED))) { return NULL; } diff --git a/vm/sparse.c b/vm/sparse.c index 9c63bf9..286a5b4 100644 --- a/vm/sparse.c +++ b/vm/sparse.c @@ -22,13 +22,13 @@ of the sparse memory model may be outweighed by the extra overhead, and the flat memory model may be a better choice. 
*/ -#include #include -#include -#include -#include -#include #include +#include +#include +#include +#include +#include static struct vm_sector *sector_array = NULL; static size_t sector_array_count = 0; @@ -53,11 +53,16 @@ static enum sector_coverage_mode get_sector_coverage_mode(void) return SECTOR_COVERAGE_ALL; } - printk("vm: [sparse] ignoring unknown sector coverage mode '%s', using FREE", arg); + printk("vm: [sparse] ignoring unknown sector coverage mode '%s', using " + "FREE", + arg); return SECTOR_COVERAGE_FREE; } -static struct vm_sector *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sector_id, size_t *index) +static struct vm_sector *phys_addr_to_sector_and_index( + phys_addr_t addr, + size_t *sector_id, + size_t *index) { /* all sectors have the same size */ size_t step = vm_page_order_to_bytes(sector_array[0].s_size); @@ -98,7 +103,6 @@ static struct vm_page *get_or_create_page(phys_addr_t addr) } } - sector->s_pages[page_number].p_sector = sector_number; return &sector->s_pages[page_number]; } @@ -123,9 +127,13 @@ static enum vm_page_order find_minimum_sector_size(phys_addr_t pmem_end) are in need of improvement to ensure that sparse works well on a wide range of systems. */ static void calculate_sector_size_and_count( - size_t last_reserved_pfn, size_t last_free_pfn, size_t limit_pfn, - size_t reserved_size, size_t free_size, - unsigned int *out_sector_count, enum vm_page_order *out_sector_size) + size_t last_reserved_pfn, + size_t last_free_pfn, + size_t limit_pfn, + size_t reserved_size, + size_t free_size, + unsigned int *out_sector_count, + enum vm_page_order *out_sector_size) { /* we can support up to VM_MAX_SECTORS memory sectors. the minimum sector size is what ever is required @@ -154,8 +162,8 @@ static void calculate_sector_size_and_count( threshold.
*/ sector_size++; - /* if the difference is particularly big, increase the sector size - even further */ + /* if the difference is particularly big, increase the sector + size even further */ if (memdiff >= 0x1000000) { sector_size++; } @@ -183,13 +191,15 @@ void vm_sparse_init(void) size_t last_reserved_pfn = 0, last_free_pfn = 0; struct memblock_iter it; - for_each_mem_range (&it, 0x0, UINTPTR_MAX) { + for_each_mem_range(&it, 0x0, UINTPTR_MAX) + { if (pmem_limit < it.it_limit + 1) { pmem_limit = it.it_limit + 1; } } - for_each_free_mem_range (&it, 0x0, UINTPTR_MAX) { + for_each_free_mem_range(&it, 0x0, UINTPTR_MAX) + { free_size += it.it_limit - it.it_base + 1; size_t last_pfn = it.it_limit / VM_PAGE_SIZE; @@ -199,7 +209,8 @@ void vm_sparse_init(void) } } - for_each_reserved_mem_range (&it, 0x0, UINTPTR_MAX) { + for_each_reserved_mem_range(&it, 0x0, UINTPTR_MAX) + { reserved_size += it.it_limit - it.it_base + 1; size_t last_pfn = it.it_limit / VM_PAGE_SIZE; @@ -212,7 +223,8 @@ void vm_sparse_init(void) enum sector_coverage_mode mode = get_sector_coverage_mode(); phys_addr_t pmem_end = 0; - enum vm_page_order sector_size = find_minimum_sector_size(last_free_pfn); + enum vm_page_order sector_size + = find_minimum_sector_size(last_free_pfn); if (mode == SECTOR_COVERAGE_FREE) { pmem_end = last_free_pfn * VM_PAGE_SIZE; } else { @@ -224,50 +236,63 @@ void vm_sparse_init(void) size_t sector_bytes = 0; unsigned int nr_sectors = 0; calculate_sector_size_and_count( - last_reserved_pfn, last_free_pfn, pmem_end / VM_PAGE_SIZE, - reserved_size, free_size, - &nr_sectors, &sector_size); + last_reserved_pfn, + last_free_pfn, + pmem_end / VM_PAGE_SIZE, + reserved_size, + free_size, + &nr_sectors, + &sector_size); sector_bytes = vm_page_order_to_bytes(sector_size); char sector_size_str[64]; - data_size_to_string(sector_bytes, sector_size_str, sizeof sector_size_str); + data_size_to_string( + sector_bytes, + sector_size_str, + sizeof sector_size_str); sector_array = kzalloc(sizeof(struct
vm_sector) * nr_sectors, 0); sector_array_count = nr_sectors; for (unsigned int i = 0; i < nr_sectors; i++) { sector_array[i].s_size = sector_size; - sector_array[i].s_first_pfn = (i * sector_bytes) >> VM_PAGE_SHIFT; + sector_array[i].s_first_pfn + = (i * sector_bytes) >> VM_PAGE_SHIFT; } size_t s, i; phys_addr_to_sector_and_index(0x3f00000, &s, &i); - for_each_free_mem_range(&it, 0x0, pmem_end) { + for_each_free_mem_range(&it, 0x0, pmem_end) + { if (it.it_base & VM_PAGE_MASK) { it.it_base &= ~VM_PAGE_MASK; it.it_base += VM_PAGE_SIZE; } - for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) { + for (phys_addr_t i = it.it_base; i < it.it_limit; + i += VM_PAGE_SIZE) { struct vm_page *pg = get_or_create_page(i); pg->p_flags = 0; } } - for_each_reserved_mem_range(&it, 0x0, pmem_end) { + for_each_reserved_mem_range(&it, 0x0, pmem_end) + { if (it.it_base & VM_PAGE_MASK) { it.it_base &= ~VM_PAGE_MASK; it.it_base += VM_PAGE_SIZE; } - for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) { + for (phys_addr_t i = it.it_base; i < it.it_limit; + i += VM_PAGE_SIZE) { struct vm_page *pg = vm_page_get(i); if (!pg) { - /* if the page doesn't exist, it is part of a sector - that only contains reserved pages. a NULL page - is implicitly treated as reserved */ + /* if the page doesn't exist, it is part of a + sector that only contains reserved pages. 
a + NULL page is implicitly treated as reserved + */ continue; } @@ -275,7 +300,9 @@ void vm_sparse_init(void) } } - printk("vm: [sparse] initialised %zu sectors of size %s", nr_sectors, sector_size_str); + printk("vm: [sparse] initialised %zu sectors of size %s", + nr_sectors, + sector_size_str); } struct vm_page *vm_page_get_sparse(phys_addr_t addr) @@ -288,7 +315,8 @@ struct vm_page *vm_page_get_sparse(phys_addr_t addr) struct vm_sector *sector = &sector_array[sector_number]; - if (!sector->s_pages || page_number >= vm_page_order_to_pages(sector->s_size)) { + if (!sector->s_pages + || page_number >= vm_page_order_to_pages(sector->s_size)) { return NULL; } @@ -298,5 +326,6 @@ struct vm_page *vm_page_get_sparse(phys_addr_t addr) size_t vm_page_get_pfn_sparse(struct vm_page *pg) { struct vm_sector *sector = &sector_array[pg->p_sector]; - return sector->s_first_pfn + (((uintptr_t)pg - (uintptr_t)sector->s_pages) / sizeof *pg); + return sector->s_first_pfn + + (((uintptr_t)pg - (uintptr_t)sector->s_pages) / sizeof *pg); }