kernel: adjust formatting

This commit is contained in:
2026-02-08 12:17:27 +00:00
parent 49a75a1bbe
commit 0490541dc9
14 changed files with 478 additions and 320 deletions

View File

@@ -12,12 +12,14 @@ extern "C" {
#ifdef __APPLE__
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used \
__section("__DATA,__initcall" __X(id) ".init") = (fn)
static initcall_t __initcall_##fn##id __used __section( \
"__DATA,__initcall" __X(id) ".init") \
= (fn)
#else
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used \
__section("initcall" __X(id) "_init") = (fn)
static initcall_t __initcall_##fn##id __used __section( \
"initcall" __X(id) "_init") \
= (fn)
#endif
extern int ml_init(uintptr_t arg);

View File

@@ -1,5 +1,5 @@
#include <mango/libc/string.h>
#include <mango/bitmap.h>
#include <mango/libc/string.h>
void bitmap_zero(unsigned long *map, unsigned long nbits)
{
@@ -38,7 +38,6 @@ bool bitmap_check(unsigned long *map, unsigned long bit)
unsigned long mask = 1ul << offset;
return (map[index] & mask) != 0 ? true : false;
}
unsigned int bitmap_count_set(unsigned long *map, unsigned long nbits)

View File

@@ -22,9 +22,9 @@
#ifndef MANGO_MEMBLOCK_H_
#define MANGO_MEMBLOCK_H_
#include <stddef.h>
#include <limits.h>
#include <mango/types.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
@@ -34,7 +34,8 @@ extern "C" {
#define MEMBLOCK_INIT_RESERVED_REGION_COUNT 128
#define __for_each_mem_range(i, type_a, type_b, p_start, p_end) \
for ((i)->__idx = 0, __next_memory_region(i, type_a, type_b, p_start, p_end); \
for ((i)->__idx = 0, \
__next_memory_region(i, type_a, type_b, p_start, p_end); \
(i)->__idx != ULLONG_MAX; \
__next_memory_region(i, type_a, type_b, p_start, p_end))
@@ -139,18 +140,24 @@ extern "C" {
- 0x10000 -> 0x1ffff
*/
#define for_each_free_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, &memblock.reserved, p_start, p_end)
__for_each_mem_range( \
i, \
&memblock.memory, \
&memblock.reserved, \
p_start, \
p_end)
typedef uint64_t memblock_index_t;
enum memblock_region_status {
/* Used in memblock.memory regions, indicates that the memory region exists */
/* Used in memblock.memory regions, indicates that the memory region
* exists */
MEMBLOCK_MEMORY = 0,
/* Used in memblock.reserved regions, indicates that the memory region was reserved
* by a call to memblock_alloc() */
/* Used in memblock.reserved regions, indicates that the memory region
* was reserved by a call to memblock_alloc() */
MEMBLOCK_ALLOC,
/* Used in memblock.reserved regions, indicates that the memory region was reserved
* by a call to memblock_reserve() */
/* Used in memblock.reserved regions, indicates that the memory region
* was reserved by a call to memblock_reserve() */
MEMBLOCK_RESERVED,
};
@@ -176,9 +183,10 @@ struct memblock {
/* bounds of the memory region that can be used by memblock_alloc()
both of these are virtual addresses */
uintptr_t m_alloc_start, m_alloc_end;
/* memblock assumes that all memory in the alloc zone is contiguously mapped
(if paging is enabled). m_voffset is the offset that needs to be added to
a given physical address to get the corresponding virtual address */
/* memblock assumes that all memory in the alloc zone is contiguously
mapped (if paging is enabled). m_voffset is the offset that needs to
be added to a given physical address to get the corresponding virtual
address */
uintptr_t m_voffset;
struct memblock_type memory;
@@ -212,7 +220,10 @@ extern int __next_mem_range(struct memblock_iter *it);
@param voffset the offset between the physical address of a given page and
its corresponding virtual address.
*/
extern int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset);
extern int memblock_init(
uintptr_t alloc_start,
uintptr_t alloc_end,
uintptr_t voffset);
/* add a region of memory to memblock.
@@ -234,7 +245,8 @@ extern int memblock_add(phys_addr_t base, size_t size);
reserved memory will not be used by memblock_alloc(), and will remain
reserved when the vm_page memory map is initialised.
@param base the physical address of the start of the memory region to reserve.
@param base the physical address of the start of the memory region to
reserve.
@param size the size of the memory region to reserve in bytes.
*/
extern int memblock_reserve(phys_addr_t base, size_t size);
@@ -319,9 +331,12 @@ extern phys_addr_t memblock_virt_to_phys(void *p);
*/
extern void *memblock_phys_to_virt(phys_addr_t p);
extern void __next_memory_region(struct memblock_iter *it, \
struct memblock_type *type_a, struct memblock_type *type_b,
phys_addr_t start, phys_addr_t end);
extern void __next_memory_region(
struct memblock_iter *it,
struct memblock_type *type_a,
struct memblock_type *type_b,
phys_addr_t start,
phys_addr_t end);
#ifdef __cplusplus
}

View File

@@ -3,10 +3,11 @@
#include <mango/compiler.h>
struct cpu_context;
struct ml_cpu_context;
#define panic(...) panic_irq(NULL, __VA_ARGS__)
extern void __noreturn panic_irq(struct cpu_context *ctx, const char *fmt, ...);
extern void __noreturn
panic_irq(struct ml_cpu_context *ctx, const char *fmt, ...);
#endif

View File

@@ -1,9 +1,9 @@
#ifndef MANGO_UTIL_H_
#define MANGO_UTIL_H_
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
@@ -15,13 +15,20 @@ extern "C" {
extern uint64_t hash_string(const char *s);
extern void data_size_to_string(size_t value, char *out, size_t outsz);
static inline bool power_of_2(size_t x) { return (x > 0 && (x & (x - 1)) == 0); }
static inline unsigned long long div64_pow2(unsigned long long x, unsigned long long y)
static inline bool power_of_2(size_t x)
{
return (x > 0 && (x & (x - 1)) == 0);
}
static inline unsigned long long div64_pow2(
unsigned long long x,
unsigned long long y)
{
return x >> (__builtin_ctz(y));
}
static inline unsigned long long absdiff64(unsigned long long x, unsigned long long y)
static inline unsigned long long absdiff64(
unsigned long long x,
unsigned long long y)
{
return x < y ? y - x : x - y;
}

View File

@@ -1,14 +1,14 @@
#ifndef MANGO_VM_H_
#define MANGO_VM_H_
#include <stddef.h>
#include <mango/types.h>
#include <mango/status.h>
#include <mango/queue.h>
#include <mango/btree.h>
#include <mango/bitmap.h>
#include <mango/btree.h>
#include <mango/locks.h>
#include <mango/machine/vm.h>
#include <mango/queue.h>
#include <mango/status.h>
#include <mango/types.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
@@ -33,7 +33,8 @@ struct bcache;
#define VM_CHECK_ALIGN(p, mask) ((((p) & (mask)) == (p)) ? 1 : 0)
#define VM_CACHE_INITIALISED(c) ((c)->c_obj_count != 0)
#define VM_PAGE_IS_FREE(pg) (((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0)
#define VM_PAGE_IS_FREE(pg) \
(((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0)
#define vm_page_foreach(pg, i) \
for (struct vm_page *i = (pg); i; i = vm_page_get_next_tail(i))
@@ -41,10 +42,6 @@ struct bcache;
typedef phys_addr_t vm_alignment_t;
typedef unsigned int vm_node_id_t;
struct vm_object {
unsigned int reserved;
};
enum vm_model {
VM_MODEL_FLAT = 1,
VM_MODEL_SPARSE,
@@ -65,8 +62,8 @@ enum vm_flags {
};
enum vm_zone_id {
/* NOTE that these are used as indices into the node_zones array in vm/zone.c
they need to be continuous, and must start at 0!
/* NOTE that these are used as indices into the node_zones array in
vm/zone.c; they need to be contiguous, and must start at 0!
not all of these zones are implemented for every architecture. */
VM_ZONE_DMA = 0u,
@@ -108,8 +105,8 @@ enum vm_page_order {
};
enum vm_page_flags {
/* page is reserved (probably by a call to memblock_reserve()) and cannot be
returned by any allocation function */
/* page is reserved (probably by a call to memblock_reserve()) and
cannot be returned by any allocation function */
VM_PAGE_RESERVED = 0x01u,
/* page has been allocated by a zone's buddy allocator, and is in-use */
VM_PAGE_ALLOC = 0x02u,
@@ -117,7 +114,8 @@ enum vm_page_flags {
VM_PAGE_HEAD = 0x04u,
/* page is part of a huge-page */
VM_PAGE_HUGE = 0x08u,
/* page is holding cached data from secondary storage, and can be freed if necessary (and not dirty). */
/* page is holding cached data from secondary storage, and can be freed
* if necessary (and not dirty). */
VM_PAGE_CACHE = 0x10u,
};
@@ -238,13 +236,14 @@ struct vm_page {
struct queue_entry p_list;
struct btree_node p_bnode;
/* btree_node contains three pointers, so provide three pointer-sized integers for
use if p_bnode isn't needed. */
/* btree_node contains three pointers, so provide three
pointer-sized integers for use if p_bnode isn't needed. */
uintptr_t priv1[3];
};
union {
/* used by bcache when sector size is < page size. bitmap of present/missing sectors */
/* used by bcache when sector size is < page size. bitmap of
* present/missing sectors */
DECLARE_BITMAP(p_blockbits, VM_MAX_SECTORS_PER_PAGE);
uint32_t p_priv2;
};
@@ -252,10 +251,12 @@ struct vm_page {
union {
/* sector address, used by bcache */
sectors_t p_blockid;
/* offset of this page within the vm_object it is a part of */
off_t p_vmo_offset;
uint32_t p_priv3[2];
};
} __attribute__((aligned(2 * sizeof(unsigned long))));
} __aligned(2 * sizeof(unsigned long));
/* represents a sector of memory, containing its own array of vm_pages.
this struct is used under the sparse memory model, instead of the
@@ -272,39 +273,54 @@ struct vm_sector {
struct vm_page *s_pages;
};
extern kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zones);
extern kern_status_t vm_bootstrap(
const struct vm_zone_descriptor *zones,
size_t nr_zones);
extern enum vm_model vm_memory_model(void);
extern void vm_set_memory_model(enum vm_model model);
extern struct vm_pg_data *vm_pg_data_get(vm_node_id_t node);
extern phys_addr_t vm_virt_to_phys(void *p);
extern phys_addr_t vm_virt_to_phys(const void *p);
extern void *vm_phys_to_virt(phys_addr_t p);
extern void vm_page_init_array();
extern size_t vm_page_order_to_bytes(enum vm_page_order order);
extern size_t vm_page_order_to_pages(enum vm_page_order order);
extern vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order);
extern void vm_page_init_array(void);
extern struct vm_page *vm_page_get(phys_addr_t addr);
extern phys_addr_t vm_page_get_paddr(struct vm_page *pg);
extern struct vm_zone *vm_page_get_zone(struct vm_page *pg);
extern void *vm_page_get_vaddr(struct vm_page *pg);
extern size_t vm_page_get_pfn(struct vm_page *pg);
extern size_t vm_page_order_to_bytes(enum vm_page_order order);
extern size_t vm_page_order_to_pages(enum vm_page_order order);
extern vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order);
extern struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags);
extern struct vm_page *vm_page_alloc(
enum vm_page_order order,
enum vm_flags flags);
extern void vm_page_free(struct vm_page *pg);
extern int vm_page_split(struct vm_page *pg, struct vm_page **a, struct vm_page **b);
extern int vm_page_split(
struct vm_page *pg,
struct vm_page **a,
struct vm_page **b);
extern struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b);
extern struct vm_page *vm_page_get_buddy(struct vm_page *pg);
extern struct vm_page *vm_page_get_next_tail(struct vm_page *pg);
extern size_t vm_bytes_to_pages(size_t bytes);
extern void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info);
extern struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags);
extern void vm_zone_init(
struct vm_zone *z,
const struct vm_zone_descriptor *zone_info);
extern struct vm_page *vm_zone_alloc_page(
struct vm_zone *z,
enum vm_page_order order,
enum vm_flags flags);
extern void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg);
extern struct vm_cache *vm_cache_create(const char *name, size_t objsz, enum vm_cache_flags flags);
extern struct vm_cache *vm_cache_create(
const char *name,
size_t objsz,
enum vm_cache_flags flags);
extern void vm_cache_init(struct vm_cache *cache);
extern void vm_cache_destroy(struct vm_cache *cache);
extern void *vm_cache_alloc(struct vm_cache *cache, enum vm_flags flags);
@@ -330,7 +346,10 @@ extern size_t vm_page_get_pfn_sparse(struct vm_page *pg);
#endif
#ifdef __cplusplus
inline void *operator new(size_t count, void *p) { return p; }
inline void *operator new(size_t count, void *p)
{
return p;
}
#define kmalloc_object(objtype, flags, ...) \
__extension__({ \

View File

@@ -1,13 +1,13 @@
#include <stdarg.h>
#include <mango/machine/panic.h>
#include <mango/cpu.h>
#include <mango/libc/stdio.h>
#include <mango/machine/panic.h>
#include <mango/printk.h>
#include <mango/sched.h>
#include <mango/cpu.h>
#include <stdarg.h>
static int has_panicked = 0;
void panic_irq(struct cpu_context *ctx, const char *fmt, ...)
void panic_irq(struct ml_cpu_context *ctx, const char *fmt, ...)
{
char buf[512];
va_list args;
@@ -22,7 +22,10 @@ void panic_irq(struct cpu_context *ctx, const char *fmt, ...)
struct thread *thr = current_thread();
if (task && thr) {
printk("task: %s (id: %d, thread: %d)", task->t_name, task->t_id, thr->tr_id);
printk("task: %s (id: %d, thread: %d)",
task->t_name,
task->t_id,
thr->tr_id);
} else {
printk("task: [bootstrap]");
}

View File

@@ -18,13 +18,18 @@
#include <stdbool.h>
#include <stdint.h>
static unsigned int random_seed = 53455346;
int isupper(int c)
{
return (c >= 65 && c <= 90);
}
int isupper(int c) { return (c >= 65 && c <= 90); }
int islower(int c)
{
return (c >= 97 && c <= 122);
}
int islower(int c) { return (c >= 97 && c <= 122); }
int toupper(int c) {
int toupper(int c)
{
if (!islower(c)) {
return c;
}
@@ -32,7 +37,8 @@ int toupper(int c) {
return c - 32;
}
int tolower(int c) {
int tolower(int c)
{
if (!isupper(c)) {
return c;
}
@@ -40,55 +46,48 @@ int tolower(int c) {
return c + 32;
}
int isdigit(int c) { return (c >= 48 && c <= 57); }
int isalpha(int c) { return (c >= 65 && c <= 90) || (c >= 97 && c <= 122); }
int isalnum(int c) { return isalpha(c) | isdigit(c); }
int iscntrl(int c) { return (c <= 31) || (c == 127); }
int isprint(int c) { return (c >= 32 && c <= 126) || (c >= 128 && c <= 254); }
int isgraph(int c) { return isprint(c) && c != 32; }
int ispunct(int c) { return isgraph(c) && !isalnum(c); }
int isspace(int c) {
return (c == ' ') || (c == '\t') || (c == '\n') || (c == '\v') ||
(c == '\f') || (c == '\r');
int isdigit(int c)
{
return (c >= 48 && c <= 57);
}
int isxdigit(int c) {
int isalpha(int c)
{
return (c >= 65 && c <= 90) || (c >= 97 && c <= 122);
}
int isalnum(int c)
{
return isalpha(c) | isdigit(c);
}
int iscntrl(int c)
{
return (c <= 31) || (c == 127);
}
int isprint(int c)
{
return (c >= 32 && c <= 126) || (c >= 128 && c <= 254);
}
int isgraph(int c)
{
return isprint(c) && c != 32;
}
int ispunct(int c)
{
return isgraph(c) && !isalnum(c);
}
int isspace(int c)
{
return (c == ' ') || (c == '\t') || (c == '\n') || (c == '\v')
|| (c == '\f') || (c == '\r');
}
int isxdigit(int c)
{
return isdigit(c) || (c >= 65 && c <= 70) || (c >= 97 && c <= 102);
}
bool fill_random(unsigned char *buffer, unsigned int size) {
if (!buffer || !size) {
return false;
}
for (uint32_t i = 0; i < size; i++) {
uint32_t next = random_seed;
uint32_t result;
next *= 1103515245;
next += 12345;
result = (uint32_t)(next / 65536) % 2048;
next *= 1103515245;
next += 12345;
result <<= 10;
result ^= (uint32_t)(next / 65536) % 1024;
next *= 1103515245;
next += 12345;
result <<= 10;
result ^= (uint32_t)(next / 65536) % 1024;
random_seed = next;
buffer[i] = (uint8_t)(result % 256);
}
return true;
}

View File

@@ -1,9 +1,9 @@
#include <mango/object.h>
#include <mango/sched.h>
#include <mango/clock.h>
#include <mango/cpu.h>
#include <mango/printk.h>
#include <mango/machine/thread.h>
#include <mango/object.h>
#include <mango/printk.h>
#include <mango/sched.h>
extern kern_status_t setup_kernel_task(void);
extern kern_status_t setup_idle_task(void);
@@ -37,8 +37,14 @@ kern_status_t sched_init(void)
return status;
}
struct thread *this_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&kernel_task()->t_threads));
struct thread *idle_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&idle_task()->t_threads));
struct thread *this_thread = QUEUE_CONTAINER(
struct thread,
tr_threads,
queue_first(&kernel_task()->t_threads));
struct thread *idle_thread = QUEUE_CONTAINER(
struct thread,
tr_threads,
queue_first(&idle_task()->t_threads));
struct cpu_data *this_cpu = get_this_cpu();
rq_init(&this_cpu->c_rq);
@@ -55,7 +61,8 @@ kern_status_t sched_init(void)
static void expire_timers(struct cpu_data *cpu)
{
queue_foreach(struct timer, timer, &cpu->c_timers, t_entry) {
queue_foreach(struct timer, timer, &cpu->c_timers, t_entry)
{
if (timer->t_expiry <= clock_ticks) {
timer->t_callback(timer);
}
@@ -102,7 +109,8 @@ void __schedule(enum sched_mode mode)
enum thread_state prev_state = READ_ONCE(prev->tr_state);
if ((mode == SCHED_IRQ || prev_state == THREAD_READY) && prev != rq->rq_idle) {
if ((mode == SCHED_IRQ || prev_state == THREAD_READY)
&& prev != rq->rq_idle) {
rq_enqueue(rq, prev);
}
@@ -213,7 +221,8 @@ void end_charge_period(void)
self->tr_charge_period_start = 0;
//printk("%llu cycles charged to %s/%u", charge, self->tr_parent->t_name, self->tr_parent->t_id);
// printk("%llu cycles charged to %s/%u", charge,
// self->tr_parent->t_name, self->tr_parent->t_id);
}
cycles_t default_quantum(void)

View File

@@ -1,10 +1,10 @@
#include <mango/locks.h>
#include <mango/printk.h>
#include <mango/clock.h>
#include <mango/sched.h>
#include <mango/object.h>
#include <mango/cpu.h>
#include <mango/libc/stdio.h>
#include <mango/locks.h>
#include <mango/object.h>
#include <mango/printk.h>
#include <mango/sched.h>
#define TASK_CAST(p) OBJECT_C_CAST(struct task, t_base, &task_type, p)
@@ -20,7 +20,12 @@ static struct task *__idle_task;
static spin_lock_t task_list_lock;
static struct btree task_list;
BTREE_DEFINE_SIMPLE_GET(struct task, unsigned int, t_tasklist, t_id, task_list_get)
BTREE_DEFINE_SIMPLE_GET(
struct task,
unsigned int,
t_tasklist,
t_id,
task_list_get)
BTREE_DEFINE_SIMPLE_INSERT(struct task, t_tasklist, t_id, task_list_insert)
struct task *kernel_task(void)
@@ -51,7 +56,10 @@ kern_status_t setup_kernel_task(void)
__kernel_task->t_pmap = get_kernel_pmap();
__kernel_task->t_state = TASK_RUNNING;
snprintf(__kernel_task->t_name, sizeof __kernel_task->t_name, "kernel_task");
snprintf(
__kernel_task->t_name,
sizeof __kernel_task->t_name,
"kernel_task");
struct thread *kernel_thread = thread_alloc();
kernel_thread->tr_id = 0;
@@ -62,7 +70,9 @@ kern_status_t setup_kernel_task(void)
unsigned long flags;
task_lock_irqsave(__kernel_task, &flags);
queue_push_back(&__kernel_task->t_threads, &kernel_thread->tr_threads);
queue_push_back(
&__kernel_task->t_threads,
&kernel_thread->tr_threads);
task_unlock_irqrestore(__kernel_task, flags);
spin_lock_irqsave(&task_list_lock, &flags);

View File

@@ -1,17 +1,18 @@
#include <mango/status.h>
#include <limits.h>
#include <mango/vm.h>
#include <mango/machine/cpu.h>
#include <mango/memblock.h>
#include <mango/printk.h>
#include <mango/machine/cpu.h>
#include <mango/status.h>
#include <mango/vm.h>
#include <stddef.h>
#include <limits.h>
#include <stdint.h>
/* One struct vm_pg_data per NUMA node. */
static struct vm_pg_data *node_data = NULL;
kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zones)
kern_status_t vm_bootstrap(
const struct vm_zone_descriptor *zones,
size_t nr_zones)
{
int numa_count = 1;

View File

@@ -19,11 +19,11 @@
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
*/
#include <stdbool.h>
#include <limits.h>
#include <mango/types.h>
#include <mango/libc/string.h>
#include <mango/memblock.h>
#include <mango/types.h>
#include <stdbool.h>
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
@@ -38,8 +38,10 @@
be bounded by the defined memory regions, and not by this constant. */
#define ADDR_MAX (~(uintptr_t)0)
static struct memblock_region init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT];
static struct memblock_region init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT];
static struct memblock_region
init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT];
static struct memblock_region
init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT];
static phys_addr_t do_alloc(size_t size, phys_addr_t align);
@@ -59,16 +61,21 @@ static void memblock_double_capacity(struct memblock_type *type)
{
size_t new_max = type->max * 2;
phys_addr_t new_regions_p = do_alloc(new_max * sizeof(struct memblock_region), 8);
phys_addr_t new_regions_p
= do_alloc(new_max * sizeof(struct memblock_region), 8);
void *new_regions = (void *)(new_regions_p + memblock.m_voffset);
memcpy(new_regions, type->regions, type->count * sizeof(struct memblock_region));
memcpy(new_regions,
type->regions,
type->count * sizeof(struct memblock_region));
type->regions = new_regions;
type->max = new_max;
}
static int memblock_insert_region(struct memblock_type *type, struct memblock_region *to_add)
static int memblock_insert_region(
struct memblock_type *type,
struct memblock_region *to_add)
{
unsigned int i = 0;
@@ -116,7 +123,11 @@ int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset)
return 0;
}
int memblock_add_range(struct memblock_type *type, uintptr_t base, size_t size, enum memblock_region_status status)
int memblock_add_range(
struct memblock_type *type,
uintptr_t base,
size_t size,
enum memblock_region_status status)
{
if (size == 0) {
return 0;
@@ -131,14 +142,17 @@ int memblock_add_range(struct memblock_type *type, uintptr_t base, size_t size,
return 0;
}
struct memblock_region new_region = { .base = base, .limit = limit, .status = status };
struct memblock_region new_region
= {.base = base, .limit = limit, .status = status};
/* two regions with different statuses CANNOT intersect. we first need to check
to make sure the region being added doesn't violate this rule. */
/* two regions with different statuses CANNOT intersect. we first need
to check to make sure the region being added doesn't violate this
rule. */
for (unsigned int i = 0; i < type->count; i++) {
struct memblock_region *cur_region = &type->regions[i];
if (new_region.base > cur_region->limit || new_region.limit < cur_region->base) {
if (new_region.base > cur_region->limit
|| new_region.limit < cur_region->base) {
continue;
}
@@ -154,47 +168,67 @@ int memblock_add_range(struct memblock_type *type, uintptr_t base, size_t size,
for (unsigned int i = 0; i < type->count; i++) {
struct memblock_region *cur_region = &type->regions[i];
/* case 1: the region being added and the current region have no connection what-so-ever (no overlaps) */
if (cur_region->limit + 1 < new_region.base || cur_region->base > new_region.limit) {
/* case 1: the region being added and the current region have no
* connection what-so-ever (no overlaps) */
if (cur_region->limit + 1 < new_region.base
|| cur_region->base > new_region.limit) {
continue;
}
/* case 2: the region being added matches a region already in the list. */
if (cur_region->base == new_region.base && cur_region->limit == new_region.limit) {
/* case 2: the region being added matches a region already in
* the list. */
if (cur_region->base == new_region.base
&& cur_region->limit == new_region.limit) {
/* nothing needs to be done */
add_new = false;
break;
}
/* case 3: the region being added completely contains a region already in the list. */
if (cur_region->base > new_region.base && cur_region->limit <= new_region.limit) {
/* case 3: the region being added completely contains a region
* already in the list. */
if (cur_region->base > new_region.base
&& cur_region->limit <= new_region.limit) {
memblock_remove_region(type, i);
/* after memblock_remove_region(), a different region will have moved into the array slot referenced by i.
decrementing i means we'll stay at the current index and process this region. */
/* after memblock_remove_region(), a different region
will have moved into the array slot referenced by i.
decrementing i means we'll stay at the current index
and process this region. */
i--;
continue;
}
/* case 4: the region being added meets or partially overlaps a
* region already in the list. */
/* case 4: the region being added meets or partially overlaps a region already in the list. */
/* there can be an overlap at the beginning and the end of the region being added,
anything else is either a full overlap (case 3) or not within the region being added at all.
to handle this, remove the region that's already in the list and extend the region being added to cover it.
the two regions may overlap and have incompatible statuses, but this case was handled earlier in this function. */
if ((new_region.base > cur_region->base || new_region.base == cur_region->limit - 1) && new_region.status == cur_region->status) {
/* the new region overlaps the END of the current region, change the base of the new region to match that of the current region. */
/* there can be an overlap at the beginning and the end of the
region being added, anything else is either a full overlap
(case 3) or not within the region being added at all. to
handle this, remove the region that's already in the list and
extend the region being added to cover it. the two regions
may overlap and have incompatible statuses, but this case was
handled earlier in this function. */
if ((new_region.base > cur_region->base
|| new_region.base == cur_region->limit - 1)
&& new_region.status == cur_region->status) {
/* the new region overlaps the END of the current
* region, change the base of the new region to match
* that of the current region. */
new_region.base = cur_region->base;
} else if ((new_region.base < cur_region->base || new_region.limit + 1 == cur_region->base) && new_region.status == cur_region->status) {
/* the new region overlaps the BEGINNING of the current region, change the limit of the new region to match that of the current region. */
} else if (
(new_region.base < cur_region->base
|| new_region.limit + 1 == cur_region->base)
&& new_region.status == cur_region->status) {
/* the new region overlaps the BEGINNING of the current
* region, change the limit of the new region to match
* that of the current region. */
new_region.limit = cur_region->limit;
} else {
continue;
}
/* with the new region updated to include the current region, we can remove the current region from the list */
/* with the new region updated to include the current region, we
* can remove the current region from the list */
memblock_remove_region(type, i);
i--;
}
@@ -216,7 +250,11 @@ int memblock_add(uintptr_t base, size_t size)
memblock_double_capacity(&memblock.memory);
}
return memblock_add_range(&memblock.memory, base, size, MEMBLOCK_MEMORY);
return memblock_add_range(
&memblock.memory,
base,
size,
MEMBLOCK_MEMORY);
}
int memblock_reserve(uintptr_t base, size_t size)
@@ -225,7 +263,11 @@ int memblock_reserve(uintptr_t base, size_t size)
memblock_double_capacity(&memblock.reserved);
}
return memblock_add_range(&memblock.reserved, base, size, MEMBLOCK_RESERVED);
return memblock_add_range(
&memblock.reserved,
base,
size,
MEMBLOCK_RESERVED);
}
static phys_addr_t do_alloc(size_t size, phys_addr_t align)
@@ -245,7 +287,8 @@ static phys_addr_t do_alloc(size_t size, phys_addr_t align)
phys_addr_t region_end = memblock.m_alloc_end - memblock.m_voffset;
struct memblock_iter it;
for_each_free_mem_range (&it, region_start, region_end) {
for_each_free_mem_range(&it, region_start, region_end)
{
phys_addr_t base = it.it_base;
if (base & (align - 1)) {
base &= ~(align - 1);
@@ -270,7 +313,11 @@ static phys_addr_t do_alloc(size_t size, phys_addr_t align)
return 0;
}
int status = memblock_add_range(&memblock.reserved, allocated_base, allocated_limit - allocated_base, MEMBLOCK_ALLOC);
int status = memblock_add_range(
&memblock.reserved,
allocated_base,
allocated_limit - allocated_base,
MEMBLOCK_ALLOC);
if (status != 0) {
return 0;
}
@@ -313,8 +360,10 @@ int memblock_free_phys(phys_addr_t addr, size_t size)
void __next_memory_region(
struct memblock_iter *it,
struct memblock_type *type_a, struct memblock_type *type_b,
uintptr_t start, uintptr_t end)
struct memblock_type *type_a,
struct memblock_type *type_b,
uintptr_t start,
uintptr_t end)
{
unsigned int idx_a = IDX_A(it->__idx);
unsigned int idx_b = IDX_B(it->__idx);
@@ -344,70 +393,85 @@ void __next_memory_region(
}
if (m_start > end) {
/* we have gone past the requested memory range and can now stop */
/* we have gone past the requested memory range and can
* now stop */
break;
}
for (; idx_b < type_b->count + 1; idx_b++) {
struct memblock_region *r = &type_b->regions[idx_b];
/* r_start and r_end delimit the region of memory between the current and previous reserved regions.
if we have gone past the last reserved region, these variables delimit the range between the end
of the last reserved region and the end of memory. */
/* r_start and r_end delimit the region of memory
between the current and previous reserved regions. if
we have gone past the last reserved region, these
variables delimit the range between the end of the
last reserved region and the end of memory. */
uintptr_t r_start = idx_b > 0 ? r[-1].limit + 1 : 0;
uintptr_t r_end;
if (idx_b < type_b->count) {
r_end = r->base;
/* we decrement r_end to get the address of the last byte of the free region.
if r_end is already zero, there is a reserved region starting at address 0x0.
as long as r_end == r_start == 0x00000, we will skip this region. */
/* we decrement r_end to get the address of the
last byte of the free region. if r_end is
already zero, there is a reserved region
starting at address 0x0. as long as r_end ==
r_start == 0x00000, we will skip this region.
*/
if (r_end) {
r_end--;
}
} else {
/* this maximum value will be clamped to the bounds of memblock.memory
before being returned to the caller */
/* this maximum value will be clamped to the
bounds of memblock.memory before being
returned to the caller */
r_end = ADDR_MAX;
}
if (r_start >= r_end) {
/* this free region has a length of zero, move to the next one */
/* this free region has a length of zero, move
* to the next one */
continue;
}
if (r_start >= m_end) {
/* we've gone past the end of the current memory region, and need to go to the next one */
/* we've gone past the end of the current memory
* region, and need to go to the next one */
break;
}
/* we've already gone past this free memory region. move to the next one */
/* we've already gone past this free memory region. move
* to the next one */
if (m_start >= r_end) {
continue;
}
/* we want the area that is overlapped by both
region M (m_start - m_end) : The region defined as system memory.
region R (r_start - r_end) : The region defined as free / outside of any reserved regions.
region M (m_start - m_end) : The region defined
as system memory. region R (r_start - r_end) : The
region defined as free / outside of any reserved
regions.
*/
it->it_base = MAX(m_start, r_start);
it->it_limit = MIN(m_end, r_end);
/* further limit the region to the intersection between the region itself and the
specified iteration bounds */
/* further limit the region to the intersection between
the region itself and the specified iteration bounds
*/
it->it_base = MAX(it->it_base, start);
it->it_limit = MIN(it->it_limit, end);
if (it->it_limit <= it->it_base) {
/* this region is not part of the specified bounds, skip it. */
/* this region is not part of the specified
* bounds, skip it. */
continue;
}
it->it_status = MEMBLOCK_MEMORY;
/* whichever region is smaller, increment the pointer for that type, so we can
compare the larger region with the next region of the incremented type. */
/* whichever region is smaller, increment the pointer
for that type, so we can compare the larger region
with the next region of the incremented type. */
if (m_end <= r_end) {
idx_a++;
} else {

View File

@@ -1,8 +1,8 @@
#include <mango/types.h>
#include <mango/libc/string.h>
#include <mango/memblock.h>
#include <mango/printk.h>
#include <mango/types.h>
#include <mango/vm.h>
#include <mango/libc/string.h>
/* Pre-calculated page order -> size conversion table */
static size_t page_order_bytes[] = {
@@ -56,7 +56,8 @@ phys_addr_t vm_virt_to_phys(void *p)
void *vm_phys_to_virt(phys_addr_t p)
{
if (p >= (memblock.m_alloc_start - memblock.m_voffset) && p < (memblock.m_alloc_end - memblock.m_voffset)) {
if (p >= (memblock.m_alloc_start - memblock.m_voffset)
&& p < (memblock.m_alloc_end - memblock.m_voffset)) {
return memblock_phys_to_virt(p);
}
@@ -124,7 +125,6 @@ vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order)
return ~(page_order_bytes[order] - 1);
}
size_t vm_bytes_to_pages(size_t bytes)
{
if (bytes & (VM_PAGE_SIZE - 1)) {
@@ -150,7 +150,6 @@ struct vm_zone *vm_page_get_zone(struct vm_page *pg)
return &node->pg_zones[pg->p_zone];
}
struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags)
{
/* TODO prefer nodes closer to us */
@@ -232,7 +231,8 @@ struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b)
return NULL;
}
if ((a->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED)) != (b->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED))) {
if ((a->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED))
!= (b->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED))) {
return NULL;
}

View File

@@ -22,13 +22,13 @@
of the sparse memory model may be outweighed by the extra
overhead, and the flat memory model may be a better choice.
*/
#include <mango/vm.h>
#include <mango/arg.h>
#include <mango/printk.h>
#include <mango/panic.h>
#include <mango/memblock.h>
#include <mango/util.h>
#include <mango/machine/cpu.h>
#include <mango/memblock.h>
#include <mango/panic.h>
#include <mango/printk.h>
#include <mango/util.h>
#include <mango/vm.h>
static struct vm_sector *sector_array = NULL;
static size_t sector_array_count = 0;
@@ -53,11 +53,16 @@ static enum sector_coverage_mode get_sector_coverage_mode(void)
return SECTOR_COVERAGE_ALL;
}
printk("vm: [sparse] ignoring unknown sector coverage mode '%s', using FREE", arg);
printk("vm: [sparse] ignoring unknown sector coverage mode '%s', using "
"FREE",
arg);
return SECTOR_COVERAGE_FREE;
}
static struct vm_sector *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sector_id, size_t *index)
static struct vm_sector *phys_addr_to_sector_and_index(
phys_addr_t addr,
size_t *sector_id,
size_t *index)
{
/* all sectors have the same size */
size_t step = vm_page_order_to_bytes(sector_array[0].s_size);
@@ -98,7 +103,6 @@ static struct vm_page *get_or_create_page(phys_addr_t addr)
}
}
sector->s_pages[page_number].p_sector = sector_number;
return &sector->s_pages[page_number];
}
@@ -123,9 +127,13 @@ static enum vm_page_order find_minimum_sector_size(phys_addr_t pmem_end)
are in need of improvement to ensure that sparse works well on a wide
range of systems. */
static void calculate_sector_size_and_count(
size_t last_reserved_pfn, size_t last_free_pfn, size_t limit_pfn,
size_t reserved_size, size_t free_size,
unsigned int *out_sector_count, enum vm_page_order *out_sector_size)
size_t last_reserved_pfn,
size_t last_free_pfn,
size_t limit_pfn,
size_t reserved_size,
size_t free_size,
unsigned int *out_sector_count,
enum vm_page_order *out_sector_size)
{
/* we can support up to VM_MAX_SECTORS memory sectors.
the minimum sector size is what ever is required
@@ -154,8 +162,8 @@ static void calculate_sector_size_and_count(
threshold. */
sector_size++;
/* if the difference is particularly big, increase the sector size
even further */
/* if the difference is particularly big, increase the sector
size even further */
if (memdiff >= 0x1000000) {
sector_size++;
}
@@ -183,13 +191,15 @@ void vm_sparse_init(void)
size_t last_reserved_pfn = 0, last_free_pfn = 0;
struct memblock_iter it;
for_each_mem_range (&it, 0x0, UINTPTR_MAX) {
for_each_mem_range(&it, 0x0, UINTPTR_MAX)
{
if (pmem_limit < it.it_limit + 1) {
pmem_limit = it.it_limit + 1;
}
}
for_each_free_mem_range (&it, 0x0, UINTPTR_MAX) {
for_each_free_mem_range(&it, 0x0, UINTPTR_MAX)
{
free_size += it.it_limit - it.it_base + 1;
size_t last_pfn = it.it_limit / VM_PAGE_SIZE;
@@ -199,7 +209,8 @@ void vm_sparse_init(void)
}
}
for_each_reserved_mem_range (&it, 0x0, UINTPTR_MAX) {
for_each_reserved_mem_range(&it, 0x0, UINTPTR_MAX)
{
reserved_size += it.it_limit - it.it_base + 1;
size_t last_pfn = it.it_limit / VM_PAGE_SIZE;
@@ -212,7 +223,8 @@ void vm_sparse_init(void)
enum sector_coverage_mode mode = get_sector_coverage_mode();
phys_addr_t pmem_end = 0;
enum vm_page_order sector_size = find_minimum_sector_size(last_free_pfn);
enum vm_page_order sector_size
= find_minimum_sector_size(last_free_pfn);
if (mode == SECTOR_COVERAGE_FREE) {
pmem_end = last_free_pfn * VM_PAGE_SIZE;
} else {
@@ -224,50 +236,63 @@ void vm_sparse_init(void)
size_t sector_bytes = 0;
unsigned int nr_sectors = 0;
calculate_sector_size_and_count(
last_reserved_pfn, last_free_pfn, pmem_end / VM_PAGE_SIZE,
reserved_size, free_size,
&nr_sectors, &sector_size);
last_reserved_pfn,
last_free_pfn,
pmem_end / VM_PAGE_SIZE,
reserved_size,
free_size,
&nr_sectors,
&sector_size);
sector_bytes = vm_page_order_to_bytes(sector_size);
char sector_size_str[64];
data_size_to_string(sector_bytes, sector_size_str, sizeof sector_size_str);
data_size_to_string(
sector_bytes,
sector_size_str,
sizeof sector_size_str);
sector_array = kzalloc(sizeof(struct vm_sector) * nr_sectors, 0);
sector_array_count = nr_sectors;
for (unsigned int i = 0; i < nr_sectors; i++) {
sector_array[i].s_size = sector_size;
sector_array[i].s_first_pfn = (i * sector_bytes) >> VM_PAGE_SHIFT;
sector_array[i].s_first_pfn
= (i * sector_bytes) >> VM_PAGE_SHIFT;
}
size_t s, i;
phys_addr_to_sector_and_index(0x3f00000, &s, &i);
for_each_free_mem_range(&it, 0x0, pmem_end) {
for_each_free_mem_range(&it, 0x0, pmem_end)
{
if (it.it_base & VM_PAGE_MASK) {
it.it_base &= ~VM_PAGE_MASK;
it.it_base += VM_PAGE_SIZE;
}
for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
for (phys_addr_t i = it.it_base; i < it.it_limit;
i += VM_PAGE_SIZE) {
struct vm_page *pg = get_or_create_page(i);
pg->p_flags = 0;
}
}
for_each_reserved_mem_range(&it, 0x0, pmem_end) {
for_each_reserved_mem_range(&it, 0x0, pmem_end)
{
if (it.it_base & VM_PAGE_MASK) {
it.it_base &= ~VM_PAGE_MASK;
it.it_base += VM_PAGE_SIZE;
}
for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
for (phys_addr_t i = it.it_base; i < it.it_limit;
i += VM_PAGE_SIZE) {
struct vm_page *pg = vm_page_get(i);
if (!pg) {
/* if the page doesn't exist, it is part of a sector
that only contains reserved pages. a NULL page
is implicitly treated as reserved */
/* if the page doesn't exist, it is part of a
sector that only contains reserved pages. a
NULL page is implicitly treated as reserved
*/
continue;
}
@@ -275,7 +300,9 @@ void vm_sparse_init(void)
}
}
printk("vm: [sparse] initialised %zu sectors of size %s", nr_sectors, sector_size_str);
printk("vm: [sparse] initialised %zu sectors of size %s",
nr_sectors,
sector_size_str);
}
struct vm_page *vm_page_get_sparse(phys_addr_t addr)
@@ -288,7 +315,8 @@ struct vm_page *vm_page_get_sparse(phys_addr_t addr)
struct vm_sector *sector = &sector_array[sector_number];
if (!sector->s_pages || page_number >= vm_page_order_to_pages(sector->s_size)) {
if (!sector->s_pages
|| page_number >= vm_page_order_to_pages(sector->s_size)) {
return NULL;
}
@@ -298,5 +326,6 @@ struct vm_page *vm_page_get_sparse(phys_addr_t addr)
/* Convert a vm_page pointer back to its physical frame number (PFN)
   under the sparse memory model.

   pg      - page descriptor; pg->p_sector indexes the file-static
             sector_array, and pg itself lives inside that sector's
             s_pages array.
   returns - the sector's first PFN plus the index of pg within the
             sector's page array.

   Fix: the block as captured contained two consecutive return
   statements (the second unreachable) — collapsed to a single one. */
size_t vm_page_get_pfn_sparse(struct vm_page *pg)
{
	struct vm_sector *sector = &sector_array[pg->p_sector];

	/* Pointer offset into s_pages, scaled by the descriptor size,
	   gives the page's index within the sector. */
	return sector->s_first_pfn
		+ (((uintptr_t)pg - (uintptr_t)sector->s_pages) / sizeof *pg);
}