kernel: adjust formatting

This commit is contained in:
2026-02-08 12:17:27 +00:00
parent 49a75a1bbe
commit 0490541dc9
14 changed files with 478 additions and 320 deletions

View File

@@ -22,21 +22,22 @@
#ifndef MANGO_MEMBLOCK_H_
#define MANGO_MEMBLOCK_H_
#include <stddef.h>
#include <limits.h>
#include <mango/types.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
#define MEMBLOCK_INIT_MEMORY_REGION_COUNT 128
#define MEMBLOCK_INIT_MEMORY_REGION_COUNT 128
#define MEMBLOCK_INIT_RESERVED_REGION_COUNT 128
#define __for_each_mem_range(i, type_a, type_b, p_start, p_end) \
for ((i)->__idx = 0, __next_memory_region(i, type_a, type_b, p_start, p_end); \
(i)->__idx != ULLONG_MAX; \
__next_memory_region(i, type_a, type_b, p_start, p_end))
#define __for_each_mem_range(i, type_a, type_b, p_start, p_end) \
for ((i)->__idx = 0, \
__next_memory_region(i, type_a, type_b, p_start, p_end); \
(i)->__idx != ULLONG_MAX; \
__next_memory_region(i, type_a, type_b, p_start, p_end))
/* iterate through all memory regions known to memblock.
@@ -47,7 +48,7 @@ extern "C" {
@param i the iterator. this should be a pointer of type struct memblock_iter.
for each iteration, this structure will be filled with details about
the current memory region.
the current memory region.
@param p_start the lower bound of the memory region to iterate through.
if you don't want to use a lower bound, pass 0.
@param p_end the upper bound of the memory region to iterate through.
@@ -65,7 +66,7 @@ extern "C" {
struct memblock_iter it;
for_each_mem_range (&it, 0x40000, 0x80000) { ... }
*/
#define for_each_mem_range(i, p_start, p_end) \
#define for_each_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, NULL, p_start, p_end)
/* iterate through all memory regions reserved using memblock.
@@ -77,7 +78,7 @@ extern "C" {
@param i the iterator. this should be a pointer of type struct memblock_iter.
for each iteration, this structure will be filled with details about
the current memory region.
the current memory region.
@param p_start the lower bound of the memory region to iterate through.
if you don't want to use a lower bound, pass 0.
@param p_end the upper bound of the memory region to iterate through.
@@ -95,7 +96,7 @@ extern "C" {
struct memblock_iter it;
for_each_reserved_mem_range (&it, 0x40000, 0x80000) { ... }
*/
#define for_each_reserved_mem_range(i, p_start, p_end) \
#define for_each_reserved_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.reserved, NULL, p_start, p_end)
/* iterate through all memory regions known by memblock to be free.
@@ -108,7 +109,7 @@ extern "C" {
@param i the iterator. this should be a pointer of type struct memblock_iter.
for each iteration, this structure will be filled with details about
the current memory region.
the current memory region.
@param p_start the lower bound of the memory region to iterate through.
if you don't want to use a lower bound, pass 0.
@param p_end the upper bound of the memory region to iterate through.
@@ -138,19 +139,25 @@ extern "C" {
- 0x08000 -> 0x08fff
- 0x10000 -> 0x1ffff
*/
#define for_each_free_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, &memblock.reserved, p_start, p_end)
#define for_each_free_mem_range(i, p_start, p_end) \
__for_each_mem_range( \
i, \
&memblock.memory, \
&memblock.reserved, \
p_start, \
p_end)
typedef uint64_t memblock_index_t;
enum memblock_region_status {
/* Used in memblock.memory regions, indicates that the memory region exists */
/* Used in memblock.memory regions, indicates that the memory region
* exists */
MEMBLOCK_MEMORY = 0,
/* Used in memblock.reserved regions, indicates that the memory region was reserved
* by a call to memblock_alloc() */
/* Used in memblock.reserved regions, indicates that the memory region
* was reserved by a call to memblock_alloc() */
MEMBLOCK_ALLOC,
/* Used in memblock.reserved regions, indicates that the memory region was reserved
* by a call to memblock_reserve() */
/* Used in memblock.reserved regions, indicates that the memory region
* was reserved by a call to memblock_reserve() */
MEMBLOCK_RESERVED,
};
@@ -176,9 +183,10 @@ struct memblock {
/* bounds of the memory region that can be used by memblock_alloc()
both of these are virtual addresses */
uintptr_t m_alloc_start, m_alloc_end;
/* memblock assumes that all memory in the alloc zone is contiguously mapped
(if paging is enabled). m_voffset is the offset that needs to be added to
a given physical address to get the corresponding virtual address */
/* memblock assumes that all memory in the alloc zone is contiguously
mapped (if paging is enabled). m_voffset is the offset that needs to
be added to a given physical address to get the corresponding virtual
address */
uintptr_t m_voffset;
struct memblock_type memory;
@@ -212,7 +220,10 @@ extern int __next_mem_range(struct memblock_iter *it);
@param voffset the offset between the physical address of a given page and
its corresponding virtual address.
*/
extern int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset);
extern int memblock_init(
uintptr_t alloc_start,
uintptr_t alloc_end,
uintptr_t voffset);
/* add a region of memory to memblock.
@@ -234,7 +245,8 @@ extern int memblock_add(phys_addr_t base, size_t size);
reserved memory will not be used by memblock_alloc(), and will remain
reserved when the vm_page memory map is initialised.
@param base the physical address of the start of the memory region to reserve.
@param base the physical address of the start of the memory region to
reserve.
@param size the size of the memory region to reserve in bytes.
*/
extern int memblock_reserve(phys_addr_t base, size_t size);
@@ -257,7 +269,7 @@ extern int memblock_reserve(phys_addr_t base, size_t size);
@param size the size of the buffer to allocate in bytes.
@param align the alignment to use. for example, an alignment of 4096
will result in the returned pointer being a multiple
of 4096. this must be a power of 2.
of 4096. this must be a power of 2.
*/
extern void *memblock_alloc(size_t size, phys_addr_t align);
@@ -279,7 +291,7 @@ extern void *memblock_alloc(size_t size, phys_addr_t align);
@param size the size of the buffer to allocate in bytes.
@param align the alignment to use. for example, an alignment of 4096
will result in the returned pointer being a multiple
of 4096. this must be a power of 2.
of 4096. this must be a power of 2.
*/
extern phys_addr_t memblock_alloc_phys(size_t size, phys_addr_t align);
@@ -319,9 +331,12 @@ extern phys_addr_t memblock_virt_to_phys(void *p);
*/
extern void *memblock_phys_to_virt(phys_addr_t p);
extern void __next_memory_region(struct memblock_iter *it, \
struct memblock_type *type_a, struct memblock_type *type_b,
phys_addr_t start, phys_addr_t end);
extern void __next_memory_region(
struct memblock_iter *it,
struct memblock_type *type_a,
struct memblock_type *type_b,
phys_addr_t start,
phys_addr_t end);
#ifdef __cplusplus
}

View File

@@ -3,10 +3,11 @@
#include <mango/compiler.h>
struct cpu_context;
struct ml_cpu_context;
#define panic(...) panic_irq(NULL, __VA_ARGS__)
extern void __noreturn panic_irq(struct cpu_context *ctx, const char *fmt, ...);
extern void __noreturn
panic_irq(struct ml_cpu_context *ctx, const char *fmt, ...);
#endif

View File

@@ -1,27 +1,34 @@
#ifndef MANGO_UTIL_H_
#define MANGO_UTIL_H_
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define CLAMP(x, lo, hi) (MIN(MAX(x, lo), hi))
extern uint64_t hash_string(const char *s);
extern void data_size_to_string(size_t value, char *out, size_t outsz);
static inline bool power_of_2(size_t x) { return (x > 0 && (x & (x - 1)) == 0); }
static inline unsigned long long div64_pow2(unsigned long long x, unsigned long long y)
/* report whether x is an exact power of two.
   zero is not considered a power of two. */
static inline bool power_of_2(size_t x)
{
	if (x == 0)
		return false;
	/* a power of two has exactly one bit set, so clearing the
	   lowest set bit must leave zero */
	return (x & (x - 1)) == 0;
}
/* divide x by y, where y must be a non-zero power of two.
   implemented as a right shift by log2(y).
   @param x the dividend.
   @param y the divisor; must be a power of two (see power_of_2()). */
static inline unsigned long long div64_pow2(
	unsigned long long x,
	unsigned long long y)
{
	/* use the 64-bit ctz variant: __builtin_ctz takes unsigned int,
	   which truncates y — for y >= 2^32 the truncated value is 0 and
	   ctz(0) is undefined behaviour, yielding a wrong shift count */
	return x >> (__builtin_ctzll(y));
}
static inline unsigned long long absdiff64(unsigned long long x, unsigned long long y)
/* compute the absolute difference |x - y| of two unsigned
   64-bit values without risk of unsigned wrap-around. */
static inline unsigned long long absdiff64(
	unsigned long long x,
	unsigned long long y)
{
	if (x < y)
		return y - x;
	return x - y;
}

View File

@@ -1,14 +1,14 @@
#ifndef MANGO_VM_H_
#define MANGO_VM_H_
#include <stddef.h>
#include <mango/types.h>
#include <mango/status.h>
#include <mango/queue.h>
#include <mango/btree.h>
#include <mango/bitmap.h>
#include <mango/btree.h>
#include <mango/locks.h>
#include <mango/machine/vm.h>
#include <mango/queue.h>
#include <mango/status.h>
#include <mango/types.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
@@ -17,13 +17,13 @@ extern "C" {
struct bcache;
/* maximum number of NUMA nodes */
#define VM_MAX_NODES 64
#define VM_MAX_NODES 64
/* maximum number of memory zones per node */
#define VM_MAX_ZONES (VM_ZONE_MAX + 1)
#define VM_MAX_ZONES (VM_ZONE_MAX + 1)
/* maximum number of supported page orders */
#define VM_MAX_PAGE_ORDERS (VM_PAGE_MAX_ORDER + 1)
#define VM_MAX_PAGE_ORDERS (VM_PAGE_MAX_ORDER + 1)
/* maximum number of sparse memory sectors */
#define VM_MAX_SECTORS 8192
#define VM_MAX_SECTORS 8192
/* maximum number of disk sectors that can be stored in a single
page. AKA the number of bits in the sector bitmap.
@@ -33,44 +33,41 @@ struct bcache;
#define VM_CHECK_ALIGN(p, mask) ((((p) & (mask)) == (p)) ? 1 : 0)
#define VM_CACHE_INITIALISED(c) ((c)->c_obj_count != 0)
#define VM_PAGE_IS_FREE(pg) (((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0)
#define VM_PAGE_IS_FREE(pg) \
(((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0)
#define vm_page_foreach(pg, i) \
#define vm_page_foreach(pg, i) \
for (struct vm_page *i = (pg); i; i = vm_page_get_next_tail(i))
typedef phys_addr_t vm_alignment_t;
typedef unsigned int vm_node_id_t;
struct vm_object {
unsigned int reserved;
};
enum vm_model {
VM_MODEL_FLAT = 1,
VM_MODEL_SPARSE,
};
enum vm_prot {
VM_PROT_READ = 0x01u,
VM_PROT_WRITE = 0x02u,
VM_PROT_EXEC = 0x04u,
VM_PROT_USER = 0x08u,
VM_PROT_SVR = 0x10u,
VM_PROT_READ = 0x01u,
VM_PROT_WRITE = 0x02u,
VM_PROT_EXEC = 0x04u,
VM_PROT_USER = 0x08u,
VM_PROT_SVR = 0x10u,
VM_PROT_NOCACHE = 0x20u,
};
enum vm_flags {
VM_NORMAL = 0x00u,
VM_NORMAL = 0x00u,
VM_GET_DMA = 0x01u,
};
enum vm_zone_id {
/* NOTE that these are used as indices into the node_zones array in vm/zone.c
they need to be continuous, and must start at 0!
/* NOTE that these are used as indices into the node_zones array in
vm/zone.c they need to be continuous, and must start at 0!
not all of these zones are implemented for every architecture. */
VM_ZONE_DMA = 0u,
VM_ZONE_NORMAL = 1u,
VM_ZONE_DMA = 0u,
VM_ZONE_NORMAL = 1u,
VM_ZONE_HIGHMEM = 2u,
};
@@ -108,27 +105,28 @@ enum vm_page_order {
};
enum vm_page_flags {
/* page is reserved (probably by a call to memblock_reserve()) and cannot be
returned by any allocation function */
VM_PAGE_RESERVED = 0x01u,
/* page is reserved (probably by a call to memblock_reserve()) and
cannot be returned by any allocation function */
VM_PAGE_RESERVED = 0x01u,
/* page has been allocated by a zone's buddy allocator, and is in-use */
VM_PAGE_ALLOC = 0x02u,
VM_PAGE_ALLOC = 0x02u,
/* page is the first page of a huge-page */
VM_PAGE_HEAD = 0x04u,
VM_PAGE_HEAD = 0x04u,
/* page is part of a huge-page */
VM_PAGE_HUGE = 0x08u,
/* page is holding cached data from secondary storage, and can be freed if necessary (and not dirty). */
VM_PAGE_CACHE = 0x10u,
VM_PAGE_HUGE = 0x08u,
/* page is holding cached data from secondary storage, and can be freed
* if necessary (and not dirty). */
VM_PAGE_CACHE = 0x10u,
};
enum vm_memory_region_status {
VM_REGION_FREE = 0x01u,
VM_REGION_RESERVED = 0x02u,
VM_REGION_FREE = 0x01u,
VM_REGION_RESERVED = 0x02u,
};
enum vm_cache_flags {
VM_CACHE_OFFSLAB = 0x01u,
VM_CACHE_DMA = 0x02u
VM_CACHE_DMA = 0x02u
};
struct vm_zone_descriptor {
@@ -204,7 +202,7 @@ struct vm_slab {
- s_freelist[s_free] should be set to the previous value of s_free.
this is commented as it as flexible arrays are not supported in c++.
*/
//unsigned int s_freelist[];
// unsigned int s_freelist[];
};
struct vm_page {
@@ -238,13 +236,14 @@ struct vm_page {
struct queue_entry p_list;
struct btree_node p_bnode;
/* btree_node contains three pointers, so provide three pointer-sized integers for
use if p_bnode isn't needed. */
/* btree_node contains three pointers, so provide three
pointer-sized integers for use if p_bnode isn't needed. */
uintptr_t priv1[3];
};
union {
/* used by bcache when sector size is < page size. bitmap of present/missing sectors */
/* used by bcache when sector size is < page size. bitmap of
* present/missing sectors */
DECLARE_BITMAP(p_blockbits, VM_MAX_SECTORS_PER_PAGE);
uint32_t p_priv2;
};
@@ -252,10 +251,12 @@ struct vm_page {
union {
/* sector address, used by bcache */
sectors_t p_blockid;
/* offset of this page within the vm_object it is a part of */
off_t p_vmo_offset;
uint32_t p_priv3[2];
};
} __attribute__((aligned(2 * sizeof(unsigned long))));
} __aligned(2 * sizeof(unsigned long));
/* represents a sector of memory, containing its own array of vm_pages.
this struct is used under the sparse memory model, instead of the
@@ -272,39 +273,54 @@ struct vm_sector {
struct vm_page *s_pages;
};
extern kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zones);
extern kern_status_t vm_bootstrap(
const struct vm_zone_descriptor *zones,
size_t nr_zones);
extern enum vm_model vm_memory_model(void);
extern void vm_set_memory_model(enum vm_model model);
extern struct vm_pg_data *vm_pg_data_get(vm_node_id_t node);
extern phys_addr_t vm_virt_to_phys(void *p);
extern phys_addr_t vm_virt_to_phys(const void *p);
extern void *vm_phys_to_virt(phys_addr_t p);
extern void vm_page_init_array();
extern size_t vm_page_order_to_bytes(enum vm_page_order order);
extern size_t vm_page_order_to_pages(enum vm_page_order order);
extern vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order);
extern void vm_page_init_array(void);
extern struct vm_page *vm_page_get(phys_addr_t addr);
extern phys_addr_t vm_page_get_paddr(struct vm_page *pg);
extern struct vm_zone *vm_page_get_zone(struct vm_page *pg);
extern void *vm_page_get_vaddr(struct vm_page *pg);
extern size_t vm_page_get_pfn(struct vm_page *pg);
extern size_t vm_page_order_to_bytes(enum vm_page_order order);
extern size_t vm_page_order_to_pages(enum vm_page_order order);
extern vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order);
extern struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags);
extern struct vm_page *vm_page_alloc(
enum vm_page_order order,
enum vm_flags flags);
extern void vm_page_free(struct vm_page *pg);
extern int vm_page_split(struct vm_page *pg, struct vm_page **a, struct vm_page **b);
extern int vm_page_split(
struct vm_page *pg,
struct vm_page **a,
struct vm_page **b);
extern struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b);
extern struct vm_page *vm_page_get_buddy(struct vm_page *pg);
extern struct vm_page *vm_page_get_next_tail(struct vm_page *pg);
extern size_t vm_bytes_to_pages(size_t bytes);
extern void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info);
extern struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags);
extern void vm_zone_init(
struct vm_zone *z,
const struct vm_zone_descriptor *zone_info);
extern struct vm_page *vm_zone_alloc_page(
struct vm_zone *z,
enum vm_page_order order,
enum vm_flags flags);
extern void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg);
extern struct vm_cache *vm_cache_create(const char *name, size_t objsz, enum vm_cache_flags flags);
extern struct vm_cache *vm_cache_create(
const char *name,
size_t objsz,
enum vm_cache_flags flags);
extern void vm_cache_init(struct vm_cache *cache);
extern void vm_cache_destroy(struct vm_cache *cache);
extern void *vm_cache_alloc(struct vm_cache *cache, enum vm_flags flags);
@@ -330,15 +346,18 @@ extern size_t vm_page_get_pfn_sparse(struct vm_page *pg);
#endif
#ifdef __cplusplus
inline void *operator new(size_t count, void *p) { return p; }
/* placement new: construct an object in caller-provided storage p
 * without allocating. count is the object size and is unused here.
 * NOTE(review): normally supplied by <new>; presumably declared here
 * because this is a freestanding kernel build — confirm. */
inline void *operator new(size_t count, void *p)
{
return p;
}
#define kmalloc_object(objtype, flags, ...) \
__extension__({ \
void *p = kmalloc(sizeof(objtype), flags); \
if (p) { \
new (p) objtype(__VA_ARGS__); \
} \
(objtype *)p; \
#define kmalloc_object(objtype, flags, ...) \
__extension__({ \
void *p = kmalloc(sizeof(objtype), flags); \
if (p) { \
new (p) objtype(__VA_ARGS__); \
} \
(objtype *)p; \
})
#endif