vm: add vm-object to represent non-contiguous physical memory allocations

vm-object can be used to demand-allocate non-contiguous physical memory, and
will provide an API for userspace programs to do the same. Unless a vm-object
is created in-place (i.e. to represent a specific area of physical memory),
its memory pages are only allocated when the object is mapped AND someone
attempts to access the memory.
This commit is contained in:
2026-02-08 12:56:43 +00:00
parent 14ebcd4875
commit b8ccffd2d4
3 changed files with 283 additions and 0 deletions

68
include/mango/vm-object.h Normal file
View File

@@ -0,0 +1,68 @@
#ifndef MANGO_VM_OBJECT_H_
#define MANGO_VM_OBJECT_H_
#include <mango/locks.h>
#include <mango/object.h>
/* maximum length of vo_name, in bytes, including the NUL terminator */
#define VM_OBJECT_NAME_MAX 64
enum vm_object_flags {
        /* the memory behind this vm-object wasn't allocated by us, and
         * therefore shouldn't be freed by us */
        VMO_IN_PLACE = 0x01u,
};
/* a vm_object represents a (possibly non-contiguous) physical memory
 * allocation. the pages backing it are tracked in a btree keyed by
 * their byte offset within the object. */
struct vm_object {
        /* embedded object header; also used by the lock helpers below */
        struct object vo_base;
        /* human-readable name, always NUL-terminated */
        char vo_name[VM_OBJECT_NAME_MAX];
        enum vm_object_flags vo_flags;
        /* queue of struct vm_region_mapping */
        struct queue vo_mappings;
        /* memory protection flags. mappings of this vm_object can only use
         * a subset of the flags set in this mask. */
        enum vm_prot vo_prot;
        /* btree of vm_pages that have been allocated to this vm_object.
         * vm_page->p_vmo_offset and the size of each page is the bst key. */
        struct btree vo_pages;
        /* total length of the vm_object in bytes. */
        size_t vo_size;
};
/* register the vm-object type with the object subsystem; called once
 * during vm bootstrap. */
extern kern_status_t vm_object_type_init(void);
/* create a vm_object with the specified length in bytes and protection flags.
 * the length will be automatically rounded up to the nearest vm_object page
 * order size. the actual page frames themselves won't be allocated until
 * they are mapped and accessed. */
extern struct vm_object *vm_object_create(
        const char *name,
        size_t len,
        enum vm_prot prot);
/* create a vm_object that represents the specified range of physical memory.
 * the length will be automatically rounded up to the nearest vm_object page
 * order size.
 * NOTE this function assumes that the physical memory has already been
 * reserved, and is not in use by any other kernel component. */
extern struct vm_object *vm_object_create_in_place(
        const char *name,
        phys_addr_t base,
        size_t len,
        enum vm_prot prot);
/* return the vm_page covering the given byte offset into the object, or
 * NULL if no page has been allocated there yet. */
extern struct vm_page *vm_object_get_page(
        const struct vm_object *vo,
        off_t offset);
/* allocate a page frame to back the given byte offset and insert it into
 * vo_pages. if a page already covers the offset, that page is returned
 * instead; returns NULL on allocation failure. */
extern struct vm_page *vm_object_alloc_page(
        struct vm_object *vo,
        off_t offset,
        enum vm_page_order size);
/* generates the lock/unlock helpers for struct vm_object, backed by the
 * embedded vo_base header */
DEFINE_OBJECT_LOCK_FUNCTION(vm_object, vo_base)
#endif /* MANGO_VM_OBJECT_H_ */

View File

@@ -3,6 +3,7 @@
 #include <mango/memblock.h>
 #include <mango/printk.h>
 #include <mango/status.h>
+#include <mango/vm-object.h>
 #include <mango/vm.h>
 #include <stddef.h>
 #include <stdint.h>
@@ -39,6 +40,7 @@ kern_status_t vm_bootstrap(
 	}
 	kmalloc_init();
+	vm_object_type_init();
 	return KERN_OK;
 }

213
vm/vm-object.c Normal file
View File

@@ -0,0 +1,213 @@
#include <mango/printk.h>
#include <mango/vm-object.h>
/* downcast a generic struct object pointer to its containing vm_object.
 * NOTE(review): presumably OBJECT_C_CAST also verifies the object is of
 * vm_object_type — confirm against <mango/object.h>. */
#define VM_OBJECT_CAST(p) \
        OBJECT_C_CAST(struct vm_object, vo_base, &vm_object_type, p)
/* type descriptor registered with the object subsystem at boot; gives
 * the allocator the size and header offset of struct vm_object */
static struct object_type vm_object_type = {
        .ob_name = "vm-object",
        .ob_size = sizeof(struct vm_object),
        .ob_header_offset = offsetof(struct vm_object, vo_base),
};
/* page order used to carve vm_objects into pages (4 KiB) */
static const enum vm_page_order GLOBAL_PAGE_ORDER = VM_PAGE_4K;
/* link new_page into vmo's page btree, keyed by its byte offset within
 * the object. the caller supplies the offset; it is stored on the page
 * before insertion.
 * NOTE(review): if an existing page already covers the offset, new_page
 * is silently dropped without being inserted — callers cannot detect
 * this. confirm that this can't happen for well-formed inputs. */
static void put_page(
        struct vm_object *vmo,
        struct vm_page *new_page,
        off_t offset)
{
        struct btree_node *node = vmo->vo_pages.b_root;
        new_page->p_vmo_offset = offset;
        /* empty tree: the new page becomes the root */
        if (node == NULL) {
                vmo->vo_pages.b_root = &new_page->p_bnode;
                btree_insert_fixup(&vmo->vo_pages, &new_page->p_bnode);
                return;
        }
        for (;;) {
                struct vm_page *pg
                        = BTREE_CONTAINER(struct vm_page, p_bnode, node);
                off_t lo = pg->p_vmo_offset;
                off_t hi = lo + vm_page_get_size_bytes(pg);
                if (offset >= lo && offset < hi) {
                        /* already covered — see NOTE above */
                        return;
                }
                int go_left = offset < lo;
                struct btree_node *child
                        = go_left ? btree_left(node) : btree_right(node);
                if (child) {
                        node = child;
                        continue;
                }
                /* found the leaf position: attach and rebalance */
                if (go_left) {
                        btree_put_left(node, &new_page->p_bnode);
                } else {
                        btree_put_right(node, &new_page->p_bnode);
                }
                btree_insert_fixup(&vmo->vo_pages, &new_page->p_bnode);
                return;
        }
}
/* register the vm-object type with the kernel object subsystem; called
 * once from vm_bootstrap(). returns the registration status. */
kern_status_t vm_object_type_init(void)
{
        return object_type_register(&vm_object_type);
}
/* report the page order used to back vm_objects (currently VM_PAGE_4K). */
enum vm_page_order vm_object_global_page_order(void)
{
        return GLOBAL_PAGE_ORDER;
}
/* create an empty (demand-allocated) vm_object.
 * @name: optional human-readable name, truncated to VM_OBJECT_NAME_MAX-1.
 * @len: length in bytes; rounded up to a whole number of pages.
 * @prot: maximum protection flags mappings of this object may use.
 * returns the new object (refcounted via vo_base) or NULL on failure. */
struct vm_object *vm_object_create(
        const char *name,
        size_t len,
        enum vm_prot prot)
{
        const size_t page_bytes = VM_PAGE_SIZE;
        const size_t page_mask = page_bytes - 1;
        if (len & page_mask) {
                /* round up to the next page boundary; fail instead of
                 * letting the addition wrap to 0 when len is within one
                 * page of SIZE_MAX */
                if (len > SIZE_MAX - page_bytes) {
                        return NULL;
                }
                len = (len & ~page_mask) + page_bytes;
        }
        struct object *obj = object_create(&vm_object_type);
        if (!obj) {
                return NULL;
        }
        struct vm_object *out = VM_OBJECT_CAST(obj);
        out->vo_size = len;
        out->vo_prot = prot;
        if (name) {
                /* strncpy may leave the buffer unterminated; force it */
                strncpy(out->vo_name, name, sizeof out->vo_name);
                out->vo_name[sizeof out->vo_name - 1] = '\0';
        }
        return out;
}
/* create a vm_object backed by an already-reserved range of physical
 * memory starting at @base. the pages are looked up (not allocated) and
 * inserted into the object; VMO_IN_PLACE marks them as not-ours-to-free.
 * returns NULL if any page frame in the range is invalid. */
struct vm_object *vm_object_create_in_place(
        const char *name,
        phys_addr_t base,
        size_t len,
        enum vm_prot prot)
{
        struct vm_object *vmo = vm_object_create(name, len, prot);
        if (!vmo) {
                return NULL;
        }
        /* set VMO_IN_PLACE BEFORE populating the page tree: if we bail
         * out mid-loop below, the object_unref teardown path must already
         * know it does not own these page frames and must not free them */
        vmo->vo_flags |= VMO_IN_PLACE;
        /* iterate by offset rather than absolute address so the loop
         * bound can't overflow when base + vo_size wraps phys_addr_t */
        for (size_t offset = 0; offset < vmo->vo_size;
                        offset += VM_PAGE_SIZE) {
                phys_addr_t pa = base + offset;
                struct vm_page *pg = vm_page_get(pa);
                if (!pg) {
                        /* cast so %llx matches regardless of the width of
                         * phys_addr_t (passing it raw is UB if 32-bit) */
                        printk("vm-object: invalid physical address %08llx",
                                (unsigned long long)pa);
                        object_unref(&vmo->vo_base);
                        return NULL;
                }
                put_page(vmo, pg, (off_t)offset);
        }
        return vmo;
}
/* look up the vm_page that covers byte @offset of @vo.
 * walks the offset-keyed btree; returns NULL if no page has been
 * allocated at that offset yet. */
struct vm_page *vm_object_get_page(
        const struct vm_object *vo,
        off_t offset)
{
        struct btree_node *node = vo->vo_pages.b_root;
        while (node != NULL) {
                struct vm_page *pg
                        = BTREE_CONTAINER(struct vm_page, p_bnode, node);
                off_t start = pg->p_vmo_offset;
                off_t end = start + vm_page_get_size_bytes(pg);
                if (offset < start) {
                        node = btree_left(node);
                } else if (offset >= end) {
                        node = btree_right(node);
                } else {
                        /* start <= offset < end: this page covers it */
                        return pg;
                }
        }
        return NULL;
}
/* ensure a page frame backs byte @offset of @vo. if a page already
 * covers the offset it is returned as-is; otherwise a new page of order
 * @size is allocated, keyed by @offset, and inserted into vo_pages.
 * returns NULL on allocation failure.
 * NOTE(review): a newly allocated page of order > VM_PAGE_4K could span
 * the range of an existing neighbour; confirm callers guarantee
 * non-overlapping requests. */
struct vm_page *vm_object_alloc_page(
        struct vm_object *vo,
        off_t offset,
        enum vm_page_order size)
{
        /* descend to either a covering page or the leaf slot where a new
         * page belongs, remembering the attachment point */
        struct btree_node *cur = vo->vo_pages.b_root;
        struct btree_node *parent = NULL;
        int go_left = 0;
        while (cur) {
                struct vm_page *existing
                        = BTREE_CONTAINER(struct vm_page, p_bnode, cur);
                off_t base = existing->p_vmo_offset;
                off_t limit = base + vm_page_get_size_bytes(existing);
                if (offset >= base && offset < limit) {
                        /* already backed: nothing to allocate */
                        return existing;
                }
                parent = cur;
                go_left = offset < base;
                cur = go_left ? btree_left(cur) : btree_right(cur);
        }
        /* honour the caller-requested page order; the original code
         * ignored @size and always allocated VM_PAGE_4K */
        struct vm_page *page = vm_page_alloc(size, VM_NORMAL);
        if (!page) {
                return NULL;
        }
        page->p_vmo_offset = offset;
        if (!parent) {
                /* tree was empty: new page becomes the root */
                vo->vo_pages.b_root = &page->p_bnode;
        } else if (go_left) {
                btree_put_left(parent, &page->p_bnode);
        } else {
                btree_put_right(parent, &page->p_bnode);
        }
        btree_insert_fixup(&vo->vo_pages, &page->p_bnode);
        return page;
}