vm: add vm-object to represent non-contiguous physical memory allocations
A vm-object can be used to demand-allocate non-contiguous physical memory, and will provide an API for userspace programs to do the same. Unless a vm-object is created in place (i.e. to represent a specific area of physical memory), its memory pages are only allocated when the object is mapped AND someone attempts to access the memory.
This commit is contained in:
213
vm/vm-object.c
Normal file
213
vm/vm-object.c
Normal file
@@ -0,0 +1,213 @@
|
||||
#include <mango/printk.h>
|
||||
#include <mango/vm-object.h>
|
||||
|
||||
/* Downcast a generic object header pointer to its enclosing vm_object. */
#define VM_OBJECT_CAST(p) \
	OBJECT_C_CAST(struct vm_object, vo_base, &vm_object_type, p)

/*
 * Type descriptor for vm-objects, registered with the object subsystem by
 * vm_object_type_init().  ob_header_offset locates the embedded vo_base
 * header inside struct vm_object.
 */
static struct object_type vm_object_type = {
	.ob_name = "vm-object",
	.ob_size = sizeof(struct vm_object),
	.ob_header_offset = offsetof(struct vm_object, vo_base),
};

/* Page order used for all vm-object demand allocations (4 KiB pages). */
static const enum vm_page_order GLOBAL_PAGE_ORDER = VM_PAGE_4K;
|
||||
|
||||
static void put_page(
|
||||
struct vm_object *vmo,
|
||||
struct vm_page *new_page,
|
||||
off_t offset)
|
||||
{
|
||||
struct btree_node *cur = vmo->vo_pages.b_root;
|
||||
new_page->p_vmo_offset = offset;
|
||||
|
||||
if (!cur) {
|
||||
vmo->vo_pages.b_root = &new_page->p_bnode;
|
||||
btree_insert_fixup(&vmo->vo_pages, &new_page->p_bnode);
|
||||
return;
|
||||
}
|
||||
|
||||
while (cur) {
|
||||
struct vm_page *cur_page
|
||||
= BTREE_CONTAINER(struct vm_page, p_bnode, cur);
|
||||
struct btree_node *next = NULL;
|
||||
|
||||
off_t base = cur_page->p_vmo_offset;
|
||||
off_t limit = base + vm_page_get_size_bytes(cur_page);
|
||||
if (offset < base) {
|
||||
next = btree_left(cur);
|
||||
} else if (offset >= limit) {
|
||||
next = btree_right(cur);
|
||||
} else {
|
||||
return;
|
||||
}
|
||||
|
||||
if (next) {
|
||||
cur = next;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (offset < base) {
|
||||
btree_put_left(cur, &new_page->p_bnode);
|
||||
} else {
|
||||
btree_put_right(cur, &new_page->p_bnode);
|
||||
}
|
||||
|
||||
btree_insert_fixup(&vmo->vo_pages, &new_page->p_bnode);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Register the vm-object type with the object subsystem so that
 * object_create(&vm_object_type) can allocate instances.
 * Returns the status from object_type_register().
 */
kern_status_t vm_object_type_init(void)
{
	return object_type_register(&vm_object_type);
}
|
||||
|
||||
/*
 * Return the page order used for vm-object demand allocations
 * (currently VM_PAGE_4K).
 */
enum vm_page_order vm_object_global_page_order(void)
{
	return GLOBAL_PAGE_ORDER;
}
|
||||
|
||||
struct vm_object *vm_object_create(
|
||||
const char *name,
|
||||
size_t len,
|
||||
enum vm_prot prot)
|
||||
{
|
||||
size_t page_bytes = VM_PAGE_SIZE;
|
||||
uintptr_t page_mask = page_bytes - 1;
|
||||
|
||||
if (len & page_mask) {
|
||||
len &= ~page_mask;
|
||||
len += page_bytes;
|
||||
}
|
||||
|
||||
struct object *obj = object_create(&vm_object_type);
|
||||
if (!obj) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct vm_object *out = VM_OBJECT_CAST(obj);
|
||||
|
||||
out->vo_size = len;
|
||||
out->vo_prot = prot;
|
||||
|
||||
if (name) {
|
||||
strncpy(out->vo_name, name, sizeof out->vo_name);
|
||||
out->vo_name[sizeof out->vo_name - 1] = '\0';
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
extern struct vm_object *vm_object_create_in_place(
|
||||
const char *name,
|
||||
phys_addr_t base,
|
||||
size_t len,
|
||||
enum vm_prot prot)
|
||||
{
|
||||
struct vm_object *vmo = vm_object_create(name, len, prot);
|
||||
if (!vmo) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (phys_addr_t i = base, offset = 0; i < base + vmo->vo_size;
|
||||
i += VM_PAGE_SIZE, offset += VM_PAGE_SIZE) {
|
||||
struct vm_page *pg = vm_page_get(i);
|
||||
if (!pg) {
|
||||
printk("vm-object: invalid physical address %08llx", i);
|
||||
object_unref(&vmo->vo_base);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
put_page(vmo, pg, offset);
|
||||
}
|
||||
|
||||
vmo->vo_flags |= VMO_IN_PLACE;
|
||||
|
||||
return vmo;
|
||||
}
|
||||
|
||||
extern struct vm_page *vm_object_get_page(
|
||||
const struct vm_object *vo,
|
||||
off_t offset)
|
||||
{
|
||||
struct btree_node *cur = vo->vo_pages.b_root;
|
||||
while (cur) {
|
||||
struct vm_page *page
|
||||
= BTREE_CONTAINER(struct vm_page, p_bnode, cur);
|
||||
struct btree_node *next = NULL;
|
||||
|
||||
off_t base = page->p_vmo_offset;
|
||||
off_t limit = base + vm_page_get_size_bytes(page);
|
||||
if (offset < base) {
|
||||
next = btree_left(cur);
|
||||
} else if (offset >= limit) {
|
||||
next = btree_right(cur);
|
||||
} else {
|
||||
return page;
|
||||
}
|
||||
|
||||
cur = next;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
extern struct vm_page *vm_object_alloc_page(
|
||||
struct vm_object *vo,
|
||||
off_t offset,
|
||||
enum vm_page_order size)
|
||||
{
|
||||
struct vm_page *page = NULL;
|
||||
struct btree_node *cur = vo->vo_pages.b_root;
|
||||
if (!cur) {
|
||||
page = vm_page_alloc(VM_PAGE_4K, VM_NORMAL);
|
||||
if (!page) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
page->p_vmo_offset = offset;
|
||||
vo->vo_pages.b_root = &page->p_bnode;
|
||||
btree_insert_fixup(&vo->vo_pages, &page->p_bnode);
|
||||
return page;
|
||||
}
|
||||
|
||||
while (cur) {
|
||||
struct vm_page *page
|
||||
= BTREE_CONTAINER(struct vm_page, p_bnode, cur);
|
||||
struct btree_node *next = NULL;
|
||||
|
||||
off_t base = page->p_vmo_offset;
|
||||
off_t limit = base + vm_page_get_size_bytes(page);
|
||||
if (offset < base) {
|
||||
next = btree_left(cur);
|
||||
} else if (offset >= limit) {
|
||||
next = btree_right(cur);
|
||||
} else {
|
||||
return page;
|
||||
}
|
||||
|
||||
if (next) {
|
||||
cur = next;
|
||||
continue;
|
||||
}
|
||||
|
||||
page = vm_page_alloc(VM_PAGE_4K, VM_NORMAL);
|
||||
if (!page) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
page->p_vmo_offset = offset;
|
||||
|
||||
if (offset < base) {
|
||||
btree_put_left(cur, &page->p_bnode);
|
||||
} else {
|
||||
btree_put_right(cur, &page->p_bnode);
|
||||
}
|
||||
|
||||
btree_insert_fixup(&vo->vo_pages, &page->p_bnode);
|
||||
|
||||
return page;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
Reference in New Issue
Block a user