vm: implement demand paging via userspace vm-controller services

2026-03-14 22:39:14 +00:00
parent f04c524bb5
commit 0af35c70ef
12 changed files with 826 additions and 100 deletions

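Roughly, the new fault path works like this: when address_space_demand_map() faults on a vm-area whose backing vm-object has a controller attached (vo_ctrl), the missing page is requested from that userspace service (VMO_REQUEST_MISSING_PAGE); otherwise the kernel allocates the page itself (VMO_ALLOCATE_MISSING_PAGE). The sketch below only illustrates that decision, using simplified stand-in types and hypothetical helpers (get_page, map_page); the actual code is in the hunks that follow.

/*
 * Illustration only: simplified stand-ins, not the kernel's real API.
 */
#include <stdbool.h>
#include <stddef.h>

struct vm_page;
struct vm_controller;

struct vm_object {
	struct vm_controller *vo_ctrl;	/* userspace pager; NULL for plain anonymous memory */
};

struct vm_area {
	struct vm_object *vma_object;
	size_t vma_base;
	size_t vma_object_offset;
};

/* hypothetical stand-ins for vm_object_get_page() and pmap_add() from the diff */
struct vm_page *get_page(struct vm_object *obj, size_t off, bool ask_controller);
int map_page(size_t addr, struct vm_page *pg);

/*
 * On a fault at `addr`: if the backing vm-object has a vm-controller,
 * request the missing page from that userspace service; otherwise
 * allocate it locally.  Either way, install the mapping afterwards.
 */
int demand_fault(struct vm_area *area, size_t addr)
{
	size_t off = addr - area->vma_base + area->vma_object_offset;
	bool ask_controller = (area->vma_object->vo_ctrl != NULL);

	struct vm_page *pg = get_page(area->vma_object, off, ask_controller);
	if (!pg)
		return -1;	/* request failed or out of memory */

	return map_page(addr, pg);
}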

@@ -200,13 +200,24 @@ static bool is_area_free(
return false;
}
if (base < cur_area->vma_base && limit > cur_area->vma_limit) {
return false;
}
if (base > cur_area->vma_limit) {
cur = btree_right(cur);
} else if (limit < cur_area->vma_base) {
cur = btree_left(cur);
} else {
/* what */
panic("unhandled case in is_area_free");
panic("unhandled case in is_area_free. base=%zx, "
"len=%zx, "
"limit=%zx, cur_area=[%zx-%zx]",
base,
len,
limit,
cur_area->vma_base,
cur_area->vma_limit);
}
}
@@ -247,13 +258,24 @@ static bool is_area_reserved(
return true;
}
if (base < cur_area->vma_base && limit > cur_area->vma_limit) {
return false;
}
if (base > cur_area->vma_limit) {
cur = btree_right(cur);
} else if (limit < cur_area->vma_base) {
cur = btree_left(cur);
} else {
/* what */
panic("unhandled case in is_area_reserved");
panic("unhandled case in is_area_reserved. base=%zx, "
"len=%zx, "
"limit=%zx, cur_area=[%zx-%zx]",
base,
len,
limit,
cur_area->vma_base,
cur_area->vma_limit);
}
}
@@ -335,17 +357,17 @@ static void vm_iterator_begin(
off_t object_offset = base - it->it_mapping->vma_base
+ it->it_mapping->vma_object_offset;
struct vm_page *pg = NULL;
enum vm_object_flags flags = 0;
if (prot & VM_PROT_WRITE) {
pg = vm_object_alloc_page(
it->it_mapping->vma_object,
object_offset,
VM_PAGE_4K);
} else {
pg = vm_object_get_page(
it->it_mapping->vma_object,
object_offset);
flags |= VMO_ALLOCATE_MISSING_PAGE;
}
pg = vm_object_get_page(
it->it_mapping->vma_object,
object_offset,
flags,
NULL);
if (!pg) {
return;
}
@@ -405,17 +427,17 @@ static kern_status_t vm_iterator_seek(struct vm_iterator *it, size_t nr_bytes)
+ it->it_mapping->vma_object_offset;
struct vm_page *pg = NULL;
enum vm_object_flags flags = 0;
if (it->it_prot & VM_PROT_WRITE) {
pg = vm_object_alloc_page(
it->it_mapping->vma_object,
object_offset,
VM_PAGE_4K);
} else {
pg = vm_object_get_page(
it->it_mapping->vma_object,
object_offset);
flags |= VMO_ALLOCATE_MISSING_PAGE;
}
pg = vm_object_get_page(
it->it_mapping->vma_object,
object_offset,
flags,
NULL);
if (!pg) {
return KERN_NO_MEMORY;
}
@@ -1094,6 +1116,48 @@ bool address_space_validate_access(
return true;
}
static kern_status_t request_missing_page(
struct address_space *region,
virt_addr_t addr,
off_t object_offset,
struct vm_object *object,
vm_prot_t prot,
enum pmap_fault_flags flags,
unsigned long *irq_flags)
{
/* here:
* `region` is locked.
* `object` is unlocked.
* `irq_flags` must be restored when `region` is unlocked.
* the relevant page in `object` may or may not be committed.
* if it isn't, it needs to be requested.
*/
vm_object_lock(object);
address_space_unlock(region);
struct vm_page *pg = vm_object_get_page(
object,
object_offset,
VMO_ALLOCATE_MISSING_PAGE | VMO_REQUEST_MISSING_PAGE,
irq_flags);
if (!pg) {
vm_object_unlock_irqrestore(object, *irq_flags);
return KERN_FATAL_ERROR;
}
/* now: `region` is unlocked, and `object` is locked */
kern_status_t status = pmap_add(
region->s_pmap,
addr,
vm_page_get_pfn(pg),
prot,
PMAP_NORMAL);
vm_object_unlock_irqrestore(object, *irq_flags);
return status;
}
/* this function must be called with `region` locked */
kern_status_t address_space_demand_map(
struct address_space *region,
@@ -1105,12 +1169,26 @@ kern_status_t address_space_demand_map(
return KERN_NO_ENTRY;
}
unsigned long irq_flags;
address_space_lock_irqsave(region, &irq_flags);
struct vm_area *area = get_entry(region, addr, GET_ENTRY_EXACT);
if (!area) {
if (!area || !area->vma_object) {
address_space_unlock_irqrestore(region, irq_flags);
return KERN_NO_ENTRY;
}
off_t object_offset = addr - area->vma_base + area->vma_object_offset;
if (area->vma_object->vo_ctrl) {
return request_missing_page(
region,
addr,
object_offset,
area->vma_object,
area->vma_prot,
flags,
&irq_flags);
}
#if 0
tracek("vm: tried to access vm-object %s at offset=%05llx",
@@ -1118,20 +1196,25 @@ kern_status_t address_space_demand_map(
object_offset);
#endif
unsigned long lock_flags;
vm_object_lock_irqsave(area->vma_object, &lock_flags);
struct vm_page *pg = vm_object_alloc_page(
/* simple case: this vm-object is not attached to a controller */
vm_object_lock(area->vma_object);
struct vm_page *pg = vm_object_get_page(
area->vma_object,
object_offset,
VM_PAGE_4K);
vm_object_unlock_irqrestore(area->vma_object, lock_flags);
// tracek("vm: mapping %07llx -> %10llx", vm_page_get_paddr(pg), addr);
return pmap_add(
VMO_ALLOCATE_MISSING_PAGE,
NULL);
// tracek("vm: mapping %07llx -> %10llx", vm_page_get_paddr(pg),
// addr);
kern_status_t status = pmap_add(
region->s_pmap,
addr,
vm_page_get_pfn(pg),
area->vma_prot,
PMAP_NORMAL);
vm_object_unlock(area->vma_object);
address_space_unlock_irqrestore(region, irq_flags);
return status;
}
virt_addr_t address_space_get_base_address(const struct address_space *region)