vm: implement demand paging via userspace services with vm-controller

2026-03-14 22:39:14 +00:00
parent f04c524bb5
commit 0af35c70ef
12 changed files with 826 additions and 100 deletions


@@ -1,23 +1,112 @@
#include <kernel/equeue.h>
#include <kernel/sched.h>
#include <kernel/thread.h>
#include <kernel/util.h>
#include <kernel/vm-controller.h>
#include <kernel/vm-object.h>
#include <mango/signal.h>

#define VM_CONTROLLER_CAST(p) \
        OBJECT_C_CAST(struct vm_controller, vc_base, &vm_controller_type, p)
BTREE_DEFINE_SIMPLE_INSERT(struct vm_object, vo_ctrl_node, vo_key, put_object)
BTREE_DEFINE_SIMPLE_GET(
        struct vm_object,
        equeue_key_t,
        vo_ctrl_node,
        vo_key,
        get_object)

static struct object_type vm_controller_type = {
        .ob_name = "vm-controller",
        .ob_size = sizeof(struct vm_controller),
        .ob_header_offset = offsetof(struct vm_controller, vc_base),
};

kern_status_t vm_controller_type_init(void)
{
        return object_type_register(&vm_controller_type);
}

struct vm_controller *vm_controller_cast(struct object *obj)
{
        return VM_CONTROLLER_CAST(obj);
}

struct vm_controller *vm_controller_create(void)
{
        struct object *ctrl_object = object_create(&vm_controller_type);
        if (!ctrl_object) {
                return NULL;
        }
        struct vm_controller *ctrl = VM_CONTROLLER_CAST(ctrl_object);
        return ctrl;
}
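
For context, a minimal sketch of how these entry points compose at boot, assuming a hypothetical vm_init_pagers() call site (the wrapper name is illustrative, not part of this commit):

/* Sketch only: register the type, then create the controller that will be
 * handed to the userspace pager service. vm_init_pagers() is hypothetical. */
kern_status_t vm_init_pagers(struct vm_controller **out)
{
        kern_status_t status = vm_controller_type_init();
        if (status != KERN_OK) {
                return status;
        }
        *out = vm_controller_create();
        return *out ? KERN_OK : KERN_NO_MEMORY;
}
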
/*
 * Return the first pending request, marking it in progress. On success the
 * request is returned with req->req_lock held so the caller can copy it out.
 */
static struct page_request *get_next_request(struct vm_controller *ctrl)
{
        struct btree_node *cur = btree_first(&ctrl->vc_requests);
        while (cur) {
                struct page_request *req
                        = BTREE_CONTAINER(struct page_request, req_node, cur);
                spin_lock(&req->req_lock);
                if (req->req_status == PAGE_REQUEST_PENDING) {
                        req->req_status = PAGE_REQUEST_IN_PROGRESS;
                        ctrl->vc_requests_waiting--;
                        return req;
                }
                spin_unlock(&req->req_lock);
                cur = btree_next(cur);
        }
        return NULL;
}

kern_status_t vm_controller_recv(
        struct vm_controller *ctrl,
        equeue_packet_page_request_t *out)
{
        struct page_request *req = get_next_request(ctrl);
        if (!req) {
                return KERN_NO_ENTRY;
        }
        /* That was the last pending request: drop the signal until the next
         * fault queues a new one. */
        if (ctrl->vc_requests_waiting == 0) {
                object_clear_signal(
                        &ctrl->vc_base,
                        VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED);
        }
        out->req_vmo = req->req_object->vo_key;
        out->req_type = req->req_type;
        out->req_offset = req->req_offset;
        out->req_length = req->req_length;
        spin_unlock(&req->req_lock);
        return KERN_OK;
}
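
As a usage sketch (not part of this commit): a pager service thread could drain the queue by looping on vm_controller_recv and answering each request with vm_controller_supply_pages below. The pager_lookup_vmo() and pager_read_backing() helpers are hypothetical stand-ins for the service's own bookkeeping and backing-store I/O:

/* Sketch only: synchronous pager loop. pager_lookup_vmo() and
 * pager_read_backing() are hypothetical stand-ins. */
static void pager_drain(struct vm_controller *ctrl)
{
        equeue_packet_page_request_t pkt;
        while (vm_controller_recv(ctrl, &pkt) == KERN_OK) {
                struct vm_object *dst = pager_lookup_vmo(pkt.req_vmo);
                struct vm_object *src = pager_read_backing(
                        dst,
                        pkt.req_offset,
                        pkt.req_length);
                vm_controller_supply_pages(
                        ctrl,
                        dst,
                        pkt.req_offset,
                        src,
                        0,
                        pkt.req_length);
        }
}
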
kern_status_t vm_controller_recv_async(
        struct vm_controller *ctrl,
        struct equeue *eq,
        equeue_key_t key)
{
        /* Replace any previously registered event queue, keeping a reference
         * on the new one for as long as it stays registered. */
        if (ctrl->vc_eq) {
                object_unref(&ctrl->vc_eq->eq_base);
        }
        object_ref(&eq->eq_base);
        ctrl->vc_eq = eq;
        ctrl->vc_eq_key = key;
        return KERN_OK;
}

kern_status_t vm_controller_create_object(
        struct vm_controller *ctrl,
        const char *name,
@@ -27,14 +116,45 @@ kern_status_t vm_controller_create_object(
        vm_prot_t prot,
        struct vm_object **out)
{
        struct vm_object *vmo = get_object(&ctrl->vc_objects, key);
        if (vmo) {
                return KERN_NAME_EXISTS;
        }
        vmo = vm_object_create(name, name_len, data_len, prot);
        if (!vmo) {
                return KERN_NO_MEMORY;
        }
        /* Controller and object reference each other for as long as the
         * object stays attached. */
        object_ref(&ctrl->vc_base);
        object_ref(&vmo->vo_base);
        vmo->vo_flags |= VMO_CONTROLLER;
        vmo->vo_ctrl = ctrl;
        vmo->vo_key = key;
        put_object(&ctrl->vc_objects, vmo);
        *out = vmo;
        return KERN_OK;
}

kern_status_t vm_controller_detach_object(
        struct vm_controller *ctrl,
        struct vm_object *vmo)
{
        if (vmo->vo_ctrl != ctrl) {
                return KERN_INVALID_ARGUMENT;
        }
        vmo->vo_ctrl = NULL;
        vmo->vo_key = 0;
        btree_delete(&ctrl->vc_objects, &vmo->vo_ctrl_node);
        /* Drop the cross-references taken in vm_controller_create_object(). */
        object_unref(&ctrl->vc_base);
        object_unref(&vmo->vo_base);
        return KERN_OK;
}

static kern_status_t try_enqueue(struct btree *tree, struct page_request *req)
@@ -91,22 +211,103 @@ static void wait_for_reply(
                        break;
                }
                spin_unlock_irqrestore(&req->req_lock, *lock_flags);
                schedule(SCHED_NORMAL);
                spin_lock_irqsave(&req->req_lock, lock_flags);
        }
        self->tr_state = THREAD_READY;
}
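
The hunk above is the tail of a classic open-coded sleep: the faulting thread marks itself blocked, drops the request lock, yields, and retakes the lock to recheck the completion status. Reconstructed in outline (THREAD_BLOCKED is an assumed state name; the exact loop shape is elided from this hunk, so this is orientation only):

/* Outline of wait_for_reply()'s wait loop, for orientation only. */
while (req->req_status != PAGE_REQUEST_COMPLETE) {
        self->tr_state = THREAD_BLOCKED;        /* assumed constant */
        spin_unlock_irqrestore(&req->req_lock, *lock_flags);
        schedule(SCHED_NORMAL);                 /* sleep until thread_awaken() */
        spin_lock_irqsave(&req->req_lock, lock_flags);
}
self->tr_state = THREAD_READY;
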
/*
 * Complete every in-flight request on obj whose first or last byte falls
 * inside the supplied range, and wake the faulting threads waiting on them.
 */
static void fulfill_requests(
        struct vm_controller *ctrl,
        struct vm_object *obj,
        off_t offset,
        size_t length,
        kern_status_t result)
{
        off_t limit = offset + length - 1;
        struct btree_node *cur = btree_first(&ctrl->vc_requests);
        while (cur) {
                struct page_request *req
                        = BTREE_CONTAINER(struct page_request, req_node, cur);
                spin_lock(&req->req_lock);
                bool match = false;
                off_t req_base = req->req_offset;
                off_t req_limit = req->req_offset + req->req_length - 1;
                if (req_base >= offset && req_base <= limit) {
                        match = true;
                } else if (req_limit >= offset && req_limit <= limit) {
                        match = true;
                }
                if (req->req_object != obj) {
                        match = false;
                }
                if (match) {
                        req->req_status = PAGE_REQUEST_COMPLETE;
                        req->req_result = result;
                        thread_awaken(req->req_sender);
                }
                spin_unlock(&req->req_lock);
                cur = btree_next(cur);
        }
}
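
To make the matching rule concrete, here is a standalone restatement of the predicate: a request [req_base, req_limit] is completed when either of its endpoints lands inside the supplied range [offset, limit]. Note that this also completes requests the supply only partially covers at an edge:

#include <stdbool.h>
#include <sys/types.h>

/* Same endpoint test as fulfill_requests(), lifted out for illustration. */
static bool request_overlaps_supply(
        off_t req_base, off_t req_limit, off_t offset, off_t limit)
{
        return (req_base >= offset && req_base <= limit)
                || (req_limit >= offset && req_limit <= limit);
}

/* Example: a request for [4096, 8191] is completed by a supply of
 * [0, 16383]; a request for [0, 16383] is not completed by a supply of
 * [4096, 8191], because neither of its endpoints falls inside it. */
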
kern_status_t vm_controller_supply_pages(
        struct vm_controller *ctrl,
        struct vm_object *dst,
        off_t dst_offset,
        struct vm_object *src,
        off_t src_offset,
        size_t count)
{
        /* The source must be a plain object and the destination must belong
         * to this controller. */
        if (src->vo_flags & VMO_CONTROLLER) {
                return KERN_INVALID_ARGUMENT;
        }
        if (dst->vo_ctrl != ctrl) {
                return KERN_INVALID_ARGUMENT;
        }
        kern_status_t status = vm_object_transfer(
                dst,
                dst_offset,
                src,
                src_offset,
                count,
                NULL);
        /* Wake any faulting threads covered by this supply, even on error,
         * so they can observe the result. */
        fulfill_requests(ctrl, dst, dst_offset, count, status);
        return status;
}

kern_status_t vm_controller_send_request(
        struct vm_controller *ctrl,
        struct page_request *req,
        unsigned long *irq_flags)
{
        /* Pick a random request id, bumping it past any unlikely collision. */
        fill_random(&req->req_id, sizeof req->req_id);
        while (!try_enqueue(&ctrl->vc_requests, req)) {
                req->req_id++;
        }
        ctrl->vc_requests_waiting++;
        object_assert_signal(
                &ctrl->vc_base,
                VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED);
        vm_controller_unlock(ctrl);
        wait_for_reply(ctrl, req, irq_flags);
        /* Reacquire the controller lock before the request lock to preserve
         * the controller -> request lock order. */
        spin_unlock_irqrestore(&req->req_lock, *irq_flags);
        vm_controller_lock_irqsave(ctrl, irq_flags);
        spin_lock(&req->req_lock);
        btree_delete(&ctrl->vc_requests, &req->req_node);
        return KERN_OK;
}
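
On the fault side, the expected flow (a hedged sketch, not taken verbatim from this commit) is to fill in a page_request, call vm_controller_send_request, and read the pager's verdict out of req_result once it returns. PAGE_REQUEST_READ, PAGE_SIZE, thread_current(), and spin_lock_init() are assumed names:

/* Sketch only: a fault handler blocking on the userspace pager. */
static kern_status_t demo_fault_in(
        struct vm_controller *ctrl,
        struct vm_object *vmo,
        off_t offset,
        unsigned long *irq_flags)
{
        struct page_request req = {
                .req_object = vmo,
                .req_type = PAGE_REQUEST_READ,  /* assumed request type */
                .req_offset = offset,
                .req_length = PAGE_SIZE,        /* assumed page-size constant */
                .req_status = PAGE_REQUEST_PENDING,
                .req_sender = thread_current(), /* assumed helper */
        };
        spin_lock_init(&req.req_lock);          /* assumed initializer */
        /* Locking simplified: send_request's unlock/relock sequence implies
         * the controller lock and req.req_lock are held on entry. */
        kern_status_t status
                = vm_controller_send_request(ctrl, &req, irq_flags);
        if (status != KERN_OK) {
                return status;
        }
        return req.req_result;  /* set by vm_controller_supply_pages() */
}
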