2026-03-14 22:39:14 +00:00
|
|
|
#include <kernel/equeue.h>
|
2026-03-12 20:39:28 +00:00
|
|
|
#include <kernel/sched.h>
|
|
|
|
|
#include <kernel/thread.h>
|
|
|
|
|
#include <kernel/util.h>
|
|
|
|
|
#include <kernel/vm-controller.h>
|
2026-03-14 22:39:14 +00:00
|
|
|
#include <kernel/vm-object.h>
|
|
|
|
|
#include <mango/signal.h>
|
|
|
|
|
|
|
|
|
|
/*
 * Downcast a generic struct object pointer to its containing
 * struct vm_controller via the embedded vc_base header.
 * OBJECT_C_CAST is given &vm_controller_type — presumably it checks
 * the object's type tag; confirm against the object system headers.
 */
#define VM_CONTROLLER_CAST(p) \
	OBJECT_C_CAST(struct vm_controller, vc_base, &vm_controller_type, p)
|
|
|
|
|
|
|
|
|
|
/*
 * Generate put_object()/get_object() helpers for a controller's object
 * tree: struct vm_object entries keyed by vo_key (an equeue_key_t),
 * linked through vo_ctrl_node.
 */
BTREE_DEFINE_SIMPLE_INSERT(struct vm_object, vo_ctrl_node, vo_key, put_object)
BTREE_DEFINE_SIMPLE_GET(
	struct vm_object,
	equeue_key_t,
	vo_ctrl_node,
	vo_key,
	get_object)
|
|
|
|
|
|
|
|
|
|
/* Object-type descriptor for vm_controller, registered in
 * vm_controller_type_init(). */
static struct object_type vm_controller_type = {
	.ob_name = "vm-controller",
	.ob_size = sizeof(struct vm_controller),
	/* Offset of the embedded object header within the controller. */
	.ob_header_offset = offsetof(struct vm_controller, vc_base),
};
|
2026-03-12 20:39:28 +00:00
|
|
|
|
|
|
|
|
/*
 * Register the vm-controller object type with the object system.
 * Returns the status reported by object_type_register().
 */
kern_status_t vm_controller_type_init(void)
{
	return object_type_register(&vm_controller_type);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Function wrapper around VM_CONTROLLER_CAST for callers outside this
 * translation unit (the macro needs the file-local type descriptor).
 */
struct vm_controller *vm_controller_cast(struct object *obj)
{
	return VM_CONTROLLER_CAST(obj);
}
|
|
|
|
|
|
|
|
|
|
struct vm_controller *vm_controller_create(void)
|
|
|
|
|
{
|
2026-03-14 22:39:14 +00:00
|
|
|
struct object *ctrl_object = object_create(&vm_controller_type);
|
|
|
|
|
if (!ctrl_object) {
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct vm_controller *ctrl = VM_CONTROLLER_CAST(ctrl_object);
|
|
|
|
|
|
|
|
|
|
return ctrl;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct page_request *get_next_request(struct vm_controller *ctrl)
|
|
|
|
|
{
|
|
|
|
|
struct btree_node *cur = btree_first(&ctrl->vc_requests);
|
|
|
|
|
while (cur) {
|
|
|
|
|
struct page_request *req
|
|
|
|
|
= BTREE_CONTAINER(struct page_request, req_node, cur);
|
|
|
|
|
spin_lock(&req->req_lock);
|
|
|
|
|
if (req->req_status == PAGE_REQUEST_PENDING) {
|
|
|
|
|
req->req_status = PAGE_REQUEST_IN_PROGRESS;
|
|
|
|
|
ctrl->vc_requests_waiting--;
|
|
|
|
|
return req;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
spin_unlock(&req->req_lock);
|
|
|
|
|
cur = btree_next(cur);
|
|
|
|
|
}
|
|
|
|
|
|
2026-03-12 20:39:28 +00:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2026-03-14 22:39:14 +00:00
|
|
|
kern_status_t vm_controller_recv(
|
|
|
|
|
struct vm_controller *ctrl,
|
|
|
|
|
equeue_packet_page_request_t *out)
|
|
|
|
|
{
|
|
|
|
|
struct page_request *req = NULL;
|
|
|
|
|
|
|
|
|
|
req = get_next_request(ctrl);
|
|
|
|
|
if (!req) {
|
|
|
|
|
return KERN_NO_ENTRY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (ctrl->vc_requests_waiting == 0) {
|
|
|
|
|
object_clear_signal(
|
|
|
|
|
&ctrl->vc_base,
|
|
|
|
|
VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
out->req_vmo = req->req_object->vo_key;
|
|
|
|
|
out->req_type = req->req_type;
|
|
|
|
|
out->req_offset = req->req_offset;
|
|
|
|
|
out->req_length = req->req_length;
|
|
|
|
|
|
|
|
|
|
spin_unlock(&req->req_lock);
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_status_t vm_controller_recv_async(
|
|
|
|
|
struct vm_controller *ctrl,
|
|
|
|
|
struct equeue *eq,
|
|
|
|
|
equeue_key_t key)
|
|
|
|
|
{
|
|
|
|
|
if (ctrl->vc_eq) {
|
|
|
|
|
object_unref(&ctrl->vc_eq->eq_base);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
object_ref(&eq->eq_base);
|
|
|
|
|
ctrl->vc_eq = eq;
|
|
|
|
|
ctrl->vc_eq_key = key;
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
2026-03-12 20:39:28 +00:00
|
|
|
kern_status_t vm_controller_create_object(
|
|
|
|
|
struct vm_controller *ctrl,
|
|
|
|
|
const char *name,
|
|
|
|
|
size_t name_len,
|
|
|
|
|
equeue_key_t key,
|
|
|
|
|
size_t data_len,
|
|
|
|
|
vm_prot_t prot,
|
|
|
|
|
struct vm_object **out)
|
|
|
|
|
{
|
2026-03-14 22:39:14 +00:00
|
|
|
struct vm_object *vmo = get_object(&ctrl->vc_objects, key);
|
|
|
|
|
if (vmo) {
|
|
|
|
|
return KERN_NAME_EXISTS;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
vmo = vm_object_create(name, name_len, data_len, prot);
|
|
|
|
|
if (!vmo) {
|
|
|
|
|
return KERN_NO_MEMORY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
object_ref(&ctrl->vc_base);
|
|
|
|
|
object_ref(&vmo->vo_base);
|
|
|
|
|
|
|
|
|
|
vmo->vo_flags |= VMO_CONTROLLER;
|
|
|
|
|
vmo->vo_ctrl = ctrl;
|
|
|
|
|
vmo->vo_key = key;
|
|
|
|
|
|
|
|
|
|
put_object(&ctrl->vc_objects, vmo);
|
|
|
|
|
|
|
|
|
|
*out = vmo;
|
|
|
|
|
return KERN_OK;
|
2026-03-12 20:39:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_status_t vm_controller_detach_object(
|
|
|
|
|
struct vm_controller *ctrl,
|
|
|
|
|
struct vm_object *vmo)
|
|
|
|
|
{
|
2026-03-14 22:39:14 +00:00
|
|
|
if (vmo->vo_ctrl != ctrl) {
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
vmo->vo_ctrl = NULL;
|
|
|
|
|
vmo->vo_key = 0;
|
|
|
|
|
btree_delete(&ctrl->vc_objects, &vmo->vo_ctrl_node);
|
|
|
|
|
|
|
|
|
|
object_unref(&ctrl->vc_base);
|
|
|
|
|
object_unref(&vmo->vo_base);
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
2026-03-12 20:39:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static kern_status_t try_enqueue(struct btree *tree, struct page_request *req)
|
|
|
|
|
{
|
|
|
|
|
if (!tree->b_root) {
|
|
|
|
|
tree->b_root = &req->req_node;
|
|
|
|
|
btree_insert_fixup(tree, &req->req_node);
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct btree_node *cur = tree->b_root;
|
|
|
|
|
while (1) {
|
|
|
|
|
struct page_request *cur_node
|
|
|
|
|
= BTREE_CONTAINER(struct page_request, req_node, cur);
|
|
|
|
|
struct btree_node *next = NULL;
|
|
|
|
|
|
|
|
|
|
if (req->req_id > cur_node->req_id) {
|
|
|
|
|
next = btree_right(cur);
|
|
|
|
|
|
|
|
|
|
if (!next) {
|
|
|
|
|
btree_put_right(cur, &req->req_node);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
} else if (req->req_id < cur_node->req_id) {
|
|
|
|
|
next = btree_left(cur);
|
|
|
|
|
|
|
|
|
|
if (!next) {
|
|
|
|
|
btree_put_left(cur, &req->req_node);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cur = next;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
btree_insert_fixup(tree, &req->req_node);
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Block the calling thread until req->req_status becomes
 * PAGE_REQUEST_COMPLETE.
 *
 * Called with req->req_lock held and irq flags saved in *lock_flags;
 * returns with the lock re-held. The lock is dropped around each
 * schedule() so the completing side can take it.
 *
 * NOTE(review): `ctrl` is unused here — confirm whether it can be
 * dropped from the signature or is kept for a planned wait-queue.
 */
static void wait_for_reply(
	struct vm_controller *ctrl,
	struct page_request *req,
	unsigned long *lock_flags)
{
	struct wait_item waiter;
	struct thread *self = current_thread();

	/* NOTE(review): `waiter` is initialized but never visibly enqueued
	 * anywhere in this function — confirm wait_item_init() links it to
	 * a wait list, otherwise this looks like dead code. */
	wait_item_init(&waiter, self);
	for (;;) {
		/* Mark ourselves sleeping BEFORE re-checking the status so a
		 * wakeup that lands between the check and schedule() is not
		 * lost. */
		self->tr_state = THREAD_SLEEPING;
		if (req->req_status == PAGE_REQUEST_COMPLETE) {
			break;
		}

		/* Drop the request lock while sleeping; reacquire before the
		 * next status check. */
		spin_unlock_irqrestore(&req->req_lock, *lock_flags);
		schedule(SCHED_NORMAL);
		spin_lock_irqsave(&req->req_lock, lock_flags);
	}

	self->tr_state = THREAD_READY;
}
|
|
|
|
|
|
2026-03-14 22:39:14 +00:00
|
|
|
static void fulfill_requests(
|
|
|
|
|
struct vm_controller *ctrl,
|
|
|
|
|
struct vm_object *obj,
|
|
|
|
|
off_t offset,
|
|
|
|
|
size_t length,
|
|
|
|
|
kern_status_t result)
|
|
|
|
|
{
|
|
|
|
|
off_t limit = offset + length - 1;
|
|
|
|
|
struct btree_node *cur = btree_first(&ctrl->vc_requests);
|
|
|
|
|
while (cur) {
|
|
|
|
|
struct page_request *req
|
|
|
|
|
= BTREE_CONTAINER(struct page_request, req_node, cur);
|
|
|
|
|
spin_lock(&req->req_lock);
|
|
|
|
|
bool match = false;
|
|
|
|
|
off_t req_base = req->req_offset;
|
|
|
|
|
off_t req_limit = req->req_offset + req->req_length - 1;
|
|
|
|
|
|
|
|
|
|
if (req_base >= offset && req_base <= limit) {
|
|
|
|
|
match = true;
|
|
|
|
|
} else if (req_limit >= offset && req_limit <= limit) {
|
|
|
|
|
match = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (req->req_object != obj) {
|
|
|
|
|
match = false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (match) {
|
|
|
|
|
req->req_status = PAGE_REQUEST_COMPLETE;
|
|
|
|
|
req->req_result = result;
|
|
|
|
|
thread_awaken(req->req_sender);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
spin_unlock(&req->req_lock);
|
|
|
|
|
cur = btree_next(cur);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_status_t vm_controller_supply_pages(
|
|
|
|
|
struct vm_controller *ctrl,
|
|
|
|
|
struct vm_object *dst,
|
|
|
|
|
off_t dst_offset,
|
|
|
|
|
struct vm_object *src,
|
|
|
|
|
off_t src_offset,
|
|
|
|
|
size_t count)
|
|
|
|
|
{
|
|
|
|
|
if (src->vo_flags & VMO_CONTROLLER) {
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (dst->vo_ctrl != ctrl) {
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_status_t status = vm_object_transfer(
|
|
|
|
|
dst,
|
|
|
|
|
dst_offset,
|
|
|
|
|
src,
|
|
|
|
|
src_offset,
|
|
|
|
|
count,
|
|
|
|
|
NULL);
|
|
|
|
|
fulfill_requests(ctrl, dst, dst_offset, count, status);
|
|
|
|
|
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
2026-03-12 20:39:28 +00:00
|
|
|
/*
 * Submit a page request to the controller and block until it is
 * completed (see fulfill_requests()).
 *
 * Called with the controller lock and req->req_lock held, irq flags in
 * *irq_flags (the function calls vm_controller_unlock() and later
 * drops/retakes req_lock); returns with both locks re-held.
 */
kern_status_t vm_controller_send_request(
	struct vm_controller *ctrl,
	struct page_request *req,
	unsigned long *irq_flags)
{
	/* Pick a random id; on collision, bump until the insert succeeds. */
	fill_random(&req->req_id, sizeof req->req_id);
	while (!try_enqueue(&ctrl->vc_requests, req)) {
		req->req_id++;
	}

	ctrl->vc_requests_waiting++;
	object_assert_signal(
		&ctrl->vc_base,
		VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED);

	/* Release the controller lock while sleeping so the controller
	 * thread can service the request; req_lock is cycled inside
	 * wait_for_reply(). */
	vm_controller_unlock(ctrl);
	wait_for_reply(ctrl, req, irq_flags);

	/* Reacquire in controller-then-request order: drop req_lock first
	 * so we never hold it while taking the controller lock. */
	spin_unlock_irqrestore(&req->req_lock, *irq_flags);
	vm_controller_lock_irqsave(ctrl, irq_flags);
	spin_lock(&req->req_lock);

	/* Request is complete; remove it from the tree. */
	btree_delete(&ctrl->vc_requests, &req->req_node);

	return KERN_OK;
}
|