/*
 * mango/syscall/vm-controller.c
 *
 * Syscall entry points for the vm-controller object: create a controller,
 * receive page requests (sync and async), create/detach VM objects, and
 * supply pages between VM objects.
 */

#include <kernel/equeue.h>
#include <kernel/sched.h>
#include <kernel/syscall.h>
#include <kernel/task.h>
#include <kernel/vm-controller.h>
#include <kernel/vm-object.h>
/*
 * sys_vm_controller_create - allocate a new VM controller and install a
 * handle to it in the calling task's handle table.
 *
 * @out: user pointer receiving the new handle on success.
 *
 * Returns KERN_OK on success, KERN_MEMORY_FAULT if @out is not writable
 * by the caller, KERN_NO_MEMORY if allocation fails, or the error from
 * task_open_handle() (in which case the controller reference is dropped).
 */
kern_status_t sys_vm_controller_create(kern_handle_t *out)
{
	struct task *caller = current_task();

	if (!validate_access_w(caller, out, sizeof *out)) {
		return KERN_MEMORY_FAULT;
	}

	struct vm_controller *controller = vm_controller_create();
	if (controller == NULL) {
		return KERN_NO_MEMORY;
	}

	kern_status_t rc = task_open_handle(caller, &controller->vc_base, 0, out);
	if (rc != KERN_OK) {
		/* Handle installation failed: drop the creation reference. */
		object_unref(&controller->vc_base);
	}
	return rc;
}
/*
 * sys_vm_controller_recv - synchronously receive one page-request packet
 * from a VM controller into a user buffer.
 *
 * @ctrl_handle: handle naming a vm_controller in the caller's table.
 * @out:         user pointer receiving the packet.
 *
 * Returns KERN_MEMORY_FAULT if @out is not writable, the resolution
 * error if the handle is bad, KERN_INVALID_ARGUMENT if the handle does
 * not name a vm_controller, otherwise the result of vm_controller_recv().
 */
kern_status_t sys_vm_controller_recv(
	kern_handle_t ctrl_handle,
	equeue_packet_page_request_t *out)
{
	struct task *caller = current_task();

	if (!validate_access_w(caller, out, sizeof *out)) {
		return KERN_MEMORY_FAULT;
	}

	unsigned long irq_flags;
	task_lock_irqsave(caller, &irq_flags);

	struct object *obj = NULL;
	handle_flags_t hflags = 0;
	kern_status_t rc = task_resolve_handle(caller, ctrl_handle, &obj, &hflags);
	if (rc != KERN_OK) {
		task_unlock_irqrestore(caller, irq_flags);
		return rc;
	}

	/* Cast under the task lock, then drop it before touching the controller. */
	struct vm_controller *controller = vm_controller_cast(obj);
	task_unlock_irqrestore(caller, irq_flags);

	if (controller == NULL) {
		object_unref(obj);
		return KERN_INVALID_ARGUMENT;
	}

	vm_controller_lock_irqsave(controller, &irq_flags);
	rc = vm_controller_recv(controller, out);
	vm_controller_unlock_irqrestore(controller, irq_flags);

	object_unref(obj);
	return rc;
}
/*
 * sys_vm_controller_recv_async - arm asynchronous delivery of page
 * requests from a VM controller onto an event queue.
 *
 * @ctrl_handle: handle naming a vm_controller.
 * @eq_handle:   handle naming an equeue to deliver packets to.
 * @key:         caller-chosen key tagged onto delivered packets.
 *
 * Returns a resolution error if either handle is bad,
 * KERN_INVALID_ARGUMENT if either handle names the wrong object type,
 * otherwise the result of vm_controller_recv_async().
 */
kern_status_t sys_vm_controller_recv_async(
	kern_handle_t ctrl_handle,
	kern_handle_t eq_handle,
	equeue_key_t key)
{
	struct task *caller = current_task();
	unsigned long irq_flags;

	task_lock_irqsave(caller, &irq_flags);

	struct object *ctrl_obj = NULL;
	struct object *eq_obj = NULL;
	handle_flags_t ctrl_hflags = 0;
	handle_flags_t eq_hflags = 0;

	kern_status_t rc =
		task_resolve_handle(caller, ctrl_handle, &ctrl_obj, &ctrl_hflags);
	if (rc != KERN_OK) {
		task_unlock_irqrestore(caller, irq_flags);
		return rc;
	}

	rc = task_resolve_handle(caller, eq_handle, &eq_obj, &eq_hflags);
	if (rc != KERN_OK) {
		/* Only the first resolution succeeded; drop that reference. */
		object_unref(ctrl_obj);
		task_unlock_irqrestore(caller, irq_flags);
		return rc;
	}

	struct vm_controller *controller = vm_controller_cast(ctrl_obj);
	struct equeue *queue = equeue_cast(eq_obj);
	task_unlock_irqrestore(caller, irq_flags);

	if (controller == NULL || queue == NULL) {
		object_unref(ctrl_obj);
		object_unref(eq_obj);
		return KERN_INVALID_ARGUMENT;
	}

	vm_controller_lock_irqsave(controller, &irq_flags);
	rc = vm_controller_recv_async(controller, queue, key);
	vm_controller_unlock_irqrestore(controller, irq_flags);

	object_unref(ctrl_obj);
	object_unref(eq_obj);
	return rc;
}
/*
 * sys_vm_controller_create_object - create a VM object owned by a
 * controller and return a handle to it.
 *
 * @ctrl_handle: handle naming a vm_controller.
 * @name:        user pointer to the object's name (not NUL-checked here;
 *               validated only for readability of @name_len bytes).
 * @name_len:    length of @name in bytes.
 * @key:         event-queue key associated with the object.
 * @data_len:    size of the object's data.
 * @prot:        protection bits for the object.
 * @out:         user pointer receiving the new handle on success.
 *
 * Returns KERN_MEMORY_FAULT on bad user pointers, a resolution or
 * allocation error, KERN_INVALID_ARGUMENT if @ctrl_handle does not name
 * a vm_controller, otherwise the result of vm_controller_create_object().
 */
kern_status_t sys_vm_controller_create_object(
	kern_handle_t ctrl_handle,
	const char *name,
	size_t name_len,
	equeue_key_t key,
	size_t data_len,
	vm_prot_t prot,
	kern_handle_t *out)
{
	struct task *self = current_task();

	if (!validate_access_r(self, name, name_len)) {
		return KERN_MEMORY_FAULT;
	}
	if (!validate_access_w(self, out, sizeof *out)) {
		return KERN_MEMORY_FAULT;
	}

	unsigned long flags;
	task_lock_irqsave(self, &flags);

	struct object *ctrl_obj = NULL;
	handle_flags_t handle_flags = 0;
	kern_status_t status = task_resolve_handle(
		self,
		ctrl_handle,
		&ctrl_obj,
		&handle_flags);
	if (status != KERN_OK) {
		task_unlock_irqrestore(self, flags);
		return status;
	}

	/* Reserve the output handle slot while the task lock is still held. */
	struct handle *out_slot = NULL;
	kern_handle_t out_handle = KERN_HANDLE_INVALID;
	status = handle_table_alloc_handle(
		self->t_handles,
		&out_slot,
		&out_handle);
	struct vm_controller *ctrl = vm_controller_cast(ctrl_obj);
	task_unlock_irqrestore(self, flags);

	/*
	 * BUG FIX: the original never checked this status, so a failed
	 * handle_table_alloc_handle() left out_slot NULL and was later
	 * dereferenced when publishing the new object.
	 */
	if (status != KERN_OK) {
		object_unref(ctrl_obj);
		return status;
	}
	if (!ctrl) {
		/*
		 * BUG FIX: release the reserved handle slot on the bad-type
		 * path; the original leaked it here.
		 */
		task_lock_irqsave(self, &flags);
		handle_table_free_handle(self->t_handles, out_handle);
		task_unlock_irqrestore(self, flags);
		object_unref(ctrl_obj);
		return KERN_INVALID_ARGUMENT;
	}

	vm_controller_lock_irqsave(ctrl, &flags);
	struct vm_object *out_vmo = NULL;
	status = vm_controller_create_object(
		ctrl,
		name,
		name_len,
		key,
		data_len,
		prot,
		&out_vmo);
	vm_controller_unlock_irqrestore(ctrl, flags);
	object_unref(ctrl_obj);

	if (status != KERN_OK) {
		/* Creation failed: give the reserved slot back. */
		task_lock_irqsave(self, &flags);
		handle_table_free_handle(self->t_handles, out_handle);
		task_unlock_irqrestore(self, flags);
		return status;
	}

	/*
	 * Publish the new VM object through the reserved slot, then drop the
	 * creation reference (the handle now holds its own reference via
	 * object_add_handle).
	 *
	 * NOTE(review): out_slot is written here without the task lock;
	 * presumably the reserved-but-unpublished slot is private to this
	 * thread — confirm against handle_table semantics.
	 */
	out_slot->h_object = &out_vmo->vo_base;
	object_add_handle(&out_vmo->vo_base);
	object_unref(&out_vmo->vo_base);
	*out = out_handle;
	return KERN_OK;
}
/*
 * sys_vm_controller_detach_object - detach a VM object from the
 * controller that owns it.
 *
 * @ctrl_handle: handle naming a vm_controller.
 * @vmo_handle:  handle naming a vm_object.
 *
 * Returns a resolution error if either handle is bad,
 * KERN_INVALID_ARGUMENT if either handle names the wrong object type,
 * otherwise the result of vm_controller_detach_object().
 */
kern_status_t sys_vm_controller_detach_object(
	kern_handle_t ctrl_handle,
	kern_handle_t vmo_handle)
{
	struct task *caller = current_task();
	unsigned long irq_flags;

	task_lock_irqsave(caller, &irq_flags);

	struct object *ctrl_obj = NULL;
	struct object *vmo_obj = NULL;
	handle_flags_t ctrl_hflags = 0;
	handle_flags_t vmo_hflags = 0;

	kern_status_t rc =
		task_resolve_handle(caller, ctrl_handle, &ctrl_obj, &ctrl_hflags);
	if (rc != KERN_OK) {
		task_unlock_irqrestore(caller, irq_flags);
		return rc;
	}

	rc = task_resolve_handle(caller, vmo_handle, &vmo_obj, &vmo_hflags);
	if (rc != KERN_OK) {
		/* Only the first resolution succeeded; drop that reference. */
		object_unref(ctrl_obj);
		task_unlock_irqrestore(caller, irq_flags);
		return rc;
	}

	struct vm_controller *controller = vm_controller_cast(ctrl_obj);
	struct vm_object *vmo = vm_object_cast(vmo_obj);
	task_unlock_irqrestore(caller, irq_flags);

	if (controller == NULL || vmo == NULL) {
		object_unref(ctrl_obj);
		object_unref(vmo_obj);
		return KERN_INVALID_ARGUMENT;
	}

	/* Lock order: controller first, then the object. */
	vm_controller_lock_irqsave(controller, &irq_flags);
	vm_object_lock(vmo);
	rc = vm_controller_detach_object(controller, vmo);
	vm_object_unlock(vmo);
	vm_controller_unlock_irqrestore(controller, irq_flags);

	object_unref(ctrl_obj);
	object_unref(vmo_obj);
	return rc;
}
/*
 * sys_vm_controller_supply_pages - copy/supply @count pages from a
 * source VM object into a destination VM object via a controller.
 *
 * @ctrl_handle: handle naming a vm_controller.
 * @dst_handle:  handle naming the destination vm_object.
 * @dst_offset:  offset into the destination object.
 * @src_handle:  handle naming the source vm_object.
 * @src_offset:  offset into the source object.
 * @count:       number of pages to supply.
 *
 * Returns a resolution error if any handle is bad,
 * KERN_INVALID_ARGUMENT if any handle names the wrong object type,
 * otherwise the result of vm_controller_supply_pages().
 */
kern_status_t sys_vm_controller_supply_pages(
	kern_handle_t ctrl_handle,
	kern_handle_t dst_handle,
	off_t dst_offset,
	kern_handle_t src_handle,
	off_t src_offset,
	size_t count)
{
	struct task *caller = current_task();
	unsigned long irq_flags;

	task_lock_irqsave(caller, &irq_flags);

	struct object *ctrl_obj = NULL;
	struct object *src_obj = NULL;
	struct object *dst_obj = NULL;
	handle_flags_t ctrl_hflags = 0;
	handle_flags_t src_hflags = 0;
	handle_flags_t dst_hflags = 0;

	kern_status_t rc =
		task_resolve_handle(caller, ctrl_handle, &ctrl_obj, &ctrl_hflags);
	if (rc != KERN_OK) {
		task_unlock_irqrestore(caller, irq_flags);
		return rc;
	}

	rc = task_resolve_handle(caller, dst_handle, &dst_obj, &dst_hflags);
	if (rc != KERN_OK) {
		object_unref(ctrl_obj);
		task_unlock_irqrestore(caller, irq_flags);
		return rc;
	}

	rc = task_resolve_handle(caller, src_handle, &src_obj, &src_hflags);
	if (rc != KERN_OK) {
		object_unref(ctrl_obj);
		object_unref(dst_obj);
		task_unlock_irqrestore(caller, irq_flags);
		return rc;
	}

	struct vm_controller *controller = vm_controller_cast(ctrl_obj);
	struct vm_object *dst = vm_object_cast(dst_obj);
	struct vm_object *src = vm_object_cast(src_obj);
	task_unlock_irqrestore(caller, irq_flags);

	if (controller == NULL || dst == NULL || src == NULL) {
		object_unref(ctrl_obj);
		object_unref(dst_obj);
		object_unref(src_obj);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Controller lock first, then both objects via the pair-lock helper
	 * (presumably it orders the two internally to avoid deadlock —
	 * confirm against vm_object_lock_pair's contract).
	 */
	vm_controller_lock_irqsave(controller, &irq_flags);
	vm_object_lock_pair(src, dst);
	rc = vm_controller_supply_pages(
		controller,
		dst,
		dst_offset,
		src,
		src_offset,
		count);
	vm_object_unlock_pair(src, dst);
	vm_controller_unlock_irqrestore(controller, irq_flags);

	object_unref(ctrl_obj);
	object_unref(dst_obj);
	object_unref(src_obj);
	return rc;
}