Files
mango/syscall/vm-region.c

435 lines
9.0 KiB
C
Raw Normal View History

2026-02-19 19:21:50 +00:00
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/syscall.h>
#include <kernel/vm-object.h>
#include <kernel/vm-region.h>
/*
 * sys_vm_region_create() - create a child VM region inside @parent and
 * return both a handle to it and its base virtual address.
 *
 * @parent:           handle naming the parent vm_region.
 * @name, @name_len:  optional region name; only validated when name_len != 0.
 * @offset:           offset of the child within the parent.
 * @region_len:       length of the child region.
 * @prot:             protection bits for the child.
 * @out:              user pointer receiving the new handle.
 * @out_base_address: user pointer receiving the child's base address.
 *
 * Returns KERN_OK on success, KERN_MEMORY_FAULT for bad user pointers,
 * KERN_INVALID_ARGUMENT if @parent is not a vm_region, or the status of
 * handle allocation / region creation.
 *
 * All user pointers are validated up front so no kernel state has to be
 * rolled back on a bad pointer.  The handle slot is reserved while the
 * task lock is held; the (potentially slow) vm_region_create() call runs
 * with the lock dropped, holding only a temporary ref on the parent.
 */
kern_status_t sys_vm_region_create(
	kern_handle_t parent,
	const char *name,
	size_t name_len,
	off_t offset,
	size_t region_len,
	vm_prot_t prot,
	kern_handle_t *out,
	virt_addr_t *out_base_address)
{
	struct task *self = current_task();
	/* @name is optional: only checked when a length was supplied. */
	if (name_len && !validate_access_r(self, name, name_len)) {
		return KERN_MEMORY_FAULT;
	}
	if (!validate_access_w(self, out, sizeof *out)) {
		return KERN_MEMORY_FAULT;
	}
	if (!validate_access_w(
		self,
		out_base_address,
		sizeof *out_base_address)) {
		return KERN_MEMORY_FAULT;
	}
	unsigned long flags;
	task_lock_irqsave(self, &flags);
	struct object *obj = NULL;
	handle_flags_t handle_flags = 0;
	/* Resolve @parent to its backing object while the task is locked. */
	kern_status_t status
		= task_resolve_handle(self, parent, &obj, &handle_flags);
	if (status != KERN_OK) {
		task_unlock_irqrestore(self, flags);
		return status;
	}
	struct vm_region *parent_region = vm_region_cast(obj);
	if (!parent_region) {
		task_unlock_irqrestore(self, flags);
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 * Reserve the child's handle slot now, still under the lock, so
	 * once the region exists the only remaining step is filling the
	 * slot, which cannot fail.
	 */
	struct handle *child_handle_slot = NULL;
	kern_handle_t child_handle = KERN_HANDLE_INVALID;
	status = handle_table_alloc_handle(
		self->t_handles,
		&child_handle_slot,
		&child_handle);
	if (status != KERN_OK) {
		task_unlock_irqrestore(self, flags);
		return status;
	}
	/* Pin the parent so it survives while the lock is dropped. */
	object_ref(obj);
	task_unlock_irqrestore(self, flags);
	struct vm_region *child = NULL;
	status = vm_region_create(
		parent_region,
		name,
		name_len,
		offset,
		region_len,
		prot,
		&child);
	object_unref(obj);
	if (status != KERN_OK) {
		/* Roll back the handle reservation made above. */
		task_lock_irqsave(self, &flags);
		handle_table_free_handle(self->t_handles, child_handle);
		task_unlock_irqrestore(self, flags);
		return status;
	}
	/*
	 * NOTE(review): the slot is filled without holding the task lock —
	 * presumably safe because a freshly reserved slot is private to us
	 * until h_object is published; confirm against the handle-table
	 * locking rules.
	 */
	child_handle_slot->h_object = &child->vr_base;
	object_add_handle(&child->vr_base);
	/* Drop the creation reference; the handle now owns the region. */
	object_unref(&child->vr_base);
	*out = child_handle;
	*out_base_address = vm_region_get_base_address(child);
	return KERN_OK;
}
/*
 * sys_vm_region_read() - copy @count bytes out of a vm_region into the
 * caller's buffer @dst, starting @offset bytes past the region's base.
 * When @nr_read is non-NULL it receives the number of bytes copied.
 *
 * Returns KERN_MEMORY_FAULT for bad user pointers, KERN_INVALID_ARGUMENT
 * if the handle is not a vm_region, otherwise the status of the copy.
 */
kern_status_t sys_vm_region_read(
	kern_handle_t region_handle,
	void *dst,
	off_t offset,
	size_t count,
	size_t *nr_read)
{
	struct task *self = current_task();
	struct object *region_obj = NULL;
	handle_flags_t resolved_flags = 0;
	unsigned long irq_flags;

	/* Both user destinations must be writable before anything else. */
	if (!validate_access_w(self, dst, count))
		return KERN_MEMORY_FAULT;
	if (nr_read && !validate_access_w(self, nr_read, sizeof *nr_read))
		return KERN_MEMORY_FAULT;

	/* Resolve the handle and pin the region under the task lock. */
	task_lock_irqsave(self, &irq_flags);
	kern_status_t status = task_resolve_handle(
		self, region_handle, &region_obj, &resolved_flags);
	if (status != KERN_OK) {
		task_unlock_irqrestore(self, irq_flags);
		return status;
	}
	struct vm_region *region = vm_region_cast(region_obj);
	if (region == NULL) {
		task_unlock_irqrestore(self, irq_flags);
		return KERN_INVALID_ARGUMENT;
	}
	object_ref(region_obj);
	task_unlock_irqrestore(self, irq_flags);

	/* Copy region -> caller's address space, then drop the pin. */
	status = vm_region_memmove(
		self->t_address_space,
		(virt_addr_t)dst,
		region,
		vm_region_get_base_address(region) + offset,
		count,
		nr_read);
	object_unref(region_obj);
	return status;
}
/*
 * sys_vm_region_write() - copy @count bytes from the caller's buffer
 * @src into a vm_region, starting @offset bytes past the region's base.
 * When @nr_written is non-NULL it receives the number of bytes copied.
 *
 * Returns KERN_MEMORY_FAULT for bad user pointers, KERN_INVALID_ARGUMENT
 * if the handle is not a vm_region, otherwise the status of the copy.
 */
kern_status_t sys_vm_region_write(
	kern_handle_t region_handle,
	const void *src,
	off_t offset,
	size_t count,
	size_t *nr_written)
{
	struct task *self = current_task();
	struct object *region_obj = NULL;
	handle_flags_t resolved_flags = 0;
	unsigned long irq_flags;

	/* @src must be readable; the optional count-out must be writable. */
	if (!validate_access_r(self, src, count))
		return KERN_MEMORY_FAULT;
	if (nr_written
	    && !validate_access_w(self, nr_written, sizeof *nr_written))
		return KERN_MEMORY_FAULT;

	/* Resolve the handle and pin the region under the task lock. */
	task_lock_irqsave(self, &irq_flags);
	kern_status_t status = task_resolve_handle(
		self, region_handle, &region_obj, &resolved_flags);
	if (status != KERN_OK) {
		task_unlock_irqrestore(self, irq_flags);
		return status;
	}
	struct vm_region *region = vm_region_cast(region_obj);
	if (region == NULL) {
		task_unlock_irqrestore(self, irq_flags);
		return KERN_INVALID_ARGUMENT;
	}
	object_ref(region_obj);
	task_unlock_irqrestore(self, irq_flags);

	/* Copy caller's address space -> region, then drop the pin. */
	status = vm_region_memmove(
		region,
		vm_region_get_base_address(region) + offset,
		self->t_address_space,
		(virt_addr_t)src,
		count,
		nr_written);
	object_unref(region_obj);
	return status;
}
/*
 * sys_vm_region_map_absolute() - map a vm_object into a region at an
 * absolute virtual address (or anywhere, when @map_address carries the
 * "any" sentinel).
 *
 * @region_handle:    handle naming the target vm_region.
 * @map_address:      absolute address to map at, or the "any" sentinel.
 * @object_handle:    handle naming the vm_object to map.
 * @object_offset:    offset into the object.
 * @length:           length of the mapping.
 * @prot:             protection bits.
 * @out_base_address: optional user pointer receiving the mapped base.
 *
 * Returns KERN_MEMORY_FAULT for a bad out pointer, KERN_INVALID_ARGUMENT
 * when either handle names the wrong object type, otherwise the status
 * of handle resolution or vm_region_map_object().
 */
kern_status_t sys_vm_region_map_absolute(
	kern_handle_t region_handle,
	virt_addr_t map_address,
	kern_handle_t object_handle,
	off_t object_offset,
	size_t length,
	vm_prot_t prot,
	virt_addr_t *out_base_address)
{
	struct task *self = current_task();
	/*
	 * BUG FIX: @out_base_address is an out parameter written by
	 * vm_region_map_object(), so it must be validated for WRITE
	 * access.  The original used validate_access_r(), which would let
	 * a read-only user mapping slip through; sys_vm_region_create()
	 * already checks its out pointers with validate_access_w().
	 */
	if (out_base_address
	    && !validate_access_w(
		    self,
		    out_base_address,
		    sizeof *out_base_address)) {
		return KERN_MEMORY_FAULT;
	}
	kern_status_t status = KERN_OK;
	unsigned long flags;
	task_lock_irqsave(self, &flags);
	struct object *region_obj = NULL, *vmo_obj = NULL;
	handle_flags_t region_flags = 0, vmo_flags = 0;
	/* Resolve both handles under a single hold of the task lock. */
	status = task_resolve_handle(
		self,
		region_handle,
		&region_obj,
		&region_flags);
	if (status != KERN_OK)
		goto out_unlock;
	status = task_resolve_handle(self, object_handle, &vmo_obj, &vmo_flags);
	if (status != KERN_OK)
		goto out_unlock;
	struct vm_region *region = vm_region_cast(region_obj);
	struct vm_object *vmo = vm_object_cast(vmo_obj);
	if (!region || !vmo) {
		status = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}
	/* Pin both objects across the unlocked map operation. */
	object_ref(vmo_obj);
	object_ref(region_obj);
	task_unlock_irqrestore(self, flags);
	/*
	 * Translate the absolute address into a region-relative offset.
	 * NOTE(review): @map_address (a virt_addr_t) is compared against
	 * VM_REGION_ANY_OFFSET, an offset sentinel — confirm this is the
	 * intended "map anywhere" encoding and not a missing
	 * VM_REGION_ANY_ADDRESS constant.
	 */
	off_t region_offset = VM_REGION_ANY_OFFSET;
	if (map_address != VM_REGION_ANY_OFFSET) {
		region_offset
			= map_address - vm_region_get_base_address(region);
	}
	status = vm_region_map_object(
		region,
		region_offset,
		vmo,
		object_offset,
		length,
		prot,
		out_base_address);
	object_unref(vmo_obj);
	object_unref(region_obj);
	return status;

out_unlock:
	task_unlock_irqrestore(self, flags);
	return status;
}
/*
 * sys_vm_region_map_relative() - map a vm_object into a region at an
 * offset relative to the region's base (or anywhere, when
 * @region_offset carries the "any" sentinel).
 *
 * @region_handle:    handle naming the target vm_region.
 * @region_offset:    offset within the region, or VM_REGION_ANY_OFFSET.
 * @object_handle:    handle naming the vm_object to map.
 * @object_offset:    offset into the object.
 * @length:           length of the mapping.
 * @prot:             protection bits.
 * @out_base_address: optional user pointer receiving the mapped base.
 *
 * Returns KERN_MEMORY_FAULT for a bad out pointer, KERN_INVALID_ARGUMENT
 * when either handle names the wrong object type, otherwise the status
 * of handle resolution or vm_region_map_object().
 */
kern_status_t sys_vm_region_map_relative(
	kern_handle_t region_handle,
	off_t region_offset,
	kern_handle_t object_handle,
	off_t object_offset,
	size_t length,
	vm_prot_t prot,
	virt_addr_t *out_base_address)
{
	/*
	 * NOTE(review): tracek() is handed off_t/size_t arguments against
	 * "%x"; if those types are 64-bit this is a mismatched conversion —
	 * confirm against tracek()'s format handling before changing.
	 */
	tracek("vm_region_map_relative(%x, %x, %x, %x, %x, %x, %p)",
		region_handle,
		region_offset,
		object_handle,
		object_offset,
		length,
		prot,
		out_base_address);
	struct task *self = current_task();
	/*
	 * BUG FIX: @out_base_address is an out parameter written by
	 * vm_region_map_object(), so it must be validated for WRITE
	 * access.  The original used validate_access_r(), which would let
	 * a read-only user mapping slip through; sys_vm_region_create()
	 * already checks its out pointers with validate_access_w().
	 */
	if (out_base_address
	    && !validate_access_w(
		    self,
		    out_base_address,
		    sizeof *out_base_address)) {
		return KERN_MEMORY_FAULT;
	}
	kern_status_t status = KERN_OK;
	unsigned long flags;
	task_lock_irqsave(self, &flags);
	struct object *region_obj = NULL, *vmo_obj = NULL;
	handle_flags_t region_flags = 0, vmo_flags = 0;
	/* Resolve both handles under a single hold of the task lock. */
	status = task_resolve_handle(
		self,
		region_handle,
		&region_obj,
		&region_flags);
	if (status != KERN_OK)
		goto out_unlock;
	status = task_resolve_handle(self, object_handle, &vmo_obj, &vmo_flags);
	if (status != KERN_OK)
		goto out_unlock;
	struct vm_region *region = vm_region_cast(region_obj);
	struct vm_object *vmo = vm_object_cast(vmo_obj);
	if (!region || !vmo) {
		status = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}
	/* Pin both objects across the unlocked map operation. */
	object_ref(vmo_obj);
	object_ref(region_obj);
	task_unlock_irqrestore(self, flags);
	status = vm_region_map_object(
		region,
		region_offset,
		vmo,
		object_offset,
		length,
		prot,
		out_base_address);
	object_unref(vmo_obj);
	object_unref(region_obj);
	tracek("result: %u", status);
	return status;

out_unlock:
	task_unlock_irqrestore(self, flags);
	return status;
}
/*
 * sys_vm_region_unmap_absolute() - remove @length bytes of mappings at
 * the absolute virtual address @address inside the region named by
 * @region_handle.
 *
 * Returns KERN_INVALID_ARGUMENT if the handle is not a vm_region,
 * otherwise the status of handle resolution or vm_region_unmap().
 */
kern_status_t sys_vm_region_unmap_absolute(
	kern_handle_t region_handle,
	virt_addr_t address,
	size_t length)
{
	struct task *self = current_task();
	struct object *obj = NULL;
	handle_flags_t obj_flags = 0;
	unsigned long irq_flags;

	/* Resolve the handle and pin the region under the task lock. */
	task_lock_irqsave(self, &irq_flags);
	kern_status_t status
		= task_resolve_handle(self, region_handle, &obj, &obj_flags);
	struct vm_region *region = NULL;
	if (status == KERN_OK) {
		region = vm_region_cast(obj);
		if (region == NULL)
			status = KERN_INVALID_ARGUMENT;
	}
	if (status != KERN_OK) {
		task_unlock_irqrestore(self, irq_flags);
		return status;
	}
	object_ref(obj);
	task_unlock_irqrestore(self, irq_flags);

	/* Translate the absolute address into a region-relative offset. */
	status = vm_region_unmap(
		region, address - vm_region_get_base_address(region), length);
	object_unref(obj);
	return status;
}
/*
 * sys_vm_region_unmap_relative() - remove @length bytes of mappings at
 * @offset from the base of the region named by @region_handle.
 *
 * Returns KERN_INVALID_ARGUMENT if the handle is not a vm_region,
 * otherwise the status of handle resolution or vm_region_unmap().
 */
kern_status_t sys_vm_region_unmap_relative(
	kern_handle_t region_handle,
	off_t offset,
	size_t length)
{
	struct task *self = current_task();
	struct object *obj = NULL;
	handle_flags_t obj_flags = 0;
	unsigned long irq_flags;

	/* Resolve the handle and pin the region under the task lock. */
	task_lock_irqsave(self, &irq_flags);
	kern_status_t status
		= task_resolve_handle(self, region_handle, &obj, &obj_flags);
	struct vm_region *region = NULL;
	if (status == KERN_OK) {
		region = vm_region_cast(obj);
		if (region == NULL)
			status = KERN_INVALID_ARGUMENT;
	}
	if (status != KERN_OK) {
		task_unlock_irqrestore(self, irq_flags);
		return status;
	}
	object_ref(obj);
	task_unlock_irqrestore(self, irq_flags);

	/* The offset is already region-relative; unmap and drop the pin. */
	status = vm_region_unmap(region, offset, length);
	object_unref(obj);
	return status;
}