syscall: add lots of syscalls

This commit is contained in:
2026-02-19 19:21:50 +00:00
parent dbc7b8fc59
commit 1d4fd4f586
8 changed files with 1061 additions and 14 deletions

30
syscall/config.c Normal file
View File

@@ -0,0 +1,30 @@
#include <kernel/sched.h>
#include <kernel/syscall.h>
#include <kernel/vm-region.h>
/*
 * Read a kernel configuration value into a caller-supplied buffer.
 *
 * key: which configuration value to fetch.
 * ptr: user buffer that receives the value.
 * len: size of the user buffer in bytes.
 *
 * Returns KERN_OK on success, KERN_MEMORY_FAULT if the buffer is not
 * writable by the caller, KERN_INVALID_ARGUMENT for an unknown key or
 * a buffer too small for the requested value.
 */
kern_status_t sys_kern_config_get(kern_config_key_t key, void *ptr, size_t len)
{
        struct task *self = current_task();

        switch (key) {
        case KERN_CFG_PAGE_SIZE:
                /* BUGFIX: the value stored below is a uint32_t, but the
                 * access check used sizeof(uintptr_t) and `len` was
                 * ignored entirely.  Check the caller's buffer length
                 * and validate exactly the bytes we write. */
                if (len < sizeof(uint32_t)) {
                        return KERN_INVALID_ARGUMENT;
                }
                if (!validate_access_w(self, ptr, sizeof(uint32_t))) {
                        return KERN_MEMORY_FAULT;
                }
                *(uint32_t *)ptr = VM_PAGE_SIZE;
                return KERN_OK;
        default:
                return KERN_INVALID_ARGUMENT;
        }
        /* note: the old unreachable `return KERN_UNSUPPORTED` after the
         * switch (every case returns) has been removed */
}
/*
 * Modify a kernel configuration value.  Userspace is never allowed to
 * do this, so every request is denied regardless of key or payload.
 */
kern_status_t sys_kern_config_set(
        kern_config_key_t key,
        const void *ptr,
        size_t len)
{
        /* parameters are accepted for ABI symmetry with _get() only */
        (void)key;
        (void)ptr;
        (void)len;
        return KERN_ACCESS_DENIED;
}

View File

@@ -5,8 +5,36 @@
/* Expands to a designated initializer that maps syscall number SYS_<id>
 * to its kernel entry point sys_<p>, stored as a virt_addr_t. */
#define SYSCALL_TABLE_ENTRY(id, p) [SYS_##id] = (virt_addr_t)(sys_##p)

/* Dispatch table indexed by syscall number.  Designated initializers
 * keep each slot tied to its SYS_* constant, so the declaration order
 * here does not have to match the numbering; unlisted numbers are
 * zero-filled (i.e. no handler). */
static const virt_addr_t syscall_table[] = {
        SYSCALL_TABLE_ENTRY(EXIT, exit),
        SYSCALL_TABLE_ENTRY(TASK_EXIT, task_exit),
        SYSCALL_TABLE_ENTRY(TASK_CREATE, task_create),
        SYSCALL_TABLE_ENTRY(TASK_CREATE_THREAD, task_create_thread),
        SYSCALL_TABLE_ENTRY(THREAD_START, thread_start),
        SYSCALL_TABLE_ENTRY(VM_OBJECT_CREATE, vm_object_create),
        SYSCALL_TABLE_ENTRY(VM_OBJECT_READ, vm_object_read),
        SYSCALL_TABLE_ENTRY(VM_OBJECT_WRITE, vm_object_write),
        SYSCALL_TABLE_ENTRY(VM_OBJECT_COPY, vm_object_copy),
        SYSCALL_TABLE_ENTRY(VM_REGION_CREATE, vm_region_create),
        SYSCALL_TABLE_ENTRY(VM_REGION_READ, vm_region_read),
        SYSCALL_TABLE_ENTRY(VM_REGION_WRITE, vm_region_write),
        SYSCALL_TABLE_ENTRY(VM_REGION_MAP_ABSOLUTE, vm_region_map_absolute),
        SYSCALL_TABLE_ENTRY(VM_REGION_MAP_RELATIVE, vm_region_map_relative),
        SYSCALL_TABLE_ENTRY(VM_REGION_UNMAP_ABSOLUTE, vm_region_unmap_absolute),
        SYSCALL_TABLE_ENTRY(VM_REGION_UNMAP_RELATIVE, vm_region_unmap_relative),
        SYSCALL_TABLE_ENTRY(KERN_LOG, kern_log),
        SYSCALL_TABLE_ENTRY(KERN_HANDLE_CLOSE, kern_handle_close),
        SYSCALL_TABLE_ENTRY(KERN_CONFIG_GET, kern_config_get),
        SYSCALL_TABLE_ENTRY(KERN_CONFIG_SET, kern_config_set),
        SYSCALL_TABLE_ENTRY(CHANNEL_CREATE, channel_create),
        SYSCALL_TABLE_ENTRY(PORT_CREATE, port_create),
        SYSCALL_TABLE_ENTRY(PORT_CONNECT, port_connect),
        SYSCALL_TABLE_ENTRY(PORT_DISCONNECT, port_disconnect),
        SYSCALL_TABLE_ENTRY(MSG_SEND, msg_send),
        SYSCALL_TABLE_ENTRY(MSG_RECV, msg_recv),
        SYSCALL_TABLE_ENTRY(MSG_REPLY, msg_reply),
        SYSCALL_TABLE_ENTRY(MSG_READ, msg_read),
        SYSCALL_TABLE_ENTRY(MSG_READ_HANDLES, msg_read_handles),
        SYSCALL_TABLE_ENTRY(MSG_WRITE, msg_write),
        SYSCALL_TABLE_ENTRY(MSG_WRITE_HANDLES, msg_write_handles),
};

/* Number of slots in the table (highest SYS_* used + 1), including any
 * zero-filled gaps; used for bounds-checking incoming syscall numbers. */
static const size_t syscall_table_count
        = sizeof syscall_table / sizeof syscall_table[0];

8
syscall/handle.c Normal file
View File

@@ -0,0 +1,8 @@
#include <kernel/sched.h>
#include <kernel/syscall.h>
/*
 * Close one handle in the calling task's handle table.  Returns the
 * status reported by task_close_handle().
 */
kern_status_t sys_kern_handle_close(kern_handle_t handle)
{
        return task_close_handle(current_task(), handle);
}

9
syscall/log.c Normal file
View File

@@ -0,0 +1,9 @@
#include <kernel/printk.h>
#include <kernel/sched.h>
/*
 * Log a user-supplied string to the kernel log, prefixed with the
 * calling task's name.
 *
 * NOTE(review): `s` is a userspace pointer handed straight to printk()
 * without any validate_access_r() check -- unlike every other syscall
 * in this file that touches user memory.  A bad pointer would fault in
 * kernel context, and an unterminated string would over-read.  This
 * needs a bounded, validated user-string copy; confirm and fix.
 */
kern_status_t sys_kern_log(const char *s)
{
        struct task *task = current_task();
        printk("%s: %s", task->t_name, s);
        return KERN_OK;
}

196
syscall/msg.c Normal file
View File

@@ -0,0 +1,196 @@
#include <kernel/channel.h>
#include <kernel/port.h>
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/syscall.h>
#include <kernel/vm-region.h>
/*
 * Create a channel with task-local id `id` in the calling task and
 * return a handle to it through `out`.
 *
 * Returns KERN_OK on success, KERN_MEMORY_FAULT if `out` is not
 * writable, KERN_NO_MEMORY on allocation failure, KERN_NAME_EXISTS if
 * the task already has a channel with this id, or the error from
 * task_open_handle().
 */
kern_status_t sys_channel_create(
        unsigned int id,
        channel_flags_t flags,
        kern_handle_t *out)
{
        struct task *self = current_task();

        if (!validate_access_w(self, out, sizeof *out)) {
                return KERN_MEMORY_FAULT;
        }
        struct channel *channel = channel_create();
        if (!channel) {
                return KERN_NO_MEMORY;
        }

        unsigned long irq_flags;
        task_lock_irqsave(self, &irq_flags);
        if (task_get_channel(self, id)) {
                task_unlock_irqrestore(self, irq_flags);
                /* BUGFIX: the freshly created channel was leaked on this
                 * path; drop its creation reference like the error path
                 * below already does. */
                object_unref(&channel->c_base);
                return KERN_NAME_EXISTS;
        }
        kern_handle_t handle;
        kern_status_t status
                = task_open_handle(self, &channel->c_base, 0, &handle);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, irq_flags);
                object_unref(&channel->c_base);
                return status;
        }
        task_add_channel(self, channel, id);
        task_unlock_irqrestore(self, irq_flags);

        *out = handle;
        return KERN_OK;
}
/*
 * Create a new IPC port owned by the calling task and return a handle
 * to it through `out`.
 *
 * Returns KERN_OK on success, KERN_MEMORY_FAULT if `out` is not
 * writable, KERN_NO_MEMORY if the port cannot be allocated, or the
 * error from task_open_handle().
 */
kern_status_t sys_port_create(kern_handle_t *out)
{
        struct task *self = current_task();
        if (!validate_access_w(self, out, sizeof *out)) {
                return KERN_MEMORY_FAULT;
        }
        struct port *port = port_create();
        if (!port) {
                return KERN_NO_MEMORY;
        }
        unsigned long irq_flags;
        /* the task lock serializes handle-table manipulation */
        task_lock_irqsave(self, &irq_flags);
        kern_handle_t handle;
        kern_status_t status
                = task_open_handle(self, &port->p_base, 0, &handle);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, irq_flags);
                /* drop the creation reference; the port is discarded */
                object_unref(&port->p_base);
                return status;
        }
        task_unlock_irqrestore(self, irq_flags);
        *out = handle;
        return KERN_OK;
}
/*
 * Connect the port named by `port_handle` to channel `channel_id` of
 * the task identified by `task_id`.
 *
 * Returns KERN_OK on success, the handle-resolution error, KERN_NO_ENTRY
 * if the remote task or channel does not exist, or the status reported
 * by port_connect().
 */
kern_status_t sys_port_connect(
        kern_handle_t port_handle,
        tid_t task_id,
        unsigned int channel_id)
{
        unsigned long flags;
        struct task *self = current_task();

        task_lock_irqsave(self, &flags);
        struct object *port_obj = NULL;
        handle_flags_t port_handle_flags = 0;
        kern_status_t status = task_resolve_handle(
                self,
                port_handle,
                &port_obj,
                &port_handle_flags);
        if (status != KERN_OK) {
                /* BUGFIX: this path returned with the task lock still
                 * held (and interrupts off). */
                task_unlock_irqrestore(self, flags);
                return status;
        }
        /* add a reference to the port object to make sure it isn't
         * deleted while we're using it */
        object_ref(port_obj);
        task_unlock_irqrestore(self, flags);

        struct task *remote_task = task_from_tid(task_id);
        if (!remote_task) {
                /* BUGFIX: the port reference was leaked on this path */
                object_unref(port_obj);
                return KERN_NO_ENTRY;
        }
        task_lock_irqsave(remote_task, &flags);
        struct channel *remote = task_get_channel(remote_task, channel_id);
        if (!remote) {
                task_unlock_irqrestore(remote_task, flags);
                /* BUGFIX: ditto */
                object_unref(port_obj);
                return KERN_NO_ENTRY;
        }
        object_ref(&remote->c_base);
        task_unlock_irqrestore(remote_task, flags);

        status = port_connect(port_cast(port_obj), remote);
        object_unref(port_obj);
        object_unref(&remote->c_base);
        /* BUGFIX: propagate port_connect()'s status instead of
         * unconditionally reporting KERN_OK */
        return status;
}
/* Disconnect a port from its channel -- not yet implemented. */
kern_status_t sys_port_disconnect(kern_handle_t port)
{
        return KERN_UNIMPLEMENTED;
}
/* Send `req` on a port and await `resp` -- not yet implemented. */
kern_status_t sys_msg_send(
        kern_handle_t port,
        msg_flags_t flags,
        const struct msg *req,
        struct msg *resp)
{
        return KERN_UNIMPLEMENTED;
}
/* Receive the next message from a channel -- not yet implemented. */
kern_status_t sys_msg_recv(
        kern_handle_t channel,
        msg_flags_t flags,
        msgid_t *out_id,
        struct msg *out_msg)
{
        return KERN_UNIMPLEMENTED;
}
/* Reply to a previously received message -- not yet implemented. */
kern_status_t sys_msg_reply(
        kern_handle_t channel,
        msg_flags_t flags,
        msgid_t id,
        const struct msg *reply)
{
        return KERN_UNIMPLEMENTED;
}
/* Read payload bytes of a pending message -- not yet implemented. */
kern_status_t sys_msg_read(
        kern_handle_t channel,
        msgid_t id,
        size_t offset,
        struct iovec *out,
        size_t nr_out)
{
        return KERN_UNIMPLEMENTED;
}
/* Read handles attached to a pending message -- not yet implemented. */
kern_status_t sys_msg_read_handles(
        kern_handle_t channel,
        msgid_t id,
        size_t offset,
        struct handle_list *out,
        size_t nr_out)
{
        return KERN_UNIMPLEMENTED;
}
/* Write payload bytes into a message under construction -- not yet
 * implemented. */
kern_status_t sys_msg_write(
        kern_handle_t channel,
        msgid_t id,
        size_t offset,
        const struct iovec *in,
        size_t nr_in)
{
        return KERN_UNIMPLEMENTED;
}
/* Attach handles to a message under construction -- not yet
 * implemented. */
kern_status_t sys_msg_write_handles(
        kern_handle_t channel,
        msgid_t id,
        size_t offset,
        const struct handle_list *in,
        size_t nr_in)
{
        return KERN_UNIMPLEMENTED;
}

View File

@@ -1,13 +1,203 @@
#include <mango/machine/cpu.h>
#include <mango/printk.h>
#include <mango/sched.h>
#include <kernel/machine/cpu.h>
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/syscall.h>
#include <kernel/vm-region.h>
extern kern_status_t sys_exit(int status)
extern kern_status_t sys_task_exit(int status)
{
printk("sys_exit(%d)", status);
while (1) {
ml_cpu_pause();
printk("sys_exit(%d)", status);
milli_sleep(1000);
}
return KERN_UNIMPLEMENTED;
}
/*
 * Create a child task under the task named by `parent_handle`.
 * On success, handles to the new task and to its address-space region
 * are returned through `out_task` and `out_address_space`.
 *
 * name/name_len: optional human-readable task name (copied by
 * task_create()).
 */
kern_status_t sys_task_create(
        kern_handle_t parent_handle,
        const char *name,
        size_t name_len,
        kern_handle_t *out_task,
        kern_handle_t *out_address_space)
{
        unsigned long flags;
        struct task *self = current_task();

        if (name_len && !validate_access_r(self, name, name_len)) {
                return KERN_MEMORY_FAULT;
        }
        if (!validate_access_w(self, out_task, sizeof *out_task)) {
                return KERN_MEMORY_FAULT;
        }
        if (!validate_access_w(
                    self,
                    out_address_space,
                    sizeof *out_address_space)) {
                return KERN_MEMORY_FAULT;
        }

        struct object *parent_obj;
        handle_flags_t parent_flags;
        task_lock_irqsave(self, &flags);
        kern_status_t status = task_resolve_handle(
                self,
                parent_handle,
                &parent_obj,
                &parent_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        object_ref(parent_obj);
        struct task *parent = task_cast(parent_obj);
        if (!parent) {
                /* BUGFIX: the handle may name a non-task object;
                 * previously the NULL from task_cast() was passed
                 * unchecked to task_lock_irqsave() below. */
                task_unlock_irqrestore(self, flags);
                object_unref(parent_obj);
                return KERN_INVALID_ARGUMENT;
        }

        /* reserve both handle slots up front so the child can be
         * published atomically once created */
        struct handle *child_handle_slot = NULL, *space_handle_slot = NULL;
        kern_handle_t child_handle, space_handle;
        status = handle_table_alloc_handle(
                self->t_handles,
                &child_handle_slot,
                &child_handle);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                object_unref(parent_obj);
                return status;
        }
        status = handle_table_alloc_handle(
                self->t_handles,
                &space_handle_slot,
                &space_handle);
        if (status != KERN_OK) {
                handle_table_free_handle(self->t_handles, child_handle);
                task_unlock_irqrestore(self, flags);
                object_unref(parent_obj);
                return status;
        }
        task_unlock_irqrestore(self, flags);

        struct task *child = task_create(name, name_len);
        if (!child) {
                object_unref(parent_obj);
                task_lock_irqsave(self, &flags);
                handle_table_free_handle(self->t_handles, child_handle);
                handle_table_free_handle(self->t_handles, space_handle);
                task_unlock_irqrestore(self, flags);
                return KERN_NO_MEMORY;
        }
        task_lock_irqsave(parent, &flags);
        task_add_child(parent, child);
        task_unlock_irqrestore(parent, flags);

        /* publish the child and its address space through the reserved
         * handle slots */
        child_handle_slot->h_object = &child->t_base;
        space_handle_slot->h_object = &child->t_address_space->vr_base;
        object_add_handle(&child->t_base);
        object_add_handle(&child->t_address_space->vr_base);
        object_unref(parent_obj);

        *out_task = child_handle;
        *out_address_space = space_handle;
        return KERN_OK;
}
/*
 * Create a (not yet running) thread in the task named by `task`, with
 * initial instruction pointer `ip`, stack pointer `sp` and up to
 * `nr_args` register arguments.  A handle to the thread is returned
 * through `out_thread`; start it with sys_thread_start().
 */
kern_status_t sys_task_create_thread(
        kern_handle_t task,
        virt_addr_t ip,
        virt_addr_t sp,
        uintptr_t *args,
        size_t nr_args,
        kern_handle_t *out_thread)
{
        unsigned long flags;
        struct task *self = current_task();

        /* BUGFIX: guard the args check like the other syscalls guard
         * optional buffers (nr_args == 0 must not require a readable
         * pointer), and reject a byte count that would overflow. */
        if (nr_args) {
                if (nr_args > (size_t)-1 / sizeof(uintptr_t)) {
                        return KERN_INVALID_ARGUMENT;
                }
                if (!validate_access_r(
                            self,
                            args,
                            nr_args * sizeof(uintptr_t))) {
                        return KERN_MEMORY_FAULT;
                }
        }
        if (!validate_access_w(self, out_thread, sizeof *out_thread)) {
                return KERN_MEMORY_FAULT;
        }

        struct object *target_obj;
        handle_flags_t target_flags;
        task_lock_irqsave(self, &flags);
        kern_status_t status
                = task_resolve_handle(self, task, &target_obj, &target_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        object_ref(target_obj);
        struct task *target = task_cast(target_obj);
        if (!target) {
                /* BUGFIX: the handle may name a non-task object;
                 * previously task_cast()'s NULL was used unchecked. */
                task_unlock_irqrestore(self, flags);
                object_unref(target_obj);
                return KERN_INVALID_ARGUMENT;
        }
        struct handle *target_handle = NULL;
        kern_handle_t out_handle;
        status = handle_table_alloc_handle(
                self->t_handles,
                &target_handle,
                &out_handle);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                object_unref(target_obj);
                return status;
        }
        task_unlock_irqrestore(self, flags);

        task_lock_irqsave(target, &flags);
        struct thread *thread = task_create_thread(target);
        if (!thread) {
                object_unref(target_obj);
                task_unlock_irqrestore(target, flags);
                task_lock_irqsave(self, &flags);
                handle_table_free_handle(self->t_handles, out_handle);
                task_unlock_irqrestore(self, flags);
                return KERN_NO_MEMORY;
        }
        thread_init_user(thread, ip, sp, args, nr_args);
        target_handle->h_object = &thread->thr_base;
        object_add_handle(&thread->thr_base);
        task_unlock_irqrestore(target, flags);

        *out_thread = out_handle;
        return KERN_OK;
}
/*
 * Make a previously created thread runnable.
 *
 * NOTE(review): the reference taken here is not dropped after
 * schedule_thread_on_cpu(); presumably ownership transfers to the
 * scheduler -- confirm against the scheduler's contract.
 */
kern_status_t sys_thread_start(kern_handle_t thread_handle)
{
        unsigned long flags;
        struct task *self = current_task();
        struct object *thread_obj;
        handle_flags_t thread_flags;

        task_lock_irqsave(self, &flags);
        kern_status_t status = task_resolve_handle(
                self,
                thread_handle,
                &thread_obj,
                &thread_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        object_ref(thread_obj);
        struct thread *thread = thread_cast(thread_obj);
        if (!thread) {
                /* BUGFIX: the handle may name a non-thread object;
                 * previously thread_cast()'s NULL was scheduled. */
                task_unlock_irqrestore(self, flags);
                object_unref(thread_obj);
                return KERN_INVALID_ARGUMENT;
        }
        task_unlock_irqrestore(self, flags);
        schedule_thread_on_cpu(thread);
        return KERN_OK;
}

View File

@@ -1,13 +1,165 @@
#include <mango/handle.h>
#include <mango/printk.h>
#include <mango/vm-region.h>
#include <kernel/handle.h>
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/syscall.h>
#include <kernel/vm-object.h>
#include <kernel/vm-region.h>
/*
 * Create an anonymous VM object of `data_len` bytes with protection
 * `prot` and an optional name, returning a handle through `out_handle`.
 *
 * NOTE(review): this span contained stale pre-rename diff lines (the
 * old `const char *name, size_t len, enum vm_prot prot` signature and
 * stub body); reconstructed to the new version.
 */
kern_status_t sys_vm_object_create(
        const char *name,
        size_t name_len,
        size_t data_len,
        vm_prot_t prot,
        kern_handle_t *out_handle)
{
        struct task *self = current_task();

        if ((name || name_len) && !validate_access_r(self, name, name_len)) {
                return KERN_MEMORY_FAULT;
        }
        if (!validate_access_w(self, out_handle, sizeof *out_handle)) {
                return KERN_MEMORY_FAULT;
        }

        struct vm_object *obj
                = vm_object_create(name, name_len, data_len, prot);
        if (!obj) {
                return KERN_NO_MEMORY;
        }
        kern_status_t status
                = task_open_handle(self, &obj->vo_base, 0, out_handle);
        if (status != KERN_OK) {
                /* drop the creation reference; the object is discarded */
                object_unref(&obj->vo_base);
                return status;
        }
        return KERN_OK;
}
/*
 * Copy `count` bytes from a VM object at `offset` into the caller's
 * buffer `dst`; the number of bytes actually read is stored through
 * `nr_read` when non-NULL.
 */
kern_status_t sys_vm_object_read(
        kern_handle_t object,
        void *dst,
        off_t offset,
        size_t count,
        size_t *nr_read)
{
        struct task *self = current_task();

        if (!validate_access_w(self, dst, count)) {
                return KERN_MEMORY_FAULT;
        }
        if (nr_read && !validate_access_w(self, nr_read, sizeof *nr_read)) {
                return KERN_MEMORY_FAULT;
        }

        /* BUGFIX: resolve under the task lock and pin the object before
         * using it, matching every other syscall in this series; the
         * old code used the object with no lock and no reference, racing
         * with a concurrent handle close. */
        unsigned long irq_flags;
        task_lock_irqsave(self, &irq_flags);
        struct object *obj = NULL;
        handle_flags_t flags = 0;
        kern_status_t status = task_resolve_handle(self, object, &obj, &flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, irq_flags);
                return status;
        }
        object_ref(obj);
        task_unlock_irqrestore(self, irq_flags);

        struct vm_object *vmo = vm_object_cast(obj);
        if (!vmo) {
                object_unref(obj);
                return KERN_INVALID_ARGUMENT;
        }
        status = vm_object_read(vmo, dst, offset, count, nr_read);
        object_unref(obj);
        return status;
}
/*
 * Copy `count` bytes from the caller's buffer `src` into a VM object at
 * `offset`; the number of bytes actually written is stored through
 * `nr_written` when non-NULL.
 */
kern_status_t sys_vm_object_write(
        kern_handle_t object,
        const void *src,
        off_t offset,
        size_t count,
        size_t *nr_written)
{
        struct task *self = current_task();

        if (!validate_access_r(self, src, count)) {
                return KERN_MEMORY_FAULT;
        }
        if (nr_written
            && !validate_access_w(self, nr_written, sizeof *nr_written)) {
                return KERN_MEMORY_FAULT;
        }

        /* BUGFIX: resolve under the task lock and pin the object before
         * using it (see sys_vm_object_read); the old code used the
         * object with no lock and no reference. */
        unsigned long irq_flags;
        task_lock_irqsave(self, &irq_flags);
        struct object *obj = NULL;
        handle_flags_t flags = 0;
        kern_status_t status = task_resolve_handle(self, object, &obj, &flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, irq_flags);
                return status;
        }
        object_ref(obj);
        task_unlock_irqrestore(self, irq_flags);

        struct vm_object *vmo = vm_object_cast(obj);
        if (!vmo) {
                object_unref(obj);
                return KERN_INVALID_ARGUMENT;
        }
        status = vm_object_write(vmo, src, offset, count, nr_written);
        object_unref(obj);
        return status;
}
/*
 * Copy `count` bytes between two VM objects (src at src_offset into dst
 * at dst_offset); the number of bytes copied is stored through
 * `nr_copied` when non-NULL.
 */
kern_status_t sys_vm_object_copy(
        kern_handle_t dst,
        off_t dst_offset,
        kern_handle_t src,
        off_t src_offset,
        size_t count,
        size_t *nr_copied)
{
        struct task *self = current_task();
        if (nr_copied
            && !validate_access_w(self, nr_copied, sizeof *nr_copied)) {
                return KERN_MEMORY_FAULT;
        }
        unsigned long flags;
        /* resolve both handles under one lock acquisition, then pin
         * both objects before dropping the lock */
        task_lock_irqsave(self, &flags);
        kern_status_t status;
        struct object *dst_obj = NULL, *src_obj = NULL;
        handle_flags_t dst_flags = 0, src_flags = 0;
        status = task_resolve_handle(self, dst, &dst_obj, &dst_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        status = task_resolve_handle(self, src, &src_obj, &src_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        object_ref(src_obj);
        object_ref(dst_obj);
        task_unlock_irqrestore(self, flags);
        /* both handles must actually name VM objects */
        struct vm_object *dst_vmo = vm_object_cast(dst_obj);
        struct vm_object *src_vmo = vm_object_cast(src_obj);
        if (!dst_vmo || !src_vmo) {
                object_unref(src_obj);
                object_unref(dst_obj);
                return KERN_INVALID_ARGUMENT;
        }
        status = vm_object_copy(
                dst_vmo,
                dst_offset,
                src_vmo,
                src_offset,
                count,
                nr_copied);
        object_unref(src_obj);
        object_unref(dst_obj);
        return status;
}

434
syscall/vm-region.c Normal file
View File

@@ -0,0 +1,434 @@
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/syscall.h>
#include <kernel/vm-object.h>
#include <kernel/vm-region.h>
/*
 * Create a sub-region of `region_len` bytes at `offset` within the
 * region named by `parent`, with protection `prot` and an optional
 * name.  A handle to the child region is returned through `out`, its
 * base virtual address through `out_base_address`.
 */
kern_status_t sys_vm_region_create(
        kern_handle_t parent,
        const char *name,
        size_t name_len,
        off_t offset,
        size_t region_len,
        vm_prot_t prot,
        kern_handle_t *out,
        virt_addr_t *out_base_address)
{
        struct task *self = current_task();
        if (name_len && !validate_access_r(self, name, name_len)) {
                return KERN_MEMORY_FAULT;
        }
        if (!validate_access_w(self, out, sizeof *out)) {
                return KERN_MEMORY_FAULT;
        }
        if (!validate_access_w(
                    self,
                    out_base_address,
                    sizeof *out_base_address)) {
                return KERN_MEMORY_FAULT;
        }
        unsigned long flags;
        task_lock_irqsave(self, &flags);
        struct object *obj = NULL;
        handle_flags_t handle_flags = 0;
        kern_status_t status
                = task_resolve_handle(self, parent, &obj, &handle_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        struct vm_region *parent_region = vm_region_cast(obj);
        if (!parent_region) {
                task_unlock_irqrestore(self, flags);
                return KERN_INVALID_ARGUMENT;
        }
        /* reserve the child's handle slot before dropping the lock so
         * publication below cannot fail */
        struct handle *child_handle_slot = NULL;
        kern_handle_t child_handle = KERN_HANDLE_INVALID;
        status = handle_table_alloc_handle(
                self->t_handles,
                &child_handle_slot,
                &child_handle);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        /* pin the parent region while creating the child outside the
         * lock */
        object_ref(obj);
        task_unlock_irqrestore(self, flags);
        struct vm_region *child = NULL;
        status = vm_region_create(
                parent_region,
                name,
                name_len,
                offset,
                region_len,
                prot,
                &child);
        object_unref(obj);
        if (status != KERN_OK) {
                task_lock_irqsave(self, &flags);
                handle_table_free_handle(self->t_handles, child_handle);
                task_unlock_irqrestore(self, flags);
                return status;
        }
        /* publish the child through the reserved slot: the handle takes
         * its own reference, then the creation reference is dropped */
        child_handle_slot->h_object = &child->vr_base;
        object_add_handle(&child->vr_base);
        object_unref(&child->vr_base);
        *out = child_handle;
        *out_base_address = vm_region_get_base_address(child);
        return KERN_OK;
}
/*
 * Copy `count` bytes out of a VM region (starting `offset` bytes past
 * its base) into the caller's buffer `dst`; bytes actually read are
 * stored through `nr_read` when non-NULL.
 */
kern_status_t sys_vm_region_read(
        kern_handle_t region_handle,
        void *dst,
        off_t offset,
        size_t count,
        size_t *nr_read)
{
        struct task *self = current_task();
        if (!validate_access_w(self, dst, count)) {
                return KERN_MEMORY_FAULT;
        }
        if (nr_read && !validate_access_w(self, nr_read, sizeof *nr_read)) {
                return KERN_MEMORY_FAULT;
        }
        unsigned long flags;
        task_lock_irqsave(self, &flags);
        struct object *obj = NULL;
        handle_flags_t handle_flags = 0;
        kern_status_t status
                = task_resolve_handle(self, region_handle, &obj, &handle_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        struct vm_region *region = vm_region_cast(obj);
        if (!region) {
                task_unlock_irqrestore(self, flags);
                return KERN_INVALID_ARGUMENT;
        }
        /* pin the region across the copy, which runs unlocked */
        object_ref(obj);
        task_unlock_irqrestore(self, flags);
        virt_addr_t src_address = vm_region_get_base_address(region) + offset;
        /* cross-address-space copy: region -> caller's address space */
        status = vm_region_memmove(
                self->t_address_space,
                (virt_addr_t)dst,
                region,
                src_address,
                count,
                nr_read);
        object_unref(obj);
        return status;
}
/*
 * Copy `count` bytes from the caller's buffer `src` into a VM region
 * (starting `offset` bytes past its base); bytes actually written are
 * stored through `nr_written` when non-NULL.
 */
kern_status_t sys_vm_region_write(
        kern_handle_t region_handle,
        const void *src,
        off_t offset,
        size_t count,
        size_t *nr_written)
{
        struct task *self = current_task();
        if (!validate_access_r(self, src, count)) {
                return KERN_MEMORY_FAULT;
        }
        if (nr_written
            && !validate_access_w(self, nr_written, sizeof *nr_written)) {
                return KERN_MEMORY_FAULT;
        }
        unsigned long flags;
        task_lock_irqsave(self, &flags);
        struct object *obj = NULL;
        handle_flags_t handle_flags = 0;
        kern_status_t status
                = task_resolve_handle(self, region_handle, &obj, &handle_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        struct vm_region *region = vm_region_cast(obj);
        if (!region) {
                task_unlock_irqrestore(self, flags);
                return KERN_INVALID_ARGUMENT;
        }
        /* pin the region across the copy, which runs unlocked */
        object_ref(obj);
        task_unlock_irqrestore(self, flags);
        virt_addr_t dst_address = vm_region_get_base_address(region) + offset;
        /* cross-address-space copy: caller's address space -> region */
        status = vm_region_memmove(
                region,
                dst_address,
                self->t_address_space,
                (virt_addr_t)src,
                count,
                nr_written);
        object_unref(obj);
        return status;
}
/*
 * Map `length` bytes of the VM object (starting at `object_offset`)
 * into the region at absolute virtual address `map_address`, or at any
 * free spot when `map_address` is the ANY sentinel.  The chosen base
 * address is returned through `out_base_address` when non-NULL.
 */
kern_status_t sys_vm_region_map_absolute(
        kern_handle_t region_handle,
        virt_addr_t map_address,
        kern_handle_t object_handle,
        off_t object_offset,
        size_t length,
        vm_prot_t prot,
        virt_addr_t *out_base_address)
{
        struct task *self = current_task();
        /* BUGFIX: out_base_address is written by the map call, so it
         * must be validated for write access, not read access */
        if (out_base_address
            && !validate_access_w(
                    self,
                    out_base_address,
                    sizeof *out_base_address)) {
                return KERN_MEMORY_FAULT;
        }
        kern_status_t status = KERN_OK;
        unsigned long flags;
        task_lock_irqsave(self, &flags);
        struct object *region_obj = NULL, *vmo_obj = NULL;
        handle_flags_t region_flags = 0, vmo_flags = 0;
        status = task_resolve_handle(
                self,
                region_handle,
                &region_obj,
                &region_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        status = task_resolve_handle(self, object_handle, &vmo_obj, &vmo_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        struct vm_region *region = vm_region_cast(region_obj);
        if (!region) {
                task_unlock_irqrestore(self, flags);
                return KERN_INVALID_ARGUMENT;
        }
        struct vm_object *vmo = vm_object_cast(vmo_obj);
        if (!vmo) {
                task_unlock_irqrestore(self, flags);
                return KERN_INVALID_ARGUMENT;
        }
        object_ref(vmo_obj);
        object_ref(region_obj);
        task_unlock_irqrestore(self, flags);
        /* translate the absolute address into a region-relative offset.
         * NOTE(review): comparing a virt_addr_t against the off_t
         * sentinel VM_REGION_ANY_OFFSET looks type-suspect -- confirm
         * the sentinel value is shared between the two types. */
        off_t region_offset = VM_REGION_ANY_OFFSET;
        if (map_address != VM_REGION_ANY_OFFSET) {
                region_offset
                        = map_address - vm_region_get_base_address(region);
        }
        status = vm_region_map_object(
                region,
                region_offset,
                vmo,
                object_offset,
                length,
                prot,
                out_base_address);
        object_unref(vmo_obj);
        object_unref(region_obj);
        return status;
}
/*
 * Map `length` bytes of the VM object (starting at `object_offset`)
 * into the region at `region_offset` bytes past the region's base (or
 * anywhere if the ANY sentinel).  The chosen base address is returned
 * through `out_base_address` when non-NULL.
 */
kern_status_t sys_vm_region_map_relative(
        kern_handle_t region_handle,
        off_t region_offset,
        kern_handle_t object_handle,
        off_t object_offset,
        size_t length,
        vm_prot_t prot,
        virt_addr_t *out_base_address)
{
        tracek("vm_region_map_relative(%x, %x, %x, %x, %x, %x, %p)",
               region_handle,
               region_offset,
               object_handle,
               object_offset,
               length,
               prot,
               out_base_address);
        struct task *self = current_task();
        /* BUGFIX: out_base_address is written by the map call, so it
         * must be validated for write access, not read access */
        if (out_base_address
            && !validate_access_w(
                    self,
                    out_base_address,
                    sizeof *out_base_address)) {
                return KERN_MEMORY_FAULT;
        }
        kern_status_t status = KERN_OK;
        unsigned long flags;
        task_lock_irqsave(self, &flags);
        struct object *region_obj = NULL, *vmo_obj = NULL;
        handle_flags_t region_flags = 0, vmo_flags = 0;
        status = task_resolve_handle(
                self,
                region_handle,
                &region_obj,
                &region_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        status = task_resolve_handle(self, object_handle, &vmo_obj, &vmo_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        struct vm_region *region = vm_region_cast(region_obj);
        if (!region) {
                task_unlock_irqrestore(self, flags);
                return KERN_INVALID_ARGUMENT;
        }
        struct vm_object *vmo = vm_object_cast(vmo_obj);
        if (!vmo) {
                task_unlock_irqrestore(self, flags);
                return KERN_INVALID_ARGUMENT;
        }
        object_ref(vmo_obj);
        object_ref(region_obj);
        task_unlock_irqrestore(self, flags);
        status = vm_region_map_object(
                region,
                region_offset,
                vmo,
                object_offset,
                length,
                prot,
                out_base_address);
        object_unref(vmo_obj);
        object_unref(region_obj);
        tracek("result: %u", status);
        return status;
}
/*
 * Unmap `length` bytes starting at absolute virtual address `address`
 * within the region named by `region_handle`.
 */
kern_status_t sys_vm_region_unmap_absolute(
        kern_handle_t region_handle,
        virt_addr_t address,
        size_t length)
{
        struct task *self = current_task();
        kern_status_t status = KERN_OK;
        unsigned long flags;
        task_lock_irqsave(self, &flags);
        struct object *region_obj = NULL;
        handle_flags_t region_flags = 0;
        status = task_resolve_handle(
                self,
                region_handle,
                &region_obj,
                &region_flags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, flags);
                return status;
        }
        struct vm_region *region = vm_region_cast(region_obj);
        if (!region) {
                task_unlock_irqrestore(self, flags);
                return KERN_INVALID_ARGUMENT;
        }
        /* pin the region across the unmap, which runs unlocked */
        object_ref(region_obj);
        task_unlock_irqrestore(self, flags);
        /* translate the absolute address to a region-relative offset */
        off_t region_offset = address - vm_region_get_base_address(region);
        status = vm_region_unmap(region, region_offset, length);
        object_unref(region_obj);
        return status;
}
/*
 * Unmap `length` bytes starting `offset` bytes past the base of the
 * region named by `region_handle`.
 */
kern_status_t sys_vm_region_unmap_relative(
        kern_handle_t region_handle,
        off_t offset,
        size_t length)
{
        struct task *self = current_task();
        kern_status_t status = KERN_OK;
        unsigned long irqstate;

        task_lock_irqsave(self, &irqstate);

        struct object *obj = NULL;
        handle_flags_t hflags = 0;
        status = task_resolve_handle(self, region_handle, &obj, &hflags);
        if (status != KERN_OK) {
                task_unlock_irqrestore(self, irqstate);
                return status;
        }

        struct vm_region *region = vm_region_cast(obj);
        if (region == NULL) {
                task_unlock_irqrestore(self, irqstate);
                return KERN_INVALID_ARGUMENT;
        }

        /* keep the region alive while the unmap runs unlocked */
        object_ref(obj);
        task_unlock_irqrestore(self, irqstate);

        status = vm_region_unmap(region, offset, length);
        object_unref(obj);
        return status;
}