An address space is a non-recursive data structure containing a flat list of vm_areas, each representing a mapped vm-object. Userspace programs can no longer create sub-address-spaces; instead, they can reserve portions of the address space and use that reserved space to create mappings.
314 lines
6.5 KiB
C
314 lines
6.5 KiB
C
#include <kernel/address-space.h>
|
|
#include <kernel/printk.h>
|
|
#include <kernel/sched.h>
|
|
#include <kernel/syscall.h>
|
|
#include <kernel/vm-object.h>
|
|
|
|
kern_status_t sys_address_space_read(
|
|
kern_handle_t region_handle,
|
|
void *dst,
|
|
virt_addr_t base,
|
|
size_t count,
|
|
size_t *nr_read)
|
|
{
|
|
struct task *self = current_task();
|
|
|
|
if (!validate_access_w(self, dst, count)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
if (nr_read && !validate_access_w(self, nr_read, sizeof *nr_read)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
unsigned long flags;
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *obj = NULL;
|
|
handle_flags_t handle_flags = 0;
|
|
kern_status_t status
|
|
= task_resolve_handle(self, region_handle, &obj, &handle_flags);
|
|
if (status != KERN_OK) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return status;
|
|
}
|
|
|
|
struct address_space *region = address_space_cast(obj);
|
|
if (!region) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
address_space_lock_irqsave(region, &flags);
|
|
status = address_space_memmove(
|
|
self->t_address_space,
|
|
(virt_addr_t)dst,
|
|
region,
|
|
base,
|
|
count,
|
|
nr_read);
|
|
address_space_unlock_irqrestore(region, flags);
|
|
|
|
object_unref(obj);
|
|
|
|
return status;
|
|
}
|
|
|
|
kern_status_t sys_address_space_write(
|
|
kern_handle_t region_handle,
|
|
const void *src,
|
|
virt_addr_t base,
|
|
size_t count,
|
|
size_t *nr_written)
|
|
{
|
|
struct task *self = current_task();
|
|
|
|
if (!validate_access_r(self, src, count)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
if (nr_written
|
|
&& !validate_access_w(self, nr_written, sizeof *nr_written)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
unsigned long flags;
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *obj = NULL;
|
|
handle_flags_t handle_flags = 0;
|
|
kern_status_t status
|
|
= task_resolve_handle(self, region_handle, &obj, &handle_flags);
|
|
if (status != KERN_OK) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return status;
|
|
}
|
|
|
|
struct address_space *region = address_space_cast(obj);
|
|
if (!region) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
address_space_lock_irqsave(region, &flags);
|
|
status = address_space_memmove(
|
|
region,
|
|
base,
|
|
self->t_address_space,
|
|
(virt_addr_t)src,
|
|
count,
|
|
nr_written);
|
|
address_space_unlock_irqrestore(region, flags);
|
|
|
|
object_unref(obj);
|
|
|
|
return status;
|
|
}
|
|
|
|
kern_status_t sys_address_space_map(
|
|
kern_handle_t region_handle,
|
|
virt_addr_t map_address,
|
|
kern_handle_t object_handle,
|
|
off_t object_offset,
|
|
size_t length,
|
|
vm_prot_t prot,
|
|
virt_addr_t *out_base_address)
|
|
{
|
|
struct task *self = current_task();
|
|
|
|
if (out_base_address
|
|
&& !validate_access_r(
|
|
self,
|
|
out_base_address,
|
|
sizeof *out_base_address)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
kern_status_t status = KERN_OK;
|
|
unsigned long flags;
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *region_obj = NULL, *vmo_obj = NULL;
|
|
handle_flags_t region_flags = 0, vmo_flags = 0;
|
|
status = task_resolve_handle(
|
|
self,
|
|
region_handle,
|
|
®ion_obj,
|
|
®ion_flags);
|
|
if (status != KERN_OK) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return status;
|
|
}
|
|
|
|
status = task_resolve_handle(self, object_handle, &vmo_obj, &vmo_flags);
|
|
if (status != KERN_OK) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return status;
|
|
}
|
|
|
|
struct address_space *region = address_space_cast(region_obj);
|
|
if (!region) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
struct vm_object *vmo = vm_object_cast(vmo_obj);
|
|
if (!vmo) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
address_space_lock_irqsave(region, &flags);
|
|
/* address_space_map will take care of locking `vmo` */
|
|
status = address_space_map(
|
|
region,
|
|
map_address,
|
|
vmo,
|
|
object_offset,
|
|
length,
|
|
prot,
|
|
out_base_address);
|
|
address_space_unlock_irqrestore(region, flags);
|
|
|
|
object_unref(vmo_obj);
|
|
object_unref(region_obj);
|
|
|
|
return status;
|
|
}
|
|
|
|
kern_status_t sys_address_space_unmap(
|
|
kern_handle_t region_handle,
|
|
virt_addr_t base,
|
|
size_t length)
|
|
{
|
|
struct task *self = current_task();
|
|
|
|
kern_status_t status = KERN_OK;
|
|
unsigned long flags;
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *region_obj = NULL;
|
|
handle_flags_t region_flags = 0;
|
|
status = task_resolve_handle(
|
|
self,
|
|
region_handle,
|
|
®ion_obj,
|
|
®ion_flags);
|
|
if (status != KERN_OK) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return status;
|
|
}
|
|
|
|
struct address_space *region = address_space_cast(region_obj);
|
|
if (!region) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
status = address_space_unmap(region, base, length);
|
|
|
|
object_unref(region_obj);
|
|
|
|
return status;
|
|
}
|
|
|
|
kern_status_t sys_address_space_reserve(
|
|
kern_handle_t region_handle,
|
|
virt_addr_t map_address,
|
|
size_t length,
|
|
virt_addr_t *out_base_address)
|
|
{
|
|
struct task *self = current_task();
|
|
|
|
if (out_base_address
|
|
&& !validate_access_r(
|
|
self,
|
|
out_base_address,
|
|
sizeof *out_base_address)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
kern_status_t status = KERN_OK;
|
|
unsigned long flags;
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *region_obj = NULL;
|
|
handle_flags_t region_flags = 0;
|
|
status = task_resolve_handle(
|
|
self,
|
|
region_handle,
|
|
®ion_obj,
|
|
®ion_flags);
|
|
if (status != KERN_OK) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return status;
|
|
}
|
|
|
|
struct address_space *region = address_space_cast(region_obj);
|
|
if (!region) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
address_space_lock_irqsave(region, &flags);
|
|
status = address_space_reserve(
|
|
region,
|
|
map_address,
|
|
length,
|
|
out_base_address);
|
|
address_space_unlock_irqrestore(region, flags);
|
|
|
|
object_unref(region_obj);
|
|
|
|
return status;
|
|
}
|
|
|
|
kern_status_t sys_address_space_release(
|
|
kern_handle_t region_handle,
|
|
virt_addr_t base,
|
|
size_t length)
|
|
{
|
|
struct task *self = current_task();
|
|
|
|
kern_status_t status = KERN_OK;
|
|
unsigned long flags;
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *region_obj = NULL;
|
|
handle_flags_t region_flags = 0;
|
|
status = task_resolve_handle(
|
|
self,
|
|
region_handle,
|
|
®ion_obj,
|
|
®ion_flags);
|
|
if (status != KERN_OK) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return status;
|
|
}
|
|
|
|
struct address_space *region = address_space_cast(region_obj);
|
|
if (!region) {
|
|
task_unlock_irqrestore(self, flags);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
address_space_lock_irqsave(region, &flags);
|
|
status = address_space_unmap(region, base, length);
|
|
address_space_unlock_irqrestore(region, flags);
|
|
|
|
object_unref(region_obj);
|
|
|
|
return status;
|
|
}
|