/*
 * IPC syscall layer: channel/port creation, port connect/disconnect,
 * and message send/recv/reply/read/write entry points.
 */
#include <kernel/channel.h>
|
|
#include <kernel/port.h>
|
|
#include <kernel/printk.h>
|
|
#include <kernel/sched.h>
|
|
#include <kernel/syscall.h>
|
|
#include <kernel/vm-region.h>
|
|
|
|
/*
 * Create a new channel, register it under `id` in the calling task's
 * channel table, and return a handle to it through `out`.
 *
 * Returns KERN_MEMORY_FAULT if `out` is not writable by the caller,
 * KERN_NO_MEMORY if allocation fails, KERN_NAME_EXISTS if the task
 * already has a channel registered under `id`, or any error from
 * task_open_handle().
 */
kern_status_t sys_channel_create(unsigned int id, kern_handle_t *out)
{
	struct task *self = current_task();
	if (!validate_access_w(self, out, sizeof *out)) {
		return KERN_MEMORY_FAULT;
	}

	struct channel *channel = channel_create();
	if (!channel) {
		return KERN_NO_MEMORY;
	}

	unsigned long irq_flags;
	task_lock_irqsave(self, &irq_flags);

	if (task_get_channel(self, id)) {
		task_unlock_irqrestore(self, irq_flags);
		/* BUG FIX: drop the creation reference; previously the new
		 * channel was leaked on the name-exists path. */
		object_unref(&channel->c_base);
		return KERN_NAME_EXISTS;
	}

	kern_handle_t handle;
	kern_status_t status
		= task_open_handle(self, &channel->c_base, 0, &handle);
	if (status != KERN_OK) {
		task_unlock_irqrestore(self, irq_flags);
		/* Drop the creation reference; no handle refers to it. */
		object_unref(&channel->c_base);
		return status;
	}

	task_add_channel(self, channel, id);
	task_unlock_irqrestore(self, irq_flags);

	/* `out` was validated writable above. */
	*out = handle;
	return KERN_OK;
}
|
|
|
|
/*
 * Create a new port and return a handle to it through `out`.
 *
 * Returns KERN_MEMORY_FAULT if `out` is not writable by the caller,
 * KERN_NO_MEMORY if allocation fails, or any error from
 * task_open_handle().
 */
kern_status_t sys_port_create(kern_handle_t *out)
{
	struct task *task = current_task();

	/* The new handle is written back to userspace, so the destination
	 * must be writable by the caller. */
	if (!validate_access_w(task, out, sizeof *out)) {
		return KERN_MEMORY_FAULT;
	}

	struct port *port = port_create();
	if (!port) {
		return KERN_NO_MEMORY;
	}

	kern_handle_t handle;
	unsigned long flags;

	task_lock_irqsave(task, &flags);
	kern_status_t status
		= task_open_handle(task, &port->p_base, 0, &handle);
	task_unlock_irqrestore(task, flags);

	if (status != KERN_OK) {
		/* No handle was opened: drop the creation reference. */
		object_unref(&port->p_base);
		return status;
	}

	*out = handle;
	return KERN_OK;
}
|
|
|
|
kern_status_t sys_port_connect(
|
|
kern_handle_t port_handle,
|
|
tid_t task_id,
|
|
unsigned int channel_id)
|
|
{
|
|
unsigned long flags;
|
|
|
|
struct task *self = current_task();
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *port_obj = NULL;
|
|
handle_flags_t port_handle_flags = 0;
|
|
kern_status_t status = task_resolve_handle(
|
|
self,
|
|
port_handle,
|
|
&port_obj,
|
|
&port_handle_flags);
|
|
if (status != KERN_OK) {
|
|
return status;
|
|
}
|
|
|
|
/* add a reference to the port object to make sure it isn't deleted
|
|
* while we're using it */
|
|
object_ref(port_obj);
|
|
struct port *port = port_cast(port_obj);
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
struct task *remote_task = task_from_tid(task_id);
|
|
if (!remote_task) {
|
|
return KERN_NO_ENTRY;
|
|
}
|
|
|
|
task_lock_irqsave(remote_task, &flags);
|
|
|
|
struct channel *remote = task_get_channel(remote_task, channel_id);
|
|
if (!remote) {
|
|
task_unlock_irqrestore(remote_task, flags);
|
|
return KERN_NO_ENTRY;
|
|
}
|
|
|
|
object_ref(&remote->c_base);
|
|
task_unlock_irqrestore(remote_task, flags);
|
|
|
|
port_lock_irqsave(port, &flags);
|
|
status = port_connect(port, remote);
|
|
port_unlock_irqrestore(port, flags);
|
|
object_unref(port_obj);
|
|
object_unref(&remote->c_base);
|
|
|
|
return KERN_OK;
|
|
}
|
|
|
|
/*
 * Disconnect the port referred to by `port_handle` from its channel.
 *
 * Returns KERN_INVALID_ARGUMENT if the handle does not refer to a
 * port, or the status of port_disconnect() otherwise.
 */
kern_status_t sys_port_disconnect(kern_handle_t port_handle)
{
	unsigned long flags;

	struct task *self = current_task();
	task_lock_irqsave(self, &flags);

	struct object *port_obj = NULL;
	handle_flags_t port_handle_flags = 0;
	kern_status_t status = task_resolve_handle(
		self,
		port_handle,
		&port_obj,
		&port_handle_flags);
	if (status != KERN_OK) {
		/* BUG FIX: previously returned with the task lock held. */
		task_unlock_irqrestore(self, flags);
		return status;
	}

	/* add a reference to the port object to make sure it isn't deleted
	 * while we're using it */
	object_ref(port_obj);
	task_unlock_irqrestore(self, flags);

	struct port *port = port_cast(port_obj);
	if (!port) {
		object_unref(port_obj);
		return KERN_INVALID_ARGUMENT;
	}

	port_lock_irqsave(port, &flags);
	status = port_disconnect(port);
	port_unlock_irqrestore(port, flags);

	/* Drop our pin only after we're done touching the port; the
	 * original dropped it before taking the port lock, which could
	 * free the port out from under us if this was the last ref. */
	object_unref(port_obj);

	return status;
}
|
|
|
|
static bool validate_iovec(
|
|
struct task *task,
|
|
const kern_iovec_t *iov,
|
|
size_t count,
|
|
bool rw)
|
|
{
|
|
if (!validate_access_r(task, iov, count * sizeof(*iov))) {
|
|
return false;
|
|
}
|
|
|
|
for (size_t i = 0; i < count; i++) {
|
|
bool ok = false;
|
|
const kern_iovec_t *vec = &iov[i];
|
|
if (rw) {
|
|
ok = validate_access_w(task, vec->io_base, vec->io_len);
|
|
} else {
|
|
ok = validate_access_r(task, vec->io_base, vec->io_len);
|
|
}
|
|
|
|
if (!ok) {
|
|
return false;
|
|
}
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
/*
 * Validate a userspace message descriptor: the kern_msg_t itself, its
 * data iovecs, and its handle buffer must all be accessible by `task`
 * with the required permission (read for rw == false, write for
 * rw == true).
 *
 * Returns false on any validation failure.
 */
static bool validate_msg(struct task *task, const kern_msg_t *msg, bool rw)
{
	if (!msg) {
		return false;
	}

	vm_prot_t flags;
	if (rw) {
		flags = VM_PROT_WRITE | VM_PROT_USER;
	} else {
		flags = VM_PROT_READ | VM_PROT_USER;
	}

	if (!validate_access(task, msg, sizeof *msg, flags)) {
		return false;
	}

	if (!validate_iovec(task, msg->msg_data, msg->msg_data_count, rw)) {
		return false;
	}

	/* BUG FIX: msg_handles_count comes from userspace; reject counts
	 * whose byte size would overflow size_t and wrap into a small
	 * length that passes the access check. */
	if (msg->msg_handles_count
	    > (size_t)-1 / sizeof(*msg->msg_handles)) {
		return false;
	}

	size_t handle_buffer_len
		= msg->msg_handles_count * sizeof(*msg->msg_handles);
	if (!validate_access(
		    task,
		    msg->msg_handles,
		    handle_buffer_len,
		    flags)) {
		return false;
	}

	return true;
}
|
|
|
|
kern_status_t sys_msg_send(
|
|
kern_handle_t port_handle,
|
|
const kern_msg_t *msg,
|
|
kern_msg_t *out_reply)
|
|
{
|
|
struct task *self = current_task();
|
|
|
|
if (!validate_msg(self, msg, false)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
if (!validate_msg(self, out_reply, true)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
unsigned long flags;
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *port_obj = NULL;
|
|
handle_flags_t port_handle_flags = 0;
|
|
kern_status_t status = task_resolve_handle(
|
|
self,
|
|
port_handle,
|
|
&port_obj,
|
|
&port_handle_flags);
|
|
if (status != KERN_OK) {
|
|
return status;
|
|
}
|
|
|
|
/* add a reference to the port object to make sure it isn't deleted
|
|
* while we're using it */
|
|
object_ref(port_obj);
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
struct port *port = port_cast(port_obj);
|
|
if (!port) {
|
|
object_unref(port_obj);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
port_lock_irqsave(port, &flags);
|
|
status = port_send_msg(port, msg, out_reply, &flags);
|
|
port_unlock_irqrestore(port, flags);
|
|
object_unref(port_obj);
|
|
|
|
return status;
|
|
}
|
|
|
|
/*
 * Receive the next message from the channel referred to by
 * `channel_handle`, writing it into `out_msg`.
 *
 * Returns KERN_MEMORY_FAULT if `out_msg` fails validation,
 * KERN_INVALID_ARGUMENT if the handle does not refer to a channel, or
 * the status of channel_recv_msg() otherwise.
 */
kern_status_t sys_msg_recv(kern_handle_t channel_handle, kern_msg_t *out_msg)
{
	struct task *self = current_task();

	/* receive buffer: caller needs write access */
	if (!validate_msg(self, out_msg, true)) {
		return KERN_MEMORY_FAULT;
	}

	unsigned long flags;

	task_lock_irqsave(self, &flags);

	struct object *channel_obj = NULL;
	handle_flags_t channel_handle_flags = 0;
	kern_status_t status = task_resolve_handle(
		self,
		channel_handle,
		&channel_obj,
		&channel_handle_flags);
	if (status != KERN_OK) {
		/* BUG FIX: previously returned with the task lock held. */
		task_unlock_irqrestore(self, flags);
		return status;
	}

	/* add a reference to the channel object to make sure it isn't
	 * deleted while we're using it */
	object_ref(channel_obj);
	task_unlock_irqrestore(self, flags);

	struct channel *channel = channel_cast(channel_obj);
	if (!channel) {
		object_unref(channel_obj);
		return KERN_INVALID_ARGUMENT;
	}

	/* channel_recv_msg may drop and retake the channel lock while
	 * blocking; it receives &flags for that purpose. */
	channel_lock_irqsave(channel, &flags);
	status = channel_recv_msg(channel, out_msg, &flags);
	channel_unlock_irqrestore(channel, flags);
	object_unref(channel_obj);

	return status;
}
|
|
|
|
kern_status_t sys_msg_reply(
|
|
kern_handle_t channel_handle,
|
|
msgid_t id,
|
|
const kern_msg_t *reply)
|
|
{
|
|
struct task *self = current_task();
|
|
|
|
if (!validate_msg(self, reply, true)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
unsigned long flags;
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *channel_obj = NULL;
|
|
handle_flags_t channel_handle_flags = 0;
|
|
kern_status_t status = task_resolve_handle(
|
|
self,
|
|
channel_handle,
|
|
&channel_obj,
|
|
&channel_handle_flags);
|
|
if (status != KERN_OK) {
|
|
return status;
|
|
}
|
|
|
|
/* add a reference to the port object to make sure it isn't deleted
|
|
* while we're using it */
|
|
object_ref(channel_obj);
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
struct channel *channel = channel_cast(channel_obj);
|
|
if (!channel) {
|
|
object_unref(channel_obj);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
channel_lock_irqsave(channel, &flags);
|
|
status = channel_reply_msg(channel, id, reply, &flags);
|
|
channel_unlock_irqrestore(channel, flags);
|
|
object_unref(channel_obj);
|
|
|
|
return status;
|
|
}
|
|
|
|
kern_status_t sys_msg_read(
|
|
kern_handle_t channel_handle,
|
|
msgid_t id,
|
|
size_t offset,
|
|
const kern_iovec_t *iov,
|
|
size_t iov_count,
|
|
size_t *nr_read)
|
|
{
|
|
struct task *self = current_task();
|
|
|
|
if (nr_read && !validate_access_w(self, nr_read, sizeof *nr_read)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
if (!validate_iovec(self, iov, iov_count, true)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
unsigned long flags;
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *channel_obj = NULL;
|
|
handle_flags_t channel_handle_flags = 0;
|
|
kern_status_t status = task_resolve_handle(
|
|
self,
|
|
channel_handle,
|
|
&channel_obj,
|
|
&channel_handle_flags);
|
|
if (status != KERN_OK) {
|
|
return status;
|
|
}
|
|
|
|
/* add a reference to the port object to make sure it isn't deleted
|
|
* while we're using it */
|
|
object_ref(channel_obj);
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
struct channel *channel = channel_cast(channel_obj);
|
|
if (!channel) {
|
|
object_unref(channel_obj);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
channel_lock_irqsave(channel, &flags);
|
|
status = channel_read_msg(
|
|
channel,
|
|
id,
|
|
offset,
|
|
self->t_address_space,
|
|
iov,
|
|
iov_count,
|
|
nr_read);
|
|
channel_unlock_irqrestore(channel, flags);
|
|
object_unref(channel_obj);
|
|
|
|
return status;
|
|
}
|
|
|
|
kern_status_t sys_msg_write(
|
|
kern_handle_t channel_handle,
|
|
msgid_t id,
|
|
size_t offset,
|
|
const kern_iovec_t *iov,
|
|
size_t iov_count,
|
|
size_t *nr_written)
|
|
{
|
|
struct task *self = current_task();
|
|
|
|
if (nr_written
|
|
&& !validate_access_w(self, nr_written, sizeof *nr_written)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
if (!validate_iovec(self, iov, iov_count, false)) {
|
|
return KERN_MEMORY_FAULT;
|
|
}
|
|
|
|
unsigned long flags;
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
struct object *channel_obj = NULL;
|
|
handle_flags_t channel_handle_flags = 0;
|
|
kern_status_t status = task_resolve_handle(
|
|
self,
|
|
channel_handle,
|
|
&channel_obj,
|
|
&channel_handle_flags);
|
|
if (status != KERN_OK) {
|
|
return status;
|
|
}
|
|
|
|
/* add a reference to the port object to make sure it isn't deleted
|
|
* while we're using it */
|
|
object_ref(channel_obj);
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
struct channel *channel = channel_cast(channel_obj);
|
|
if (!channel) {
|
|
object_unref(channel_obj);
|
|
return KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
channel_lock_irqsave(channel, &flags);
|
|
status = channel_write_msg(
|
|
channel,
|
|
id,
|
|
offset,
|
|
self->t_address_space,
|
|
iov,
|
|
iov_count,
|
|
nr_written);
|
|
channel_unlock_irqrestore(channel, flags);
|
|
object_unref(channel_obj);
|
|
|
|
return status;
|
|
}
|