2026-02-19 19:21:50 +00:00
|
|
|
#include <kernel/channel.h>
|
|
|
|
|
#include <kernel/port.h>
|
|
|
|
|
#include <kernel/printk.h>
|
|
|
|
|
#include <kernel/sched.h>
|
|
|
|
|
#include <kernel/syscall.h>
|
|
|
|
|
#include <kernel/vm-region.h>
|
|
|
|
|
|
|
|
|
|
kern_status_t sys_channel_create(
|
|
|
|
|
unsigned int id,
|
|
|
|
|
channel_flags_t flags,
|
|
|
|
|
kern_handle_t *out)
|
|
|
|
|
{
|
|
|
|
|
struct task *self = current_task();
|
|
|
|
|
if (!validate_access_w(self, out, sizeof *out)) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct channel *channel = channel_create();
|
|
|
|
|
if (!channel) {
|
|
|
|
|
return KERN_NO_MEMORY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unsigned long irq_flags;
|
|
|
|
|
task_lock_irqsave(self, &irq_flags);
|
|
|
|
|
|
|
|
|
|
if (task_get_channel(self, id)) {
|
|
|
|
|
task_unlock_irqrestore(self, irq_flags);
|
|
|
|
|
return KERN_NAME_EXISTS;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_handle_t handle;
|
|
|
|
|
kern_status_t status
|
|
|
|
|
= task_open_handle(self, &channel->c_base, 0, &handle);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
task_unlock_irqrestore(self, irq_flags);
|
|
|
|
|
object_unref(&channel->c_base);
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
task_add_channel(self, channel, id);
|
|
|
|
|
task_unlock_irqrestore(self, irq_flags);
|
|
|
|
|
|
|
|
|
|
*out = handle;
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Allocate a fresh port and hand the caller a handle to it via the user
 * pointer `out`.  Returns KERN_MEMORY_FAULT if `out` is not writable,
 * KERN_NO_MEMORY on allocation failure, or the task_open_handle() status.
 */
kern_status_t sys_port_create(kern_handle_t *out)
{
	struct task *task = current_task();

	/* make sure the handle can be delivered before allocating anything */
	if (!validate_access_w(task, out, sizeof *out)) {
		return KERN_MEMORY_FAULT;
	}

	struct port *new_port = port_create();
	if (!new_port) {
		return KERN_NO_MEMORY;
	}

	unsigned long irq_state;
	kern_handle_t handle;

	/* install the port into the task's handle table under the lock */
	task_lock_irqsave(task, &irq_state);
	kern_status_t rc
		= task_open_handle(task, &new_port->p_base, 0, &handle);
	task_unlock_irqrestore(task, irq_state);

	if (rc != KERN_OK) {
		/* the port was never published; drop the creation reference */
		object_unref(&new_port->p_base);
		return rc;
	}

	*out = handle;
	return KERN_OK;
}
|
|
|
|
|
|
|
|
|
|
kern_status_t sys_port_connect(
|
|
|
|
|
kern_handle_t port_handle,
|
|
|
|
|
tid_t task_id,
|
|
|
|
|
unsigned int channel_id)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
|
|
struct task *self = current_task();
|
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
|
|
|
|
|
|
struct object *port_obj = NULL;
|
|
|
|
|
handle_flags_t port_handle_flags = 0;
|
|
|
|
|
kern_status_t status = task_resolve_handle(
|
|
|
|
|
self,
|
|
|
|
|
port_handle,
|
|
|
|
|
&port_obj,
|
|
|
|
|
&port_handle_flags);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* add a reference to the port object to make sure it isn't deleted
|
|
|
|
|
* while we're using it */
|
|
|
|
|
object_ref(port_obj);
|
2026-02-21 11:32:57 +00:00
|
|
|
struct port *port = port_cast(port_obj);
|
2026-02-19 19:21:50 +00:00
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
|
|
|
|
|
struct task *remote_task = task_from_tid(task_id);
|
|
|
|
|
if (!remote_task) {
|
|
|
|
|
return KERN_NO_ENTRY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
task_lock_irqsave(remote_task, &flags);
|
|
|
|
|
|
|
|
|
|
struct channel *remote = task_get_channel(remote_task, channel_id);
|
|
|
|
|
if (!remote) {
|
|
|
|
|
task_unlock_irqrestore(remote_task, flags);
|
|
|
|
|
return KERN_NO_ENTRY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
object_ref(&remote->c_base);
|
|
|
|
|
task_unlock_irqrestore(remote_task, flags);
|
|
|
|
|
|
2026-02-21 11:32:57 +00:00
|
|
|
port_lock_irqsave(port, &flags);
|
|
|
|
|
status = port_connect(port, remote);
|
|
|
|
|
port_unlock_irqrestore(port, flags);
|
2026-02-19 19:21:50 +00:00
|
|
|
object_unref(port_obj);
|
|
|
|
|
object_unref(&remote->c_base);
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-21 11:32:57 +00:00
|
|
|
/*
 * Disconnect the port referred to by `port_handle` from its channel.
 *
 * Returns the task_resolve_handle() status on a bad handle,
 * KERN_INVALID_ARGUMENT if the handle is not a port, otherwise the
 * port_disconnect() result.
 */
kern_status_t sys_port_disconnect(kern_handle_t port_handle)
{
	unsigned long flags;

	struct task *self = current_task();
	task_lock_irqsave(self, &flags);

	struct object *port_obj = NULL;
	handle_flags_t port_handle_flags = 0;
	kern_status_t status = task_resolve_handle(
		self,
		port_handle,
		&port_obj,
		&port_handle_flags);
	if (status != KERN_OK) {
		/* fix: release the task lock before bailing out; returning
		 * with it held would deadlock the next task_lock caller */
		task_unlock_irqrestore(self, flags);
		return status;
	}

	/* add a reference to the port object to make sure it isn't deleted
	 * while we're using it */
	object_ref(port_obj);
	task_unlock_irqrestore(self, flags);

	struct port *port = port_cast(port_obj);
	if (!port) {
		object_unref(port_obj);
		return KERN_INVALID_ARGUMENT;
	}

	port_lock_irqsave(port, &flags);
	status = port_disconnect(port);
	port_unlock_irqrestore(port, flags);

	/* fix: keep the pinning reference until we are done touching the
	 * port; dropping it before port_disconnect() (as before) let a
	 * concurrent handle close free the port under us.  This matches
	 * sys_msg_send's ordering. */
	object_unref(port_obj);

	return status;
}
|
|
|
|
|
|
2026-02-23 21:51:59 +00:00
|
|
|
static bool validate_iovec(
|
|
|
|
|
struct task *task,
|
|
|
|
|
const struct iovec *iov,
|
|
|
|
|
size_t count,
|
|
|
|
|
bool rw)
|
|
|
|
|
{
|
|
|
|
|
for (size_t i = 0; i < count; i++) {
|
|
|
|
|
bool ok = false;
|
|
|
|
|
const struct iovec *vec = &iov[i];
|
|
|
|
|
if (rw) {
|
|
|
|
|
ok = validate_access_w(task, vec->io_base, vec->io_len);
|
|
|
|
|
} else {
|
|
|
|
|
ok = validate_access_r(task, vec->io_base, vec->io_len);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!ok) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-21 11:32:57 +00:00
|
|
|
/*
 * Validate all user memory reachable from `msg`: the msg struct itself,
 * the iovec array and each data buffer, and each handle list.  `rw`
 * selects write (true) or read (false) access checks for the payload
 * buffers.  Returns true only if everything is accessible.
 */
static bool validate_msg(struct task *task, const struct msg *msg, bool rw)
{
	if (!validate_access_r(task, msg, sizeof *msg)) {
		return false;
	}

	/* fix: reject counts whose byte size would wrap size_t; the counts
	 * come from user memory, and an overflowing multiplication below
	 * would validate only a tiny region while the kernel later walks
	 * the full (huge) array */
	if (msg->msg_data_count > (size_t)-1 / sizeof(struct iovec)) {
		return false;
	}
	if (msg->msg_handles_count > (size_t)-1 / sizeof(struct handle_list)) {
		return false;
	}

	if (msg->msg_data_count
	    && !validate_access_r(
		       task,
		       msg->msg_data,
		       sizeof(struct iovec) * msg->msg_data_count)) {
		return false;
	}

	if (msg->msg_handles_count
	    && !validate_access_r(
		       task,
		       msg->msg_handles,
		       sizeof(struct handle_list) * msg->msg_handles_count)) {
		return false;
	}

	/* each data buffer described by the (now readable) iovec array */
	if (!validate_iovec(task, msg->msg_data, msg->msg_data_count, rw)) {
		return false;
	}

	for (size_t i = 0; i < msg->msg_handles_count; i++) {
		const struct handle_list *list = &msg->msg_handles[i];

		/* fix: guard the handle-array size computation as well */
		if (list->l_nr_handles > (size_t)-1 / sizeof(kern_handle_t)) {
			return false;
		}

		size_t bytes = list->l_nr_handles * sizeof(kern_handle_t);
		bool ok = rw
			? validate_access_w(task, list->l_handles, bytes)
			: validate_access_r(task, list->l_handles, bytes);

		if (!ok) {
			return false;
		}
	}

	return true;
}
|
|
|
|
|
|
|
|
|
|
kern_status_t sys_msg_send(
|
2026-02-21 11:32:57 +00:00
|
|
|
kern_handle_t port_handle,
|
|
|
|
|
msg_flags_t msg_flags,
|
2026-02-19 19:21:50 +00:00
|
|
|
const struct msg *req,
|
|
|
|
|
struct msg *resp)
|
|
|
|
|
{
|
2026-02-21 11:32:57 +00:00
|
|
|
struct task *self = current_task();
|
|
|
|
|
|
|
|
|
|
if (!validate_msg(self, req, false)) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!validate_msg(self, resp, true)) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
|
|
|
|
|
|
struct object *port_obj = NULL;
|
|
|
|
|
handle_flags_t port_handle_flags = 0;
|
|
|
|
|
kern_status_t status = task_resolve_handle(
|
|
|
|
|
self,
|
|
|
|
|
port_handle,
|
|
|
|
|
&port_obj,
|
|
|
|
|
&port_handle_flags);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* add a reference to the port object to make sure it isn't deleted
|
|
|
|
|
* while we're using it */
|
|
|
|
|
object_ref(port_obj);
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
|
|
|
|
|
struct port *port = port_cast(port_obj);
|
|
|
|
|
if (!port) {
|
|
|
|
|
object_unref(port_obj);
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
port_lock_irqsave(port, &flags);
|
|
|
|
|
status = port_send_msg(port, req, resp, &flags);
|
|
|
|
|
port_unlock_irqrestore(port, flags);
|
|
|
|
|
object_unref(port_obj);
|
|
|
|
|
|
|
|
|
|
return status;
|
2026-02-19 19:21:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_status_t sys_msg_recv(
|
2026-02-21 11:32:57 +00:00
|
|
|
kern_handle_t channel_handle,
|
|
|
|
|
msg_flags_t msg_flags,
|
2026-02-19 19:21:50 +00:00
|
|
|
msgid_t *out_id,
|
|
|
|
|
struct msg *out_msg)
|
|
|
|
|
{
|
2026-02-21 11:32:57 +00:00
|
|
|
struct task *self = current_task();
|
|
|
|
|
|
|
|
|
|
if (!validate_access_w(self, out_id, sizeof *out_id)) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!validate_msg(self, out_msg, true)) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
|
|
|
|
|
|
struct object *channel_obj = NULL;
|
|
|
|
|
handle_flags_t channel_handle_flags = 0;
|
|
|
|
|
kern_status_t status = task_resolve_handle(
|
|
|
|
|
self,
|
|
|
|
|
channel_handle,
|
|
|
|
|
&channel_obj,
|
|
|
|
|
&channel_handle_flags);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* add a reference to the port object to make sure it isn't deleted
|
|
|
|
|
* while we're using it */
|
|
|
|
|
object_ref(channel_obj);
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
|
|
|
|
|
struct channel *channel = channel_cast(channel_obj);
|
|
|
|
|
if (!channel) {
|
|
|
|
|
object_unref(channel_obj);
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
channel_lock_irqsave(channel, &flags);
|
|
|
|
|
status = channel_recv_msg(channel, out_msg, out_id, &flags);
|
|
|
|
|
channel_unlock_irqrestore(channel, flags);
|
|
|
|
|
object_unref(channel_obj);
|
|
|
|
|
|
|
|
|
|
return status;
|
2026-02-19 19:21:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_status_t sys_msg_reply(
|
2026-02-21 11:32:57 +00:00
|
|
|
kern_handle_t channel_handle,
|
|
|
|
|
msg_flags_t msg_flags,
|
2026-02-19 19:21:50 +00:00
|
|
|
msgid_t id,
|
|
|
|
|
const struct msg *reply)
|
|
|
|
|
{
|
2026-02-21 11:32:57 +00:00
|
|
|
struct task *self = current_task();
|
|
|
|
|
|
|
|
|
|
if (!validate_msg(self, reply, false)) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
|
|
|
|
|
|
struct object *channel_obj = NULL;
|
|
|
|
|
handle_flags_t channel_handle_flags = 0;
|
|
|
|
|
kern_status_t status = task_resolve_handle(
|
|
|
|
|
self,
|
|
|
|
|
channel_handle,
|
|
|
|
|
&channel_obj,
|
|
|
|
|
&channel_handle_flags);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* add a reference to the port object to make sure it isn't deleted
|
|
|
|
|
* while we're using it */
|
|
|
|
|
object_ref(channel_obj);
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
|
|
|
|
|
struct channel *channel = channel_cast(channel_obj);
|
|
|
|
|
if (!channel) {
|
|
|
|
|
object_unref(channel_obj);
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
channel_lock_irqsave(channel, &flags);
|
|
|
|
|
status = channel_reply_msg(channel, id, reply, &flags);
|
|
|
|
|
channel_unlock_irqrestore(channel, flags);
|
|
|
|
|
object_unref(channel_obj);
|
|
|
|
|
|
|
|
|
|
return status;
|
2026-02-19 19:21:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_status_t sys_msg_read(
|
2026-02-23 21:51:59 +00:00
|
|
|
kern_handle_t channel_handle,
|
2026-02-19 19:21:50 +00:00
|
|
|
msgid_t id,
|
|
|
|
|
size_t offset,
|
2026-02-23 21:51:59 +00:00
|
|
|
const struct iovec *iov,
|
|
|
|
|
size_t iov_count,
|
|
|
|
|
size_t *nr_read)
|
2026-02-19 19:21:50 +00:00
|
|
|
{
|
2026-02-23 21:51:59 +00:00
|
|
|
struct task *self = current_task();
|
|
|
|
|
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
|
|
|
|
|
|
struct object *channel_obj = NULL;
|
|
|
|
|
handle_flags_t channel_handle_flags = 0;
|
|
|
|
|
kern_status_t status = task_resolve_handle(
|
|
|
|
|
self,
|
|
|
|
|
channel_handle,
|
|
|
|
|
&channel_obj,
|
|
|
|
|
&channel_handle_flags);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* add a reference to the port object to make sure it isn't deleted
|
|
|
|
|
* while we're using it */
|
|
|
|
|
object_ref(channel_obj);
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
|
|
|
|
|
struct channel *channel = channel_cast(channel_obj);
|
|
|
|
|
if (!channel) {
|
|
|
|
|
object_unref(channel_obj);
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
channel_lock_irqsave(channel, &flags);
|
|
|
|
|
vm_region_lock(self->t_address_space);
|
|
|
|
|
status = channel_read_msg(
|
|
|
|
|
channel,
|
|
|
|
|
id,
|
|
|
|
|
offset,
|
|
|
|
|
self->t_address_space,
|
|
|
|
|
iov,
|
|
|
|
|
iov_count,
|
|
|
|
|
nr_read);
|
|
|
|
|
vm_region_unlock(self->t_address_space);
|
|
|
|
|
channel_unlock_irqrestore(channel, flags);
|
|
|
|
|
object_unref(channel_obj);
|
|
|
|
|
|
|
|
|
|
return status;
|
2026-02-19 19:21:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Read handle lists out of message `id` on `channel` into `out`.
 *
 * NOTE(review): not yet implemented; all parameters are accepted but
 * ignored and KERN_UNIMPLEMENTED is returned unconditionally.
 */
kern_status_t sys_msg_read_handles(
	kern_handle_t channel,
	msgid_t id,
	size_t offset,
	struct handle_list *out,
	size_t nr_out)
{
	return KERN_UNIMPLEMENTED;
}
|
|
|
|
|
|
|
|
|
|
/*
 * Write payload bytes from `in` into message `id` on `channel` at
 * `offset`.
 *
 * NOTE(review): not yet implemented; all parameters are accepted but
 * ignored and KERN_UNIMPLEMENTED is returned unconditionally.
 */
kern_status_t sys_msg_write(
	kern_handle_t channel,
	msgid_t id,
	size_t offset,
	const struct iovec *in,
	size_t nr_in)
{
	return KERN_UNIMPLEMENTED;
}
|
|
|
|
|
|
|
|
|
|
/*
 * Write handle lists from `in` into message `id` on `channel` at
 * `offset`.
 *
 * NOTE(review): not yet implemented; all parameters are accepted but
 * ignored and KERN_UNIMPLEMENTED is returned unconditionally.
 */
kern_status_t sys_msg_write_handles(
	kern_handle_t channel,
	msgid_t id,
	size_t offset,
	const struct handle_list *in,
	size_t nr_in)
{
	return KERN_UNIMPLEMENTED;
}
|