/*
 * mango/kernel/channel.c — kernel IPC channel implementation.
 */
#include <kernel/channel.h>
#include <kernel/msg.h>
#include <kernel/port.h>
#include <kernel/util.h>
#include <kernel/vm-region.h>
/* Checked cast from struct object * to struct channel *, validating the
 * object's type against channel_type via the embedded c_base header. */
#define CHANNEL_CAST(p) OBJECT_C_CAST(struct channel, c_base, &channel_type, p)
/* Object-type descriptor for channels; registered with the object system
 * by channel_type_init(). */
static struct object_type channel_type = {
.ob_name = "channel",
.ob_size = sizeof(struct channel),
/* Offset of the embedded object header, used by OBJECT_C_CAST. */
.ob_header_offset = offsetof(struct channel, c_base),
};
/* Generates get_msg_with_id(): look up a struct msg in a btree by its
 * msgid_t msg_id key, linked through the msg_node member. */
BTREE_DEFINE_SIMPLE_GET(struct msg, msgid_t, msg_node, msg_id, get_msg_with_id)
/* Register the channel object type with the kernel object system.
 * Returns the status reported by object_type_register(). */
kern_status_t channel_type_init(void)
{
        kern_status_t status = object_type_register(&channel_type);

        return status;
}
/* Checked downcast from a generic kernel object to a channel.
 * Forwards to CHANNEL_CAST, which validates obj against channel_type. */
struct channel *channel_cast(struct object *obj)
{
        struct channel *channel = CHANNEL_CAST(obj);

        return channel;
}
extern struct channel *channel_create(void)
{
struct object *channel_object = object_create(&channel_type);
if (!channel_object) {
return NULL;
}
struct channel *channel = CHANNEL_CAST(channel_object);
return channel;
}
static bool try_enqueue(struct btree *tree, struct msg *msg)
2026-02-19 19:21:04 +00:00
{
if (!tree->b_root) {
tree->b_root = &msg->msg_node;
btree_insert_fixup(tree, &msg->msg_node);
return true;
}
struct btree_node *cur = tree->b_root;
while (1) {
struct msg *cur_node
= BTREE_CONTAINER(struct msg, msg_node, cur);
2026-02-19 19:21:04 +00:00
struct btree_node *next = NULL;
if (msg->msg_id > cur_node->msg_id) {
next = btree_right(cur);
if (!next) {
btree_put_right(cur, &msg->msg_node);
break;
}
} else if (msg->msg_id < cur_node->msg_id) {
next = btree_left(cur);
if (!next) {
btree_put_left(cur, &msg->msg_node);
break;
}
} else {
return false;
}
cur = next;
}
btree_insert_fixup(tree, &msg->msg_node);
return true;
}
/*
 * Complete a message exchange on the sender's behalf and drop its lock.
 * Despite the name, this is also used for successful replies (status ==
 * KERN_OK): it records the result, marks the reply as sent, readies the
 * sender's port and wakes the sending thread.
 *
 * Caller must hold msg->msg_lock; it is released here with *lock_flags.
 */
static void kmsg_reply_error(
        struct msg *msg,
        kern_status_t status,
        unsigned long *lock_flags)
{
        msg->msg_result = status;
        msg->msg_status = KMSG_REPLY_SENT;
        msg->msg_sender_port->p_status = PORT_READY;

        thread_awaken(msg->msg_sender_thread);
        spin_unlock_irqrestore(&msg->msg_lock, *lock_flags);
}
static struct msg *get_next_msg(
struct channel *channel,
unsigned long *lock_flags)
2026-02-19 19:21:04 +00:00
{
struct btree_node *cur = btree_first(&channel->c_msg);
while (cur) {
struct msg *msg = BTREE_CONTAINER(struct msg, msg_node, cur);
spin_lock_irqsave(&msg->msg_lock, lock_flags);
2026-02-19 19:21:04 +00:00
if (msg->msg_status == KMSG_WAIT_RECEIVE) {
msg->msg_status = KMSG_WAIT_REPLY;
msg->msg_sender_port->p_status = PORT_REPLY_BLOCKED;
2026-02-19 19:21:04 +00:00
return msg;
}
spin_unlock_irqrestore(&msg->msg_lock, *lock_flags);
2026-02-19 19:21:04 +00:00
cur = btree_next(cur);
}
return NULL;
}
extern kern_status_t channel_enqueue_msg(
struct channel *channel,
struct msg *msg)
2026-02-19 19:21:04 +00:00
{
fill_random(&msg->msg_id, sizeof msg->msg_id);
while (!try_enqueue(&channel->c_msg, msg)) {
msg->msg_id++;
}
wakeup_one(&channel->c_wq);
return KERN_OK;
}
/*
 * Block until a message arrives on the channel, copy its request payload
 * into the caller's iovecs, and hand back its id for a later reply.
 *
 * The caller holds the channel object lock on entry (IRQ flags saved in
 * *irq_flags); that lock is dropped around schedule() while waiting and
 * re-acquired before returning.  If copying the request data fails, the
 * message is completed with the error status on the sender's behalf.
 */
extern kern_status_t channel_recv_msg(
        struct channel *channel,
        msgid_t *out_id,
        struct iovec *out_data,
        size_t out_data_count,
        unsigned long *irq_flags)
{
        struct thread *self = current_thread();
        struct wait_item wait;
        struct msg *msg = NULL;
        unsigned long msg_flags;

        wait_item_init(&wait, self);
        for (;;) {
                thread_wait_begin(&wait, &channel->c_wq);
                msg = get_next_msg(channel, &msg_flags);
                if (msg) {
                        break;
                }
                /* Nothing receivable yet: sleep without the channel lock. */
                object_unlock_irqrestore(&channel->c_base, *irq_flags);
                schedule(SCHED_NORMAL);
                object_lock_irqsave(&channel->c_base, irq_flags);
        }
        thread_wait_end(&wait, &channel->c_wq);

        /* msg is locked and in KMSG_WAIT_REPLY state; copy the request
         * payload from the sender's address space into the receiver's. */
        struct task *sender = msg->msg_sender_thread->tr_parent;
        struct task *receiver = self->tr_parent;
        struct vm_region *src = sender->t_address_space;
        struct vm_region *dst = receiver->t_address_space;
        unsigned long vm_flags;

        vm_region_lock_pair_irqsave(src, dst, &vm_flags);
        kern_status_t status = vm_region_memmove_v(
                dst,
                0,
                out_data,
                out_data_count,
                src,
                0,
                msg->msg_req_data,
                msg->msg_req_data_count,
                VM_REGION_COPY_ALL,
                NULL);
        vm_region_unlock_pair_irqrestore(src, dst, vm_flags);

        if (status != KERN_OK) {
                /* Fail the exchange back to the sender; drops msg_lock. */
                kmsg_reply_error(msg, status, &msg_flags);
                return status;
        }

        *out_id = msg->msg_id;
        spin_unlock_irqrestore(&msg->msg_lock, msg_flags);
        return KERN_OK;
}
/*
 * Deliver a reply to the message identified by id: copy resp_data from
 * the replier's address space into the original sender's response
 * buffers, then complete the message and wake the sender.
 *
 * Returns KERN_INVALID_ARGUMENT when id is unknown or the message is not
 * awaiting a reply; otherwise the copy status, which is also recorded as
 * the message result.  NOTE(review): irq_flags is not referenced in this
 * function — presumably kept for interface symmetry with
 * channel_recv_msg; confirm against callers.
 */
extern kern_status_t channel_reply_msg(
        struct channel *channel,
        msgid_t id,
        const struct iovec *resp_data,
        size_t resp_data_count,
        unsigned long *irq_flags)
{
        struct msg *msg = get_msg_with_id(&channel->c_msg, id);
        unsigned long msg_flags;

        if (!msg) {
                return KERN_INVALID_ARGUMENT;
        }
        spin_lock_irqsave(&msg->msg_lock, &msg_flags);
        if (msg->msg_status != KMSG_WAIT_REPLY) {
                spin_unlock_irqrestore(&msg->msg_lock, msg_flags);
                return KERN_INVALID_ARGUMENT;
        }

        struct thread *self = current_thread();
        /* The response flows from the replier (us) back to the task that
         * originally sent the message. */
        struct task *receiver = msg->msg_sender_thread->tr_parent;
        struct task *sender = self->tr_parent;
        struct vm_region *src = sender->t_address_space;
        struct vm_region *dst = receiver->t_address_space;
        unsigned long vm_flags;

        vm_region_lock_pair_irqsave(src, dst, &vm_flags);
        kern_status_t status = vm_region_memmove_v(
                dst,
                0,
                msg->msg_resp_data,
                msg->msg_resp_data_count,
                src,
                0,
                resp_data,
                resp_data_count,
                VM_REGION_COPY_ALL,
                NULL);
        vm_region_unlock_pair_irqrestore(src, dst, vm_flags);

        /* Success or failure, the sender is completed with the copy
         * status and msg_lock is dropped. */
        kmsg_reply_error(msg, status, &msg_flags);
        return status;
}
/*
 * Read (part of) the pending request payload of message id into
 * dest_region / dest_iov, starting at byte offset within the message's
 * request data.  The message stays pending; *nr_read is filled in by
 * vm_region_memmove_v with the byte count copied.
 *
 * Returns KERN_INVALID_ARGUMENT when id is unknown or the message is not
 * in the awaiting-reply state; otherwise the copy status.
 */
extern kern_status_t channel_read_msg(
        struct channel *channel,
        msgid_t id,
        size_t offset,
        struct vm_region *dest_region,
        const struct iovec *dest_iov,
        size_t dest_iov_count,
        size_t *nr_read)
{
        struct msg *msg = get_msg_with_id(&channel->c_msg, id);
        unsigned long msg_flags;

        if (!msg) {
                return KERN_INVALID_ARGUMENT;
        }
        spin_lock_irqsave(&msg->msg_lock, &msg_flags);
        if (msg->msg_status != KMSG_WAIT_REPLY) {
                spin_unlock_irqrestore(&msg->msg_lock, msg_flags);
                return KERN_INVALID_ARGUMENT;
        }

        /* The request iovecs live in the sender's address space. */
        struct vm_region *src_region
                = msg->msg_sender_thread->tr_parent->t_address_space;
        unsigned long vm_flags;

        vm_region_lock_pair_irqsave(src_region, dest_region, &vm_flags);
        kern_status_t status = vm_region_memmove_v(
                dest_region,
                0,
                dest_iov,
                dest_iov_count,
                src_region,
                offset,
                msg->msg_req_data,
                msg->msg_req_data_count,
                VM_REGION_COPY_ALL,
                nr_read);
        vm_region_unlock_pair_irqrestore(src_region, dest_region, vm_flags);

        spin_unlock_irqrestore(&msg->msg_lock, msg_flags);
        return status;
}
/*
 * Write (part of) the response payload for message id: copy from
 * src_region / src_iov into the sender's response iovecs.  The message
 * stays pending; *nr_written is filled in by vm_region_memmove_v with
 * the byte count copied.
 *
 * NOTE(review): offset is passed on the source (caller) side of the
 * copy here, whereas channel_read_msg applies its offset to the message
 * payload side — verify this asymmetry is intended.
 *
 * Returns KERN_INVALID_ARGUMENT when id is unknown or the message is not
 * in the awaiting-reply state; otherwise the copy status.
 */
extern kern_status_t channel_write_msg(
        struct channel *channel,
        msgid_t id,
        size_t offset,
        struct vm_region *src_region,
        const struct iovec *src_iov,
        size_t src_iov_count,
        size_t *nr_written)
{
        struct msg *msg = get_msg_with_id(&channel->c_msg, id);
        unsigned long msg_flags;

        if (!msg) {
                return KERN_INVALID_ARGUMENT;
        }
        spin_lock_irqsave(&msg->msg_lock, &msg_flags);
        if (msg->msg_status != KMSG_WAIT_REPLY) {
                spin_unlock_irqrestore(&msg->msg_lock, msg_flags);
                return KERN_INVALID_ARGUMENT;
        }

        /* The response iovecs live in the sender's address space. */
        struct vm_region *dest_region
                = msg->msg_sender_thread->tr_parent->t_address_space;
        unsigned long vm_flags;

        vm_region_lock_pair_irqsave(src_region, dest_region, &vm_flags);
        kern_status_t status = vm_region_memmove_v(
                dest_region,
                0,
                msg->msg_resp_data,
                msg->msg_resp_data_count,
                src_region,
                offset,
                src_iov,
                src_iov_count,
                VM_REGION_COPY_ALL,
                nr_written);
        vm_region_unlock_pair_irqrestore(src_region, dest_region, vm_flags);

        spin_unlock_irqrestore(&msg->msg_lock, msg_flags);
        return status;
}