/* mango/kernel/channel.c */

#include <kernel/channel.h>
#include <kernel/msg.h>
#include <kernel/util.h>
#include <kernel/vm-region.h>

#define CHANNEL_CAST(p) OBJECT_C_CAST(struct channel, c_base, &channel_type, p)

static struct object_type channel_type = {
	.ob_name = "channel",
	.ob_size = sizeof(struct channel),
	.ob_header_offset = offsetof(struct channel, c_base),
};
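
/* Generates get_msg_with_id(): look up a struct kmsg in a channel's message
 * tree by its msg_id key. */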
BTREE_DEFINE_SIMPLE_GET(struct kmsg, msgid_t, msg_node, msg_id, get_msg_with_id)
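
/* Register the channel object type with the object system. */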
kern_status_t channel_type_init(void)
{
	return object_type_register(&channel_type);
}
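
/* Type-checked downcast from a generic object to a channel, via
 * OBJECT_C_CAST. */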
struct channel *channel_cast(struct object *obj)
{
	return CHANNEL_CAST(obj);
}
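
/* Allocate and initialize a new channel object. Returns NULL if the
 * underlying object allocation fails. */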
extern struct channel *channel_create(void)
{
	struct object *channel_object = object_create(&channel_type);
	if (!channel_object) {
		return NULL;
	}
	struct channel *channel = CHANNEL_CAST(channel_object);
	return channel;
}
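
/* Insert msg into the message tree ordered by msg_id. Returns false if a
 * message with the same id is already present, so the caller can pick a
 * different id and retry. */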
static bool try_enqueue(struct btree *tree, struct kmsg *msg)
{
	if (!tree->b_root) {
		tree->b_root = &msg->msg_node;
		btree_insert_fixup(tree, &msg->msg_node);
		return true;
	}
	struct btree_node *cur = tree->b_root;
	while (1) {
		struct kmsg *cur_node
			= BTREE_CONTAINER(struct kmsg, msg_node, cur);
		struct btree_node *next = NULL;
		if (msg->msg_id > cur_node->msg_id) {
			next = btree_right(cur);
			if (!next) {
				btree_put_right(cur, &msg->msg_node);
				break;
			}
		} else if (msg->msg_id < cur_node->msg_id) {
			next = btree_left(cur);
			if (!next) {
				btree_put_left(cur, &msg->msg_node);
				break;
			}
		} else {
			return false;
		}
		cur = next;
	}
	btree_insert_fixup(tree, &msg->msg_node);
	return true;
}
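
/* Complete a message on the receiver's behalf: record the reply status, wake
 * the blocked sender, and release the message lock taken with *lock_flags.
 * Despite the name, this is also how a successful KERN_OK reply is
 * delivered. */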
static void kmsg_reply_error(
	struct kmsg *msg,
	kern_status_t status,
	unsigned long *lock_flags)
{
	msg->msg_status = KMSG_REPLY_SENT;
	msg->msg_result = status;
	thread_awaken(msg->msg_sender_thread);
	spin_unlock_irqrestore(&msg->msg_lock, *lock_flags);
}
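
/* Walk the channel's message tree for the first message still waiting to be
 * received. On success the message is returned with its lock held (irq flags
 * saved in *lock_flags) and its status advanced to KMSG_WAIT_REPLY. */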
static struct kmsg *get_next_msg(
	struct channel *channel,
	unsigned long *lock_flags)
{
	struct btree_node *cur = btree_first(&channel->c_msg);
	while (cur) {
		struct kmsg *msg = BTREE_CONTAINER(struct kmsg, msg_node, cur);
		spin_lock_irqsave(&msg->msg_lock, lock_flags);
		if (msg->msg_status == KMSG_WAIT_RECEIVE) {
			msg->msg_status = KMSG_WAIT_REPLY;
			return msg;
		}
		spin_unlock_irqrestore(&msg->msg_lock, *lock_flags);
		cur = btree_next(cur);
	}
	return NULL;
}
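
/* Queue a message on the channel and wake one waiting receiver. The message
 * id is drawn at random and incremented until it is unique within the
 * channel's tree. */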
extern kern_status_t channel_enqueue_msg(
	struct channel *channel,
	struct kmsg *msg)
{
	fill_random(&msg->msg_id, sizeof msg->msg_id);
	while (!try_enqueue(&channel->c_msg, msg)) {
		msg->msg_id++;
	}
	wakeup_one(&channel->c_wq);
	return KERN_OK;
}
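
/*
 * Block until a message arrives, copy its data and handles into the receiving
 * task, and hand back the message id so the caller can answer it later with
 * channel_reply_msg(). The caller is expected to enter with the channel's
 * object lock held; *irq_flags is released and re-acquired around the sleep.
 *
 * A receive/reply loop might look roughly like this (sketch only; how the
 * msg buffers are set up and how errors are handled is up to the caller):
 *
 *	unsigned long flags;
 *	struct msg req, resp;
 *	msgid_t id;
 *
 *	object_lock_irqsave(&ch->c_base, &flags);
 *	if (channel_recv_msg(ch, &req, &id, &flags) == KERN_OK) {
 *		... fill resp with the reply data and handles ...
 *		channel_reply_msg(ch, id, &resp, &flags);
 *	}
 *	object_unlock_irqrestore(&ch->c_base, flags);
 */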
extern kern_status_t channel_recv_msg(
	struct channel *channel,
	struct msg *out_msg,
	msgid_t *out_id,
	unsigned long *irq_flags)
{
	struct wait_item waiter;
	struct thread *self = current_thread();
	struct kmsg *msg = NULL;
	unsigned long msg_lock_flags;

	wait_item_init(&waiter, self);
	for (;;) {
		thread_wait_begin(&waiter, &channel->c_wq);
		msg = get_next_msg(channel, &msg_lock_flags);
		if (msg) {
			break;
		}
		object_unlock_irqrestore(&channel->c_base, *irq_flags);
		schedule(SCHED_NORMAL);
		object_lock_irqsave(&channel->c_base, irq_flags);
	}
	thread_wait_end(&waiter, &channel->c_wq);

	/* msg is now set to the next message to process */
	struct task *sender = msg->msg_sender_thread->tr_parent;
	struct task *receiver = self->tr_parent;

	kern_status_t status = vm_region_memmove_v(
		receiver->t_address_space,
		0,
		out_msg->msg_data,
		out_msg->msg_data_count,
		sender->t_address_space,
		0,
		msg->msg_req.msg_data,
		msg->msg_req.msg_data_count,
		VM_REGION_COPY_ALL);
	if (status != KERN_OK) {
		kmsg_reply_error(msg, status, &msg_lock_flags);
		return status;
	}

	status = handle_list_transfer(
		receiver->t_handles,
		out_msg->msg_handles,
		out_msg->msg_handles_count,
		sender->t_handles,
		msg->msg_req.msg_handles,
		msg->msg_req.msg_handles_count);
	if (status != KERN_OK) {
		kmsg_reply_error(msg, status, &msg_lock_flags);
		return status;
	}

	*out_id = msg->msg_id;
	spin_unlock_irqrestore(&msg->msg_lock, msg_lock_flags);
	return KERN_OK;
}
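
/* Reply to a previously received message identified by id: copy the response
 * data and handles back into the sending task, then complete the message with
 * KERN_OK (or an error) and wake the sender. Returns KERN_INVALID_ARGUMENT if
 * no such message exists or it is not waiting for a reply. */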
extern kern_status_t channel_reply_msg(
	struct channel *channel,
	msgid_t id,
	const struct msg *resp,
	unsigned long *irq_flags)
{
	unsigned long msg_lock_flags;

	struct kmsg *msg = get_msg_with_id(&channel->c_msg, id);
	if (!msg) {
		return KERN_INVALID_ARGUMENT;
	}

	spin_lock_irqsave(&msg->msg_lock, &msg_lock_flags);
	if (msg->msg_status != KMSG_WAIT_REPLY) {
		spin_unlock_irqrestore(&msg->msg_lock, msg_lock_flags);
		return KERN_INVALID_ARGUMENT;
	}

	struct thread *self = current_thread();
	/* the task that is about to receive the response */
	struct task *receiver = msg->msg_sender_thread->tr_parent;
	/* the task that is about to send the response */
	struct task *sender = self->tr_parent;

	kern_status_t status = vm_region_memmove_v(
		receiver->t_address_space,
		0,
		msg->msg_resp.msg_data,
		msg->msg_resp.msg_data_count,
		sender->t_address_space,
		0,
		resp->msg_data,
		resp->msg_data_count,
		VM_REGION_COPY_ALL);
	if (status != KERN_OK) {
		kmsg_reply_error(msg, status, &msg_lock_flags);
		return status;
	}

	status = handle_list_transfer(
		receiver->t_handles,
		msg->msg_resp.msg_handles,
		msg->msg_resp.msg_handles_count,
		sender->t_handles,
		resp->msg_handles,
		resp->msg_handles_count);
	if (status != KERN_OK) {
		kmsg_reply_error(msg, status, &msg_lock_flags);
		return status;
	}

	kmsg_reply_error(msg, KERN_OK, &msg_lock_flags);
	return KERN_OK;
}
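
/* Read part of a pending message's data by offset. Not yet implemented. */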
extern kern_status_t channel_read_msg(
	struct channel *channel,
	msgid_t msg,
	size_t offset,
	void *buf,
	size_t len,
	size_t *nr_read)
{
	return KERN_UNIMPLEMENTED;
}
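
/* Write part of a pending message's data by offset. Not yet implemented. */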
extern kern_status_t channel_write_msg(
	struct channel *channel,
	msgid_t msg,
	size_t offset,
	const void *buf,
	size_t len,
	size_t *nr_written)
{
	return KERN_UNIMPLEMENTED;
}