2026-02-19 18:54:48 +00:00
|
|
|
#include <kernel/channel.h>
|
|
|
|
|
#include <kernel/clock.h>
|
|
|
|
|
#include <kernel/cpu.h>
|
|
|
|
|
#include <kernel/handle.h>
|
|
|
|
|
#include <kernel/libc/stdio.h>
|
|
|
|
|
#include <kernel/locks.h>
|
|
|
|
|
#include <kernel/object.h>
|
|
|
|
|
#include <kernel/printk.h>
|
|
|
|
|
#include <kernel/sched.h>
|
|
|
|
|
#include <kernel/util.h>
|
|
|
|
|
#include <kernel/vm-region.h>
|
2023-03-09 19:55:52 +00:00
|
|
|
|
2023-05-06 19:48:14 +01:00
|
|
|
/* Checked downcast from a generic struct object pointer to its
 * containing struct task (via the t_base header and task_type). */
#define TASK_CAST(p) OBJECT_C_CAST(struct task, t_base, &task_type, p)
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* Object-system type descriptor for tasks; registered with the object
 * layer in task_object_type_init(). */
static struct object_type task_type = {
	.ob_name = "task",
	.ob_size = sizeof(struct task),
	.ob_header_offset = offsetof(struct task, t_base),
};
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* Singleton task representing the kernel itself; set in
 * setup_kernel_task(), read via kernel_task().
 * NOTE(review): identifiers with a leading "__" are reserved for the
 * implementation — consider renaming. */
static struct task *__kernel_task;
/* Singleton idle task; set in setup_idle_task(), read via idle_task(). */
static struct task *__idle_task;

/* Protects pid_map. */
static spin_lock_t pid_map_lock = SPIN_LOCK_INIT;
/* One bit per PID; a set bit means the PID is allocated. */
static DECLARE_BITMAP(pid_map, PID_MAX);

/* Protects task_list. */
static spin_lock_t task_list_lock = SPIN_LOCK_INIT;
/* Global registry of all tasks, keyed by t_id (see task_from_tid()). */
static struct btree task_list;
|
2023-03-09 19:55:52 +00:00
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
/* task_list_get(tree, id): find the task with t_id == id in the
 * task-list btree, or NULL. */
BTREE_DEFINE_SIMPLE_GET(
	struct task,
	unsigned int,
	t_tasklist,
	t_id,
	task_list_get)
/* task_list_insert(tree, task): insert a task keyed by t_id. */
BTREE_DEFINE_SIMPLE_INSERT(struct task, t_tasklist, t_id, task_list_insert)
|
2023-03-09 19:55:52 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* Return the singleton kernel task (valid only after setup_kernel_task()
 * has succeeded; NULL before that). */
struct task *kernel_task(void)
{
	return __kernel_task;
}
|
|
|
|
|
|
2023-04-30 14:27:57 +01:00
|
|
|
/* Return the singleton idle task (valid only after setup_idle_task()
 * has succeeded; NULL before that). */
struct task *idle_task(void)
{
	return __idle_task;
}
|
|
|
|
|
|
2026-02-19 19:16:59 +00:00
|
|
|
/* Public wrapper around the TASK_CAST macro: downcast a generic object
 * pointer to the containing struct task. */
struct task *task_cast(struct object *obj)
{
	return TASK_CAST(obj);
}
|
|
|
|
|
|
2023-05-03 19:27:18 +01:00
|
|
|
/*
 * Entry point of the idle thread: spin forever, hinting the CPU to
 * relax between iterations (never returns).
 */
void idle(void)
{
	for (;;) {
		ml_cpu_pause();
	}
}
|
|
|
|
|
|
2026-02-08 12:54:43 +00:00
|
|
|
static unsigned int pid_alloc(void)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
spin_lock_irqsave(&pid_map_lock, &flags);
|
|
|
|
|
|
|
|
|
|
unsigned int pid = bitmap_lowest_clear(pid_map, PID_MAX);
|
|
|
|
|
if (pid != BITMAP_NPOS) {
|
|
|
|
|
bitmap_set(pid_map, pid);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&pid_map_lock, flags);
|
|
|
|
|
|
|
|
|
|
return pid;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void pid_free(unsigned int pid)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
spin_lock_irqsave(&pid_map_lock, &flags);
|
|
|
|
|
|
|
|
|
|
bitmap_clear(pid_map, pid);
|
|
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&pid_map_lock, flags);
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-09 19:55:52 +00:00
|
|
|
kern_status_t setup_kernel_task(void)
|
|
|
|
|
{
|
2023-03-18 19:35:23 +00:00
|
|
|
__kernel_task = task_alloc();
|
|
|
|
|
if (!__kernel_task) {
|
2023-03-09 19:55:52 +00:00
|
|
|
return KERN_NO_MEMORY;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-08 13:09:29 +00:00
|
|
|
__kernel_task->t_id = -1;
|
2023-03-18 19:35:23 +00:00
|
|
|
__kernel_task->t_state = TASK_RUNNING;
|
2026-02-08 13:09:29 +00:00
|
|
|
__kernel_task->t_pmap = get_kernel_pmap();
|
2023-03-09 19:55:52 +00:00
|
|
|
|
2026-02-08 13:10:54 +00:00
|
|
|
vm_region_create(
|
|
|
|
|
NULL,
|
|
|
|
|
"root",
|
2026-02-19 19:07:55 +00:00
|
|
|
4,
|
2026-02-08 13:10:54 +00:00
|
|
|
VM_KERNEL_BASE,
|
|
|
|
|
VM_KERNEL_LIMIT - VM_KERNEL_BASE,
|
|
|
|
|
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_SVR,
|
|
|
|
|
&__kernel_task->t_address_space);
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
snprintf(
|
|
|
|
|
__kernel_task->t_name,
|
|
|
|
|
sizeof __kernel_task->t_name,
|
|
|
|
|
"kernel_task");
|
2023-03-09 19:55:52 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct thread *kernel_thread = thread_alloc();
|
2023-03-09 19:55:52 +00:00
|
|
|
kernel_thread->tr_id = 0;
|
|
|
|
|
kernel_thread->tr_prio = PRIO_NORMAL;
|
|
|
|
|
kernel_thread->tr_state = THREAD_READY;
|
2023-04-09 16:38:08 +01:00
|
|
|
kernel_thread->tr_parent = __kernel_task;
|
2023-04-28 21:05:48 +01:00
|
|
|
kernel_thread->tr_quantum_target = default_quantum();
|
2023-03-09 19:55:52 +00:00
|
|
|
|
|
|
|
|
unsigned long flags;
|
2023-03-18 19:35:23 +00:00
|
|
|
task_lock_irqsave(__kernel_task, &flags);
|
2026-02-08 12:17:27 +00:00
|
|
|
queue_push_back(
|
|
|
|
|
&__kernel_task->t_threads,
|
2026-02-08 13:09:29 +00:00
|
|
|
&kernel_thread->tr_parent_entry);
|
2023-03-18 19:35:23 +00:00
|
|
|
task_unlock_irqrestore(__kernel_task, flags);
|
2023-03-09 19:55:52 +00:00
|
|
|
|
|
|
|
|
spin_lock_irqsave(&task_list_lock, &flags);
|
2023-03-18 19:35:23 +00:00
|
|
|
task_list_insert(&task_list, __kernel_task);
|
2023-03-09 19:55:52 +00:00
|
|
|
spin_unlock_irqrestore(&task_list_lock, flags);
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-30 14:27:57 +01:00
|
|
|
kern_status_t setup_idle_task(void)
|
|
|
|
|
{
|
|
|
|
|
__idle_task = task_alloc();
|
|
|
|
|
if (!__idle_task) {
|
|
|
|
|
return KERN_NO_MEMORY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
task_lock_irqsave(__idle_task, &flags);
|
|
|
|
|
|
2026-02-08 13:09:29 +00:00
|
|
|
__idle_task->t_id = -2;
|
2023-04-30 14:27:57 +01:00
|
|
|
__idle_task->t_state = TASK_RUNNING;
|
2026-02-08 13:09:29 +00:00
|
|
|
__idle_task->t_pmap = get_kernel_pmap();
|
2023-04-30 14:27:57 +01:00
|
|
|
|
|
|
|
|
snprintf(__idle_task->t_name, sizeof __idle_task->t_name, "idle");
|
|
|
|
|
|
|
|
|
|
struct thread *idle_thread = thread_alloc();
|
|
|
|
|
if (!idle_thread) {
|
2026-02-08 13:09:29 +00:00
|
|
|
task_unref(__idle_task);
|
2023-04-30 14:27:57 +01:00
|
|
|
__idle_task = NULL;
|
|
|
|
|
return KERN_NO_MEMORY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
idle_thread->tr_id = 0;
|
|
|
|
|
idle_thread->tr_parent = __idle_task;
|
2026-02-08 13:11:17 +00:00
|
|
|
thread_init_kernel(idle_thread, (uintptr_t)idle);
|
2023-04-30 14:27:57 +01:00
|
|
|
|
2026-02-08 13:09:29 +00:00
|
|
|
queue_push_back(&__idle_task->t_threads, &idle_thread->tr_parent_entry);
|
2023-04-30 14:27:57 +01:00
|
|
|
|
|
|
|
|
task_unlock_irqrestore(__idle_task, flags);
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-09 19:55:52 +00:00
|
|
|
/* Register the task object type with the object system; must run before
 * any task_alloc() call. */
kern_status_t task_object_type_init(void)
{
	return object_type_register(&task_type);
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct task *task_alloc(void)
|
2023-03-09 19:55:52 +00:00
|
|
|
{
|
2023-04-12 20:17:11 +01:00
|
|
|
struct object *task_obj = object_create(&task_type);
|
2023-03-09 19:55:52 +00:00
|
|
|
if (!task_obj) {
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2023-03-18 19:35:23 +00:00
|
|
|
|
2023-05-06 19:48:14 +01:00
|
|
|
struct task *t = TASK_CAST(task_obj);
|
2023-03-09 19:55:52 +00:00
|
|
|
return t;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-19 19:07:55 +00:00
|
|
|
/*
 * Create a fully initialised user task.
 *
 * Allocates the task object, a PID, a pmap, the root user VM region and
 * a handle table, optionally copies a (truncated, NUL-terminated) name,
 * and publishes the task in the global task list.
 *
 * `name` may be NULL; `name_len` is the number of bytes to copy from it.
 * Returns the new task, or NULL on allocation failure.
 */
struct task *task_create(const char *name, size_t name_len)
{
	struct task *task = task_alloc();
	if (!task) {
		return NULL;
	}

	/* BUG FIX: pid_alloc() exhaustion (BITMAP_NPOS) was previously
	 * used unchecked as the task id.  Reserve the PID before the pmap
	 * so each failure path only has to undo what already succeeded. */
	unsigned int pid = pid_alloc();
	if (pid == BITMAP_NPOS) {
		object_unref(&task->t_base);
		return NULL;
	}

	pmap_t pmap = pmap_create();
	if (pmap == PMAP_INVALID) {
		pid_free(pid);
		object_unref(&task->t_base);
		return NULL;
	}

	task->t_id = pid;
	task->t_pmap = pmap;

	/* Root VM region spanning the whole user address range. */
	vm_region_create(
		NULL,
		"root",
		4,
		VM_USER_BASE,
		VM_USER_LIMIT - VM_USER_BASE,
		VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_USER,
		&task->t_address_space);

	task->t_address_space->vr_pmap = pmap;
	task->t_state = TASK_RUNNING;
	task->t_handles = handle_table_create();

	if (name) {
		/* Copy at most sizeof t_name - 1 bytes; always terminate. */
		name_len = MIN(name_len, sizeof task->t_name - 1);
		memcpy(task->t_name, name, name_len);
		task->t_name[name_len] = '\0';
	}

	/* Publish the task so task_from_tid() can find it. */
	unsigned long flags;
	spin_lock_irqsave(&task_list_lock, &flags);
	task_list_insert(&task_list, task);
	spin_unlock_irqrestore(&task_list_lock, flags);

	return task;
}
|
|
|
|
|
|
2026-02-19 19:16:59 +00:00
|
|
|
/*
 * Link `child` onto `parent`'s child list.  Always returns KERN_OK.
 *
 * NOTE(review): no lock is taken here, unlike the thread-list updates
 * elsewhere in this file — confirm callers serialise access to
 * t_children.
 */
kern_status_t task_add_child(struct task *parent, struct task *child)
{
	queue_push_back(&parent->t_children, &child->t_child_entry);
	return KERN_OK;
}
|
|
|
|
|
|
|
|
|
|
/*
 * Register `channel` in `task`'s channel btree under `id`.
 *
 * Returns KERN_NAME_EXISTS if a channel with that id is already
 * registered, KERN_OK otherwise.
 *
 * NOTE(review): b_channels is traversed and modified without a lock —
 * confirm callers serialise channel registration and lookup.
 */
kern_status_t task_add_channel(
	struct task *task,
	struct channel *channel,
	unsigned int id)
{
	channel->c_id = id;

	/* Empty tree: the new node becomes the root. */
	if (!task->b_channels.b_root) {
		task->b_channels.b_root = &channel->c_node;
		btree_insert_fixup(&task->b_channels, &channel->c_node);
		return KERN_OK;
	}

	/* Binary-search descent to find the attachment point for id. */
	struct btree_node *cur = task->b_channels.b_root;
	while (1) {
		struct channel *cur_node
			= BTREE_CONTAINER(struct channel, c_node, cur);
		struct btree_node *next = NULL;

		if (id > cur_node->c_id) {
			next = btree_right(cur);

			/* No right child: attach here. */
			if (!next) {
				btree_put_right(cur, &channel->c_node);
				break;
			}
		} else if (id < cur_node->c_id) {
			next = btree_left(cur);

			/* No left child: attach here. */
			if (!next) {
				btree_put_left(cur, &channel->c_node);
				break;
			}
		} else {
			/* Duplicate id: reject without modifying the tree. */
			return KERN_NAME_EXISTS;
		}

		cur = next;
	}

	/* Rebalance after the raw insertion. */
	btree_insert_fixup(&task->b_channels, &channel->c_node);
	return KERN_OK;
}
|
|
|
|
|
|
|
|
|
|
/* get_channel_with_id(tree, id): find the channel with c_id == id in a
 * task's channel btree, or NULL. */
BTREE_DEFINE_SIMPLE_GET(
	struct channel,
	unsigned int,
	c_node,
	c_id,
	get_channel_with_id)
|
|
|
|
|
|
|
|
|
|
/* Look up a channel registered on `task` by id (see task_add_channel);
 * returns NULL if not found. */
struct channel *task_get_channel(struct task *task, unsigned int id)
{
	return get_channel_with_id(&task->b_channels, id);
}
|
|
|
|
|
|
|
|
|
|
/*
 * Look up a task by id in the global task list; returns NULL if absent.
 *
 * NOTE(review): the returned pointer carries no extra reference and the
 * list lock is dropped before returning — confirm callers cannot race
 * with task teardown.
 */
struct task *task_from_tid(tid_t id)
{
	unsigned long flags;
	spin_lock_irqsave(&task_list_lock, &flags);
	struct task *t = task_list_get(&task_list, id);
	spin_unlock_irqrestore(&task_list_lock, flags);
	return t;
}
|
2023-03-28 21:39:59 +01:00
|
|
|
|
2026-02-19 19:16:59 +00:00
|
|
|
kern_status_t task_open_handle(
|
|
|
|
|
struct task *task,
|
|
|
|
|
struct object *obj,
|
|
|
|
|
handle_flags_t flags,
|
|
|
|
|
kern_handle_t *out)
|
|
|
|
|
{
|
|
|
|
|
struct handle *handle_data = NULL;
|
|
|
|
|
kern_status_t status
|
|
|
|
|
= handle_table_alloc_handle(task->t_handles, &handle_data, out);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
object_add_handle(obj);
|
|
|
|
|
handle_data->h_object = obj;
|
|
|
|
|
handle_data->h_flags = flags;
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_status_t task_resolve_handle(
|
|
|
|
|
struct task *task,
|
|
|
|
|
kern_handle_t handle,
|
|
|
|
|
struct object **out_obj,
|
|
|
|
|
handle_flags_t *out_flags)
|
|
|
|
|
{
|
|
|
|
|
struct handle *handle_data
|
|
|
|
|
= handle_table_get_handle(task->t_handles, handle);
|
|
|
|
|
if (!handle_data) {
|
|
|
|
|
return KERN_INVALID_ARGUMENT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (out_obj) {
|
|
|
|
|
*out_obj = handle_data->h_object;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (out_flags) {
|
|
|
|
|
*out_flags = handle_data->h_flags;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Release the handle-table slot for `handle`.
 *
 * NOTE(review): task_open_handle() calls object_add_handle(), but no
 * per-object handle count is dropped here — confirm that
 * handle_table_free_handle() (or a caller) releases the object,
 * otherwise handle counts leak.
 */
kern_status_t task_close_handle(struct task *task, kern_handle_t handle)
{
	return handle_table_free_handle(task->t_handles, handle);
}
|
|
|
|
|
|
2026-02-08 13:11:17 +00:00
|
|
|
struct thread *task_create_thread(struct task *parent)
|
|
|
|
|
{
|
|
|
|
|
struct thread *thread = thread_alloc();
|
|
|
|
|
thread->tr_id = parent->t_next_thread_id++;
|
|
|
|
|
thread->tr_prio = PRIO_NORMAL;
|
|
|
|
|
thread->tr_state = THREAD_STOPPED;
|
|
|
|
|
thread->tr_parent = parent;
|
|
|
|
|
thread->tr_quantum_target = default_quantum();
|
|
|
|
|
|
|
|
|
|
queue_push_back(&parent->t_threads, &thread->tr_parent_entry);
|
|
|
|
|
|
|
|
|
|
return thread;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct task *current_task(void)
|
2023-03-28 21:39:59 +01:00
|
|
|
{
|
2023-04-12 20:17:11 +01:00
|
|
|
struct thread *thr = current_thread();
|
2023-04-09 16:38:08 +01:00
|
|
|
return thr ? thr->tr_parent : NULL;
|
2023-03-28 21:39:59 +01:00
|
|
|
}
|