Files
mango/sched/task.c
Max Wash 6019c9307d kernel: separate headers into kernel and user headers
all kernel headers have been moved from include/mango to include/kernel
and include definitions that are only relevant to kernel-space.

any definitions that are relevant to both kernel- and user-space
(i.e. type definitions, syscall IDs) have been moved to
include/mango within libmango.
2026-02-19 18:54:48 +00:00

247 lines
5.3 KiB
C

#include <kernel/channel.h>
#include <kernel/clock.h>
#include <kernel/cpu.h>
#include <kernel/handle.h>
#include <kernel/libc/stdio.h>
#include <kernel/locks.h>
#include <kernel/object.h>
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/util.h>
#include <kernel/vm-region.h>
/* Downcast a generic object pointer to its containing struct task,
 * validating the object type against task_type. */
#define TASK_CAST(p) OBJECT_C_CAST(struct task, t_base, &task_type, p)
/* Object-type descriptor for tasks; registered with the kernel object
 * system (see task_object_type_init) so tasks are allocated and
 * reference-counted through object_create()/object_unref(). */
static struct object_type task_type = {
.ob_name = "task",
.ob_size = sizeof(struct task),
.ob_header_offset = offsetof(struct task, t_base),
};
/* The boot-created kernel task and idle task; set once during setup
 * and read via kernel_task()/idle_task(). */
static struct task *__kernel_task;
static struct task *__idle_task;
/* Bitmap of in-use process IDs; guarded by pid_map_lock. */
static spin_lock_t pid_map_lock = SPIN_LOCK_INIT;
static DECLARE_BITMAP(pid_map, PID_MAX);
/* Btree of live tasks keyed by t_id; guarded by task_list_lock. */
static spin_lock_t task_list_lock = SPIN_LOCK_INIT;
static struct btree task_list;
/* Generate task_list_get(tree, pid) and task_list_insert(tree, task):
 * btree helpers keyed on struct task's t_id via the t_tasklist node. */
BTREE_DEFINE_SIMPLE_GET(
struct task,
unsigned int,
t_tasklist,
t_id,
task_list_get)
BTREE_DEFINE_SIMPLE_INSERT(struct task, t_tasklist, t_id, task_list_insert)
/* Accessor for the root kernel task created by setup_kernel_task(). */
struct task *kernel_task(void)
{
	struct task *kt = __kernel_task;

	return kt;
}
/* Accessor for the idle task created by setup_idle_task(). */
struct task *idle_task(void)
{
	struct task *it = __idle_task;

	return it;
}
/* Body of the idle thread: spin forever, hinting the CPU to relax
 * between iterations. Never returns. */
void idle(void)
{
	for (;;)
		ml_cpu_pause();
}
/*
 * Reserve the lowest free process ID.
 *
 * Returns the allocated PID, or BITMAP_NPOS when the PID space is
 * exhausted (caller must check).
 */
static unsigned int pid_alloc(void)
{
	unsigned long irq_state;
	unsigned int id;

	spin_lock_irqsave(&pid_map_lock, &irq_state);
	id = bitmap_lowest_clear(pid_map, PID_MAX);
	if (id != BITMAP_NPOS) {
		bitmap_set(pid_map, id);
	}
	spin_unlock_irqrestore(&pid_map_lock, irq_state);

	return id;
}
/* Return @pid to the free pool so pid_alloc() can hand it out again. */
static void pid_free(unsigned int pid)
{
	unsigned long irq_state;

	spin_lock_irqsave(&pid_map_lock, &irq_state);
	bitmap_clear(pid_map, pid);
	spin_unlock_irqrestore(&pid_map_lock, irq_state);
}
/*
 * Create the kernel task (id -1) with its initial thread, give it the
 * kernel pmap and a root VM region spanning the whole kernel address
 * range, and register it in the global task list.
 *
 * Returns KERN_OK on success, KERN_NO_MEMORY if allocation fails.
 */
kern_status_t setup_kernel_task(void)
{
	__kernel_task = task_alloc();
	if (!__kernel_task) {
		return KERN_NO_MEMORY;
	}
	__kernel_task->t_id = -1;
	__kernel_task->t_state = TASK_RUNNING;
	__kernel_task->t_pmap = get_kernel_pmap();
	/* NOTE(review): vm_region_create result is unchecked here, as in
	 * task_create() -- confirm it cannot fail during early boot. */
	vm_region_create(
	    NULL,
	    "root",
	    VM_KERNEL_BASE,
	    VM_KERNEL_LIMIT - VM_KERNEL_BASE,
	    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_SVR,
	    &__kernel_task->t_address_space);
	snprintf(
	    __kernel_task->t_name,
	    sizeof __kernel_task->t_name,
	    "kernel_task");
	struct thread *kernel_thread = thread_alloc();
	/* Fix: thread_alloc() can return NULL (setup_idle_task already
	 * checks); previously this dereferenced a NULL pointer. */
	if (!kernel_thread) {
		task_unref(__kernel_task);
		__kernel_task = NULL;
		return KERN_NO_MEMORY;
	}
	kernel_thread->tr_id = 0;
	kernel_thread->tr_prio = PRIO_NORMAL;
	kernel_thread->tr_state = THREAD_READY;
	kernel_thread->tr_parent = __kernel_task;
	kernel_thread->tr_quantum_target = default_quantum();
	unsigned long flags;
	task_lock_irqsave(__kernel_task, &flags);
	queue_push_back(
	    &__kernel_task->t_threads,
	    &kernel_thread->tr_parent_entry);
	task_unlock_irqrestore(__kernel_task, flags);
	spin_lock_irqsave(&task_list_lock, &flags);
	task_list_insert(&task_list, __kernel_task);
	spin_unlock_irqrestore(&task_list_lock, flags);
	return KERN_OK;
}
/*
 * Create the idle task (id -2) and its single thread running idle().
 * The idle task is intentionally not inserted into the global task
 * list, so it is invisible to task_from_pid().
 *
 * Returns KERN_OK on success, KERN_NO_MEMORY if allocation fails.
 */
kern_status_t setup_idle_task(void)
{
	__idle_task = task_alloc();
	if (!__idle_task) {
		return KERN_NO_MEMORY;
	}
	unsigned long flags;
	task_lock_irqsave(__idle_task, &flags);
	__idle_task->t_id = -2;
	__idle_task->t_state = TASK_RUNNING;
	__idle_task->t_pmap = get_kernel_pmap();
	snprintf(__idle_task->t_name, sizeof __idle_task->t_name, "idle");
	struct thread *idle_thread = thread_alloc();
	if (!idle_thread) {
		/* Fix: release the task lock before dropping the last
		 * reference. The old code unref'd while still holding the
		 * lock embedded in the task, freeing the lock out from
		 * under itself and never restoring the saved IRQ state. */
		task_unlock_irqrestore(__idle_task, flags);
		task_unref(__idle_task);
		__idle_task = NULL;
		return KERN_NO_MEMORY;
	}
	idle_thread->tr_id = 0;
	idle_thread->tr_parent = __idle_task;
	thread_init_kernel(idle_thread, (uintptr_t)idle);
	queue_push_back(&__idle_task->t_threads, &idle_thread->tr_parent_entry);
	task_unlock_irqrestore(__idle_task, flags);
	return KERN_OK;
}
/* Register the task object type with the kernel object system.
 * Must run before any task_alloc() call. */
kern_status_t task_object_type_init(void)
{
	kern_status_t status = object_type_register(&task_type);

	return status;
}
/*
 * Allocate an uninitialized task object with one reference held by the
 * caller. Returns NULL if the object system is out of memory.
 */
struct task *task_alloc(void)
{
	struct object *obj = object_create(&task_type);

	if (!obj) {
		return NULL;
	}
	return TASK_CAST(obj);
}
struct task *task_create(struct task *parent, const char *name)
{
struct task *task = task_alloc();
if (!task) {
return NULL;
}
pmap_t pmap = pmap_create();
if (pmap == PMAP_INVALID) {
object_unref(&task->t_base);
return NULL;
}
task->t_id = pid_alloc();
task->t_pmap = pmap;
vm_region_create(
NULL,
"root",
VM_USER_BASE,
VM_USER_LIMIT - VM_USER_BASE,
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_USER,
&task->t_address_space);
task->t_address_space->vr_pmap = pmap;
task->t_state = TASK_STOPPED;
task->t_handles = handle_table_create();
if (name) {
strncpy(task->t_name, name, sizeof task->t_name);
task->t_name[sizeof task->t_name - 1] = '\0';
}
unsigned long flags;
task_lock_irqsave(parent, &flags);
queue_push_back(&parent->t_children, &task->t_child_entry);
task_unlock_irqrestore(parent, flags);
spin_lock_irqsave(&task_list_lock, &flags);
task_list_insert(&task_list, task);
spin_unlock_irqrestore(&task_list_lock, flags);
return task;
}
/*
 * Look up a live task by PID in the global task list.
 *
 * Returns the task, or NULL if no task has that id.
 * NOTE(review): the returned pointer carries no extra reference; the
 * task could be freed after the lock drops -- confirm callers hold a
 * reference or otherwise pin the task.
 */
struct task *task_from_pid(unsigned int pid)
{
	unsigned long irq_state;
	struct task *found;

	spin_lock_irqsave(&task_list_lock, &irq_state);
	found = task_list_get(&task_list, pid);
	spin_unlock_irqrestore(&task_list_lock, irq_state);

	return found;
}
struct thread *task_create_thread(struct task *parent)
{
struct thread *thread = thread_alloc();
thread->tr_id = parent->t_next_thread_id++;
thread->tr_prio = PRIO_NORMAL;
thread->tr_state = THREAD_STOPPED;
thread->tr_parent = parent;
thread->tr_quantum_target = default_quantum();
queue_push_back(&parent->t_threads, &thread->tr_parent_entry);
return thread;
}
/* Return the task owning the currently running thread, or NULL when
 * no thread context exists (e.g. very early boot). */
struct task *current_task(void)
{
	struct thread *me = current_thread();

	if (!me) {
		return NULL;
	}
	return me->tr_parent;
}