/*
 * mango/sched/task.c — task creation, global task registry, per-task
 * channel tree, and handle-table management for the mango scheduler.
 */
#include <kernel/channel.h>
#include <kernel/clock.h>
#include <kernel/cpu.h>
#include <kernel/handle.h>
#include <kernel/libc/stdio.h>
#include <kernel/locks.h>
#include <kernel/object.h>
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/util.h>
#include <kernel/vm-region.h>
/* Checked downcast from a generic struct object to its owning struct task. */
#define TASK_CAST(p) OBJECT_C_CAST(struct task, t_base, &task_type, p)

/* Type descriptor for task objects, registered with the kernel object
 * system in task_object_type_init(). */
static struct object_type task_type = {
	.ob_name = "task",
	.ob_size = sizeof(struct task),
	.ob_header_offset = offsetof(struct task, t_base),
};
/* Singleton kernel task (t_id == -1); created by setup_kernel_task(). */
static struct task *__kernel_task;
/* Singleton idle task (t_id == -2); created by setup_idle_task(). */
static struct task *__idle_task;

/* Guards pid_map. */
static spin_lock_t pid_map_lock = SPIN_LOCK_INIT;
/* One bit per process id; a set bit means the id is in use. */
static DECLARE_BITMAP(pid_map, PID_MAX);

/* Guards task_list. */
static spin_lock_t task_list_lock = SPIN_LOCK_INIT;
/* Btree of all live tasks, keyed by t_id via the t_tasklist node. */
static struct btree task_list;
/* Generated helpers: task_list_get(tree, id) and
 * task_list_insert(tree, task), keyed on t_id. */
BTREE_DEFINE_SIMPLE_GET(
	struct task,
	unsigned int,
	t_tasklist,
	t_id,
	task_list_get)
BTREE_DEFINE_SIMPLE_INSERT(struct task, t_tasklist, t_id, task_list_insert)
/* Return the singleton kernel task. Valid after setup_kernel_task(). */
struct task *kernel_task(void)
{
	return __kernel_task;
}
/* Return the singleton idle task. Valid after setup_idle_task(). */
struct task *idle_task(void)
{
	return __idle_task;
}
/* Public wrapper for the TASK_CAST() checked downcast, for callers that
 * cannot see the macro or the static task_type descriptor. */
struct task *task_cast(struct object *obj)
{
	return TASK_CAST(obj);
}
/*
 * Entry point of the idle thread: spin forever, hinting the CPU to
 * relax between iterations. Never returns.
 */
void idle(void)
{
	for (;;)
		ml_cpu_pause();
}
/*
 * Allocate the lowest free process id from the global pid bitmap.
 * Returns BITMAP_NPOS when every id up to PID_MAX is in use.
 * Safe to call from any context: the bitmap is guarded by an
 * IRQ-saving spin lock.
 */
static unsigned int pid_alloc(void)
{
	unsigned long flags;
	unsigned int pid;

	spin_lock_irqsave(&pid_map_lock, &flags);
	pid = bitmap_lowest_clear(pid_map, PID_MAX);
	if (pid != BITMAP_NPOS)
		bitmap_set(pid_map, pid);
	spin_unlock_irqrestore(&pid_map_lock, flags);

	return pid;
}
/* Return a process id allocated by pid_alloc() to the free pool. */
static void pid_free(unsigned int pid)
{
	unsigned long flags;
	spin_lock_irqsave(&pid_map_lock, &flags);
	bitmap_clear(pid_map, pid);
	spin_unlock_irqrestore(&pid_map_lock, flags);
}
/*
 * Create the singleton kernel task and its initial thread, give it the
 * whole kernel address range as its root VM region, and register it in
 * the global task list.
 *
 * Fix: the original dereferenced thread_alloc()'s result without a NULL
 * check; on allocation failure we now release the task and report
 * KERN_NO_MEMORY, matching setup_idle_task().
 *
 * Returns KERN_OK on success, KERN_NO_MEMORY if either allocation fails.
 */
kern_status_t setup_kernel_task(void)
{
	__kernel_task = task_alloc();
	if (!__kernel_task) {
		return KERN_NO_MEMORY;
	}
	/* The kernel task gets the reserved id -1 and the kernel pmap. */
	__kernel_task->t_id = -1;
	__kernel_task->t_state = TASK_RUNNING;
	__kernel_task->t_pmap = get_kernel_pmap();
	vm_region_create(
		NULL,
		"root",
		4,
		VM_KERNEL_BASE,
		VM_KERNEL_LIMIT - VM_KERNEL_BASE,
		VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_SVR,
		&__kernel_task->t_address_space);
	snprintf(
		__kernel_task->t_name,
		sizeof __kernel_task->t_name,
		"kernel_task");
	struct thread *kernel_thread = thread_alloc();
	if (!kernel_thread) {
		task_unref(__kernel_task);
		__kernel_task = NULL;
		return KERN_NO_MEMORY;
	}
	kernel_thread->tr_id = 0;
	kernel_thread->tr_prio = PRIO_NORMAL;
	kernel_thread->tr_state = THREAD_READY;
	kernel_thread->tr_parent = __kernel_task;
	kernel_thread->tr_quantum_target = default_quantum();
	unsigned long flags;
	/* t_threads is guarded by the per-task lock. */
	task_lock_irqsave(__kernel_task, &flags);
	queue_push_back(
		&__kernel_task->t_threads,
		&kernel_thread->tr_parent_entry);
	task_unlock_irqrestore(__kernel_task, flags);
	/* Make the kernel task visible through task_from_tid(). */
	spin_lock_irqsave(&task_list_lock, &flags);
	task_list_insert(&task_list, __kernel_task);
	spin_unlock_irqrestore(&task_list_lock, flags);
	return KERN_OK;
}
/*
 * Create the singleton idle task and its single thread, which runs
 * idle() forever. The idle task is intentionally NOT inserted into the
 * global task list.
 *
 * Fix: the original took the task lock (IRQs off) before calling
 * thread_alloc(), and its failure path returned without ever unlocking,
 * leaving the lock held and interrupts disabled. The thread is now
 * allocated before the lock is taken, so every return path is balanced
 * and no allocation happens under a spin lock.
 *
 * Returns KERN_OK on success, KERN_NO_MEMORY if either allocation fails.
 */
kern_status_t setup_idle_task(void)
{
	__idle_task = task_alloc();
	if (!__idle_task) {
		return KERN_NO_MEMORY;
	}
	struct thread *idle_thread = thread_alloc();
	if (!idle_thread) {
		task_unref(__idle_task);
		__idle_task = NULL;
		return KERN_NO_MEMORY;
	}
	unsigned long flags;
	task_lock_irqsave(__idle_task, &flags);
	/* The idle task gets the reserved id -2 and the kernel pmap. */
	__idle_task->t_id = -2;
	__idle_task->t_state = TASK_RUNNING;
	__idle_task->t_pmap = get_kernel_pmap();
	snprintf(__idle_task->t_name, sizeof __idle_task->t_name, "idle");
	idle_thread->tr_id = 0;
	idle_thread->tr_parent = __idle_task;
	/* Point the thread's kernel context at the idle loop. */
	thread_init_kernel(idle_thread, (uintptr_t)idle);
	queue_push_back(&__idle_task->t_threads, &idle_thread->tr_parent_entry);
	task_unlock_irqrestore(__idle_task, flags);
	return KERN_OK;
}
/* Register the task object type with the kernel object system; must run
 * before any task_alloc() call. Returns the registration status. */
kern_status_t task_object_type_init(void)
{
	return object_type_register(&task_type);
}
/*
 * Allocate a zero-initialized task object via the object system.
 * Returns NULL on allocation failure. The caller owns the returned
 * reference (release with task_unref()/object_unref()).
 */
struct task *task_alloc(void)
{
	struct object *obj = object_create(&task_type);

	return obj ? TASK_CAST(obj) : NULL;
}
/*
 * Create a fully-initialized user task: fresh pid, fresh pmap, a root
 * user-space VM region, an empty handle table, and an optional name
 * (truncated to fit t_name and always NUL-terminated). The task is
 * inserted into the global task list before returning.
 *
 * Fix: the original never checked pid_alloc() for exhaustion
 * (BITMAP_NPOS), so a task could be created with an invalid id. The pid
 * is now allocated first and both failure paths release what they
 * acquired. NOTE(review): there is no visible pmap-destroy call in this
 * file, so the pmap is not reclaimed if a later step were to fail —
 * confirm against the pmap API.
 *
 * Returns the new task (caller owns the reference) or NULL on failure.
 */
struct task *task_create(const char *name, size_t name_len)
{
	struct task *task = task_alloc();
	if (!task) {
		return NULL;
	}
	unsigned int pid = pid_alloc();
	if (pid == BITMAP_NPOS) {
		object_unref(&task->t_base);
		return NULL;
	}
	pmap_t pmap = pmap_create();
	if (pmap == PMAP_INVALID) {
		pid_free(pid);
		object_unref(&task->t_base);
		return NULL;
	}
	task->t_id = pid;
	task->t_pmap = pmap;
	vm_region_create(
		NULL,
		"root",
		4,
		VM_USER_BASE,
		VM_USER_LIMIT - VM_USER_BASE,
		VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_USER,
		&task->t_address_space);
	task->t_address_space->vr_pmap = pmap;
	task->t_state = TASK_RUNNING;
	task->t_handles = handle_table_create();
	if (name) {
		/* Clamp to the buffer and guarantee NUL termination. */
		name_len = MIN(name_len, sizeof task->t_name - 1);
		memcpy(task->t_name, name, name_len);
		task->t_name[name_len] = '\0';
	}
	unsigned long flags;
	spin_lock_irqsave(&task_list_lock, &flags);
	task_list_insert(&task_list, task);
	spin_unlock_irqrestore(&task_list_lock, flags);
	return task;
}
/* Link @child into @parent's child queue. Always returns KERN_OK.
 * NOTE(review): t_children is mutated without taking the parent's task
 * lock — confirm callers serialize this externally. */
kern_status_t task_add_child(struct task *parent, struct task *child)
{
	queue_push_back(&parent->t_children, &child->t_child_entry);
	return KERN_OK;
}
/*
 * Insert @channel into @task's channel btree under key @id.
 *
 * Performs a plain binary-search-tree descent on c_id, attaching the
 * new node as a left or right leaf, then runs the tree's rebalance
 * fixup. Returns KERN_NAME_EXISTS if a channel with the same id is
 * already present, KERN_OK otherwise.
 *
 * NOTE(review): channel->c_id is assigned before the duplicate check,
 * so a failed insert still overwrites the channel's id — confirm this
 * side effect is intended.
 * NOTE(review): no lock is taken around b_channels — confirm callers
 * serialize access to the channel tree.
 */
kern_status_t task_add_channel(
	struct task *task,
	struct channel *channel,
	unsigned int id)
{
	channel->c_id = id;
	/* Empty tree: the new node becomes the root. */
	if (!task->b_channels.b_root) {
		task->b_channels.b_root = &channel->c_node;
		btree_insert_fixup(&task->b_channels, &channel->c_node);
		return KERN_OK;
	}
	/* Descend to a leaf position ordered by c_id. */
	struct btree_node *cur = task->b_channels.b_root;
	while (1) {
		struct channel *cur_node
			= BTREE_CONTAINER(struct channel, c_node, cur);
		struct btree_node *next = NULL;
		if (id > cur_node->c_id) {
			next = btree_right(cur);
			if (!next) {
				btree_put_right(cur, &channel->c_node);
				break;
			}
		} else if (id < cur_node->c_id) {
			next = btree_left(cur);
			if (!next) {
				btree_put_left(cur, &channel->c_node);
				break;
			}
		} else {
			/* Key collision: id already mapped to a channel. */
			return KERN_NAME_EXISTS;
		}
		cur = next;
	}
	/* Rebalance after the leaf attachment above. */
	btree_insert_fixup(&task->b_channels, &channel->c_node);
	return KERN_OK;
}
/* Generated lookup helper over the per-task channel tree, keyed on c_id. */
BTREE_DEFINE_SIMPLE_GET(
	struct channel,
	unsigned int,
	c_node,
	c_id,
	get_channel_with_id)

/* Find the channel registered under @id in @task's channel tree, or
 * NULL if no such channel exists. No locking — see task_add_channel. */
struct channel *task_get_channel(struct task *task, unsigned int id)
{
	return get_channel_with_id(&task->b_channels, id);
}
/*
 * Look up a task by id in the global task list. Returns NULL if no
 * task with that id is registered.
 *
 * NOTE(review): no reference is taken on the task before the list lock
 * is dropped, so the returned pointer can race with task teardown —
 * confirm callers hold their own reference or otherwise pin the task.
 */
struct task *task_from_tid(tid_t id)
{
	unsigned long flags;
	struct task *found;

	spin_lock_irqsave(&task_list_lock, &flags);
	found = task_list_get(&task_list, id);
	spin_unlock_irqrestore(&task_list_lock, flags);

	return found;
}
/*
 * Install @obj into @task's handle table under a freshly allocated
 * handle, bumping the object's handle count. On success *out holds the
 * new handle and KERN_OK is returned; on failure the table's error
 * status is propagated and nothing is modified.
 */
kern_status_t task_open_handle(
	struct task *task,
	struct object *obj,
	handle_flags_t flags,
	kern_handle_t *out)
{
	struct handle *slot = NULL;
	kern_status_t status;

	status = handle_table_alloc_handle(task->t_handles, &slot, out);
	if (status != KERN_OK)
		return status;

	/* Account for the new handle before publishing it in the slot. */
	object_add_handle(obj);
	slot->h_object = obj;
	slot->h_flags = flags;

	return KERN_OK;
}
/*
 * Resolve @handle in @task's handle table. On success, writes the
 * underlying object to *out_obj and its flags to *out_flags (either
 * output pointer may be NULL to skip it) and returns KERN_OK. Returns
 * KERN_INVALID_ARGUMENT if the handle is not present in the table.
 */
kern_status_t task_resolve_handle(
	struct task *task,
	kern_handle_t handle,
	struct object **out_obj,
	handle_flags_t *out_flags)
{
	struct handle *entry = handle_table_get_handle(task->t_handles, handle);

	if (entry == NULL)
		return KERN_INVALID_ARGUMENT;

	if (out_obj != NULL)
		*out_obj = entry->h_object;
	if (out_flags != NULL)
		*out_flags = entry->h_flags;

	return KERN_OK;
}
/* Release @handle from @task's handle table, propagating the table's
 * status. NOTE(review): task_open_handle calls object_add_handle but no
 * matching object-side release is visible here — confirm
 * handle_table_free_handle drops the object's handle count internally. */
kern_status_t task_close_handle(struct task *task, kern_handle_t handle)
{
	return handle_table_free_handle(task->t_handles, handle);
}
struct thread *task_create_thread(struct task *parent)
{
struct thread *thread = thread_alloc();
thread->tr_id = parent->t_next_thread_id++;
thread->tr_prio = PRIO_NORMAL;
thread->tr_state = THREAD_STOPPED;
thread->tr_parent = parent;
thread->tr_quantum_target = default_quantum();
queue_push_back(&parent->t_threads, &thread->tr_parent_entry);
return thread;
}
/* Return the task owning the current thread, or NULL when no thread is
 * running on this CPU (early boot). */
struct task *current_task(void)
{
	struct thread *self = current_thread();

	if (!self)
		return NULL;
	return self->tr_parent;
}