Files
mango/sched/thread.c
Max Wash 6019c9307d kernel: separate headers into kernel and user headers
all kernel headers have been moved from include/mango to include/kernel
and include definitions that are only relevant to kernel-space.

any definitions that are relevant to both kernel- and user-space
(e.g. type definitions, syscall IDs) have been moved to
include/mango within libmango.
2026-02-19 18:54:48 +00:00

167 lines
3.9 KiB
C

#include <kernel/bitmap.h>
#include <kernel/cpu.h>
#include <kernel/machine/thread.h>
#include <kernel/object.h>
#include <kernel/sched.h>
/* Convert a generic object pointer back into its containing struct thread.
 * NOTE(review): OBJECT_C_CAST appears to be a container_of-style cast keyed
 * on the thr_base member -- confirm against kernel/object.h. */
#define THREAD_CAST(p) OBJECT_C_CAST(struct thread, thr_base, &thread_type, p)

/* Object-system type descriptor for threads: human-readable name, the
 * allocation size, and the offset of the embedded object header
 * (thr_base) inside struct thread. */
static struct object_type thread_type = {
	.ob_name = "thread",
	.ob_size = sizeof(struct thread),
	.ob_header_offset = offsetof(struct thread, thr_base),
};
/* Register the thread object type with the kernel object system.
 * Must run once, before any thread is allocated via thread_alloc().
 * Returns whatever status object_type_register() reports. */
kern_status_t thread_object_type_init(void)
{
	kern_status_t status = object_type_register(&thread_type);

	return status;
}
struct thread *thread_alloc(void)
{
struct object *thread_obj = object_create(&thread_type);
if (!thread_obj) {
return NULL;
}
struct thread *t = THREAD_CAST(thread_obj);
memset(t, 0x00, sizeof *t);
return t;
}
/* Initialise a freshly allocated thread to run in kernel mode, starting
 * execution at instruction pointer ip.
 *
 * Precondition: thr->tr_parent must already point at the owning task --
 * the thread ID is drawn from the parent's counter below.
 *
 * Returns KERN_OK, or KERN_NO_MEMORY if the kernel stack cannot be
 * allocated (thr is left partially initialised in that case).
 */
kern_status_t thread_init_kernel(struct thread *thr, virt_addr_t ip)
{
	/* NOTE(review): t_next_thread_id is incremented without the task
	 * lock here -- confirm callers serialise thread creation. */
	thr->tr_id = thr->tr_parent->t_next_thread_id++;
	thr->tr_prio = PRIO_NORMAL;
	thr->tr_state = THREAD_READY;
	thr->tr_quantum_target = default_quantum();
	thr->tr_kstack = vm_page_alloc(THREAD_KSTACK_ORDER, VM_NORMAL);
	if (!thr->tr_kstack) {
		return KERN_NO_MEMORY;
	}
	/* Point sp/bp one past the end of the new kernel stack: contexts
	 * are pushed downwards from there. */
	thr->tr_sp = (uintptr_t)vm_page_get_vaddr(thr->tr_kstack)
		+ vm_page_order_to_bytes(THREAD_KSTACK_ORDER);
	thr->tr_bp = thr->tr_sp;
	/* Build the initial machine-level context on the stack so the
	 * scheduler can switch to this thread and land at ip; tr_sp is
	 * updated in place to the new stack top. */
	ml_thread_prepare_kernel_context(ip, &thr->tr_sp);
	return KERN_OK;
}
/* Initialise a freshly allocated thread to run in user mode, entering
 * userspace at instruction pointer ip with user stack pointer sp.
 *
 * Precondition: thr->tr_parent must already point at the owning task --
 * the thread ID is drawn from the parent's counter below.
 *
 * Returns KERN_OK, or KERN_NO_MEMORY if the kernel stack cannot be
 * allocated (thr is left partially initialised in that case).
 */
kern_status_t thread_init_user(
	struct thread *thr,
	virt_addr_t ip,
	virt_addr_t sp)
{
	/* NOTE(review): t_next_thread_id is incremented without the task
	 * lock here -- confirm callers serialise thread creation. */
	thr->tr_id = thr->tr_parent->t_next_thread_id++;
	thr->tr_prio = PRIO_NORMAL;
	thr->tr_state = THREAD_READY;
	thr->tr_quantum_target = default_quantum();
	thr->tr_kstack = vm_page_alloc(THREAD_KSTACK_ORDER, VM_NORMAL);
	if (!thr->tr_kstack) {
		return KERN_NO_MEMORY;
	}
	/* Point sp/bp one past the end of the new kernel stack: contexts
	 * are pushed downwards from there. */
	thr->tr_sp = (uintptr_t)vm_page_get_vaddr(thr->tr_kstack)
		+ vm_page_order_to_bytes(THREAD_KSTACK_ORDER);
	thr->tr_bp = thr->tr_sp;
	/* Record the kernel stack top used when this thread traps back
	 * into the kernel from user mode. */
	thr->tr_cpu_kernel_sp = thr->tr_sp;
	/* the new thread needs two contexts:
	 * 1) to get the thread running in kernel mode, so that it can
	 * execute ml_thread_switch_user
	 * 2) to allow ml_thread_switch_user to jump to the correct place
	 * in usermode (and with the correct stack).
	 *
	 * these two contexts are constructed on the thread's kernel stack
	 * in reverse order.
	 */
	/* this context will be used by ml_user_return to jump to userspace
	 * with the specified instruction pointer and user stack */
	ml_thread_prepare_user_context(ip, sp, &thr->tr_sp);
	/* this context will be used by the scheduler and ml_thread_switch to
	 * jump to ml_user_return in kernel mode with the thread's kernel stack.
	 */
	ml_thread_prepare_kernel_context(
		(uintptr_t)ml_thread_switch_user,
		&thr->tr_sp);
	return KERN_OK;
}
/* Release a thread's resources.
 *
 * NOTE(review): currently a no-op -- neither the kernel stack allocated
 * in thread_init_kernel/thread_init_user nor the backing object from
 * thread_alloc is released here, so every freed thread leaks.  TODO:
 * confirm whether teardown happens elsewhere or implement it here.
 */
void thread_free(struct thread *thr)
{
}
/* Return the thread currently running on this CPU, or NULL when per-CPU
 * data is not available (e.g. before the per-CPU area is set up). */
struct thread *current_thread(void)
{
	struct thread *running = NULL;
	struct cpu_data *cpu = get_this_cpu();

	if (cpu) {
		running = cpu->c_rq.rq_cur;
		put_cpu(cpu);
	}
	return running;
}
bool need_resched(void)
{
return (current_thread()->tr_flags & THREAD_F_NEED_RESCHED) != 0;
}
/* Accessor for a thread's scheduling priority (tr_prio). */
int thread_priority(struct thread *thr)
{
	int prio = thr->tr_prio;

	return prio;
}
struct thread *create_kernel_thread(void (*fn)(void))
{
struct task *kernel = kernel_task();
struct thread *thr = thread_alloc();
thr->tr_id = kernel->t_next_thread_id++;
thr->tr_parent = kernel;
thr->tr_prio = PRIO_NORMAL;
thr->tr_state = THREAD_READY;
thr->tr_quantum_target = default_quantum();
thread_init_kernel(thr, (uintptr_t)fn);
unsigned long flags;
task_lock_irqsave(kernel, &flags);
queue_push_back(&kernel->t_threads, &thr->tr_parent_entry);
task_unlock_irqrestore(kernel, flags);
schedule_thread_on_cpu(thr);
return thr;
}
struct thread *create_idle_thread(void)
{
struct task *idle = idle_task();
struct thread *thr = thread_alloc();
thr->tr_id = idle->t_next_thread_id++;
thr->tr_parent = idle;
thr->tr_prio = PRIO_NORMAL;
thr->tr_state = THREAD_READY;
thr->tr_quantum_target = default_quantum();
unsigned long flags;
task_lock_irqsave(idle, &flags);
queue_push_back(&idle->t_threads, &thr->tr_parent_entry);
task_unlock_irqrestore(idle, flags);
return thr;
}