2026-02-19 18:54:48 +00:00
|
|
|
#include <kernel/bitmap.h>
|
|
|
|
|
#include <kernel/cpu.h>
|
|
|
|
|
#include <kernel/machine/thread.h>
|
|
|
|
|
#include <kernel/object.h>
|
|
|
|
|
#include <kernel/sched.h>
|
2023-03-09 19:55:52 +00:00
|
|
|
|
2023-05-06 19:48:14 +01:00
|
|
|
/* Checked downcast from a generic struct object pointer to the containing
 * struct thread; thr_base is the embedded object header. */
#define THREAD_CAST(p) OBJECT_C_CAST(struct thread, thr_base, &thread_type, p)
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
static struct object_type thread_type = {
|
2023-03-09 19:55:52 +00:00
|
|
|
.ob_name = "thread",
|
2023-04-12 20:17:11 +01:00
|
|
|
.ob_size = sizeof(struct thread),
|
2023-05-06 22:22:05 +01:00
|
|
|
.ob_header_offset = offsetof(struct thread, thr_base),
|
2023-03-09 19:55:52 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
kern_status_t thread_object_type_init(void)
|
|
|
|
|
{
|
|
|
|
|
return object_type_register(&thread_type);
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-19 19:16:59 +00:00
|
|
|
/* Checked downcast from a generic object to the enclosing struct thread.
 * Thin out-of-line wrapper around the THREAD_CAST() macro so callers
 * outside this file do not need access to the static thread_type. */
struct thread *thread_cast(struct object *obj)
{
	struct thread *thr = THREAD_CAST(obj);

	return thr;
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct thread *thread_alloc(void)
|
2023-03-09 19:55:52 +00:00
|
|
|
{
|
2023-04-12 20:17:11 +01:00
|
|
|
struct object *thread_obj = object_create(&thread_type);
|
2023-03-09 19:55:52 +00:00
|
|
|
if (!thread_obj) {
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2023-03-28 21:39:59 +01:00
|
|
|
|
2023-05-06 19:48:14 +01:00
|
|
|
struct thread *t = THREAD_CAST(thread_obj);
|
2023-03-09 19:55:52 +00:00
|
|
|
return t;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-08 13:11:17 +00:00
|
|
|
/* Initialize @thr as a kernel-mode thread that starts executing at @ip.
 *
 * Precondition: the caller must have set thr->tr_parent — the thread id is
 * assigned from the parent task's counter.
 *
 * Returns KERN_NO_MEMORY if the kernel stack cannot be allocated (in which
 * case no id is consumed from the parent), KERN_OK otherwise.
 */
kern_status_t thread_init_kernel(struct thread *thr, virt_addr_t ip)
{
	/* Allocate the kernel stack before consuming an id: a failure here
	 * must not advance the parent's t_next_thread_id counter. */
	thr->tr_kstack = vm_page_alloc(THREAD_KSTACK_ORDER, VM_NORMAL);
	if (!thr->tr_kstack) {
		return KERN_NO_MEMORY;
	}

	thr->tr_id = thr->tr_parent->t_next_thread_id++;

	thr->tr_prio = PRIO_NORMAL;
	thr->tr_state = THREAD_READY;
	thr->tr_quantum_target = default_quantum();

	/* stacks grow down: start at the top of the allocation */
	thr->tr_sp = (uintptr_t)vm_page_get_vaddr(thr->tr_kstack)
		+ vm_page_order_to_bytes(THREAD_KSTACK_ORDER);
	thr->tr_bp = thr->tr_sp;

	/* build an initial context on the kernel stack so the scheduler can
	 * switch into this thread at @ip */
	ml_thread_prepare_kernel_context(ip, &thr->tr_sp);

	return KERN_OK;
}
|
|
|
|
|
|
|
|
|
|
kern_status_t thread_init_user(
|
|
|
|
|
struct thread *thr,
|
|
|
|
|
virt_addr_t ip,
|
2026-02-19 19:04:00 +00:00
|
|
|
virt_addr_t sp,
|
|
|
|
|
const uintptr_t *args,
|
|
|
|
|
size_t nr_args)
|
2026-02-08 13:11:17 +00:00
|
|
|
{
|
|
|
|
|
thr->tr_id = thr->tr_parent->t_next_thread_id++;
|
|
|
|
|
|
|
|
|
|
thr->tr_prio = PRIO_NORMAL;
|
|
|
|
|
thr->tr_state = THREAD_READY;
|
|
|
|
|
thr->tr_quantum_target = default_quantum();
|
|
|
|
|
|
|
|
|
|
thr->tr_kstack = vm_page_alloc(THREAD_KSTACK_ORDER, VM_NORMAL);
|
|
|
|
|
if (!thr->tr_kstack) {
|
|
|
|
|
return KERN_NO_MEMORY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
thr->tr_sp = (uintptr_t)vm_page_get_vaddr(thr->tr_kstack)
|
|
|
|
|
+ vm_page_order_to_bytes(THREAD_KSTACK_ORDER);
|
|
|
|
|
thr->tr_bp = thr->tr_sp;
|
|
|
|
|
thr->tr_cpu_kernel_sp = thr->tr_sp;
|
|
|
|
|
|
|
|
|
|
/* the new thread needs two contextx:
|
|
|
|
|
* 1) to get the thread running in kernel mode, so that it can
|
|
|
|
|
* execute ml_thread_switch_user
|
|
|
|
|
* 2) to allow ml_thread_switch_user to jump to the correct place
|
|
|
|
|
* in usermode (and with the correct stack).
|
|
|
|
|
*
|
|
|
|
|
* these two contexts are constructed on the thread's kernel stack
|
|
|
|
|
* in reverse order.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/* this context will be used by ml_user_return to jump to userspace
|
|
|
|
|
* with the specified instruction pointer and user stack */
|
2026-02-19 19:04:00 +00:00
|
|
|
ml_thread_prepare_user_context(ip, sp, &thr->tr_sp, args, nr_args);
|
2026-02-08 13:11:17 +00:00
|
|
|
/* this context will be used by the scheduler and ml_thread_switch to
|
|
|
|
|
* jump to ml_user_return in kernel mode with the thread's kernel stack.
|
|
|
|
|
*/
|
|
|
|
|
ml_thread_prepare_kernel_context(
|
|
|
|
|
(uintptr_t)ml_thread_switch_user,
|
|
|
|
|
&thr->tr_sp);
|
2023-04-30 14:27:57 +01:00
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* Release a thread previously created with thread_alloc().
 *
 * NOTE(review): currently a stub — it releases neither the kernel stack
 * allocated by thread_init_kernel()/thread_init_user() nor the underlying
 * object from object_create(); presumably teardown is not implemented yet.
 * TODO: confirm intended ownership and add the corresponding frees.
 */
void thread_free(struct thread *thr)
{
}
|
2023-03-28 21:39:59 +01:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct thread *current_thread(void)
|
2023-03-28 21:39:59 +01:00
|
|
|
{
|
2023-04-12 20:17:11 +01:00
|
|
|
struct cpu_data *cpu = get_this_cpu();
|
2023-04-09 16:38:08 +01:00
|
|
|
if (!cpu) {
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-30 14:27:57 +01:00
|
|
|
struct thread *out = cpu->c_rq.rq_cur;
|
2023-03-28 21:39:59 +01:00
|
|
|
put_cpu(cpu);
|
|
|
|
|
return out;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
bool need_resched(void)
|
|
|
|
|
{
|
|
|
|
|
return (current_thread()->tr_flags & THREAD_F_NEED_RESCHED) != 0;
|
|
|
|
|
}
|
2023-04-30 14:27:57 +01:00
|
|
|
|
|
|
|
|
int thread_priority(struct thread *thr)
|
|
|
|
|
{
|
|
|
|
|
return thr->tr_prio;
|
|
|
|
|
}
|
2023-05-03 19:27:18 +01:00
|
|
|
|
2026-02-19 19:17:38 +00:00
|
|
|
void thread_awaken(struct thread *thr)
|
|
|
|
|
{
|
|
|
|
|
struct runqueue *rq = thr->tr_rq;
|
|
|
|
|
if (!rq) {
|
|
|
|
|
rq = cpu_rq(this_cpu());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
thr->tr_state = THREAD_READY;
|
|
|
|
|
rq_enqueue(rq, thr);
|
|
|
|
|
}
|
|
|
|
|
|
2024-09-17 17:49:05 +01:00
|
|
|
struct thread *create_kernel_thread(void (*fn)(void))
|
2023-05-04 21:43:18 +01:00
|
|
|
{
|
|
|
|
|
struct task *kernel = kernel_task();
|
|
|
|
|
struct thread *thr = thread_alloc();
|
|
|
|
|
|
2026-02-08 12:54:43 +00:00
|
|
|
thr->tr_id = kernel->t_next_thread_id++;
|
2024-09-17 17:49:05 +01:00
|
|
|
|
2023-05-04 21:43:18 +01:00
|
|
|
thr->tr_parent = kernel;
|
|
|
|
|
thr->tr_prio = PRIO_NORMAL;
|
|
|
|
|
thr->tr_state = THREAD_READY;
|
|
|
|
|
thr->tr_quantum_target = default_quantum();
|
|
|
|
|
|
2026-02-08 13:11:17 +00:00
|
|
|
thread_init_kernel(thr, (uintptr_t)fn);
|
2023-05-04 21:43:18 +01:00
|
|
|
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
task_lock_irqsave(kernel, &flags);
|
2026-02-08 13:09:29 +00:00
|
|
|
queue_push_back(&kernel->t_threads, &thr->tr_parent_entry);
|
2023-05-04 21:43:18 +01:00
|
|
|
task_unlock_irqrestore(kernel, flags);
|
|
|
|
|
|
|
|
|
|
schedule_thread_on_cpu(thr);
|
|
|
|
|
|
|
|
|
|
return thr;
|
|
|
|
|
}
|
|
|
|
|
|
2023-05-03 19:27:18 +01:00
|
|
|
struct thread *create_idle_thread(void)
|
|
|
|
|
{
|
|
|
|
|
struct task *idle = idle_task();
|
|
|
|
|
struct thread *thr = thread_alloc();
|
|
|
|
|
|
2026-02-08 12:54:43 +00:00
|
|
|
thr->tr_id = idle->t_next_thread_id++;
|
2024-09-17 17:49:05 +01:00
|
|
|
|
2023-05-03 20:19:11 +01:00
|
|
|
thr->tr_parent = idle;
|
2023-05-03 19:27:18 +01:00
|
|
|
thr->tr_prio = PRIO_NORMAL;
|
|
|
|
|
thr->tr_state = THREAD_READY;
|
|
|
|
|
thr->tr_quantum_target = default_quantum();
|
|
|
|
|
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
task_lock_irqsave(idle, &flags);
|
2026-02-08 13:09:29 +00:00
|
|
|
queue_push_back(&idle->t_threads, &thr->tr_parent_entry);
|
2023-05-03 19:27:18 +01:00
|
|
|
task_unlock_irqrestore(idle, flags);
|
|
|
|
|
|
|
|
|
|
return thr;
|
|
|
|
|
}
|