From ee82097017ea49cd1717653e0f4b388ab9b9e896 Mon Sep 17 00:00:00 2001 From: Max Wash Date: Sun, 8 Feb 2026 13:11:17 +0000 Subject: [PATCH] sched: implement user-mode task and thread creation --- include/mango/sched.h | 12 +++++++-- sched/core.c | 16 +++++++++--- sched/task.c | 60 ++++++++++++++++++++++++++++++++++++++++++- sched/thread.c | 50 +++++++++++++++++++++++++++++++++--- 4 files changed, 129 insertions(+), 9 deletions(-) diff --git a/include/mango/sched.h b/include/mango/sched.h index b6207f7..f9bb6f1 100644 --- a/include/mango/sched.h +++ b/include/mango/sched.h @@ -106,7 +106,8 @@ struct thread { cycles_t tr_quantum_cycles, tr_quantum_target; cycles_t tr_total_cycles; - uintptr_t tr_sp, tr_bp; + virt_addr_t tr_ip, tr_sp, tr_bp; + virt_addr_t tr_cpu_user_sp, tr_cpu_kernel_sp; struct runqueue *tr_rq; @@ -114,6 +115,7 @@ struct thread { struct queue_entry tr_rqentry; struct vm_page *tr_kstack; + struct vm_object *tr_ustack; }; struct runqueue { @@ -181,6 +183,7 @@ extern void rq_remove_thread(struct runqueue *rq, struct thread *thr); extern struct runqueue *cpu_rq(unsigned int cpu); extern struct task *task_alloc(void); +extern struct task *task_create(struct task *parent, const char *name); static inline struct task *task_ref(struct task *task) { return OBJECT_CAST(struct task, t_base, object_ref(&task->t_base)); @@ -190,6 +193,7 @@ static inline void task_unref(struct task *task) object_unref(&task->t_base); } extern struct task *task_from_pid(unsigned int pid); +extern struct thread *task_create_thread(struct task *parent); extern struct task *kernel_task(void); extern struct task *idle_task(void); extern cycles_t default_quantum(void); @@ -207,7 +211,11 @@ extern void end_charge_period(void); DEFINE_OBJECT_LOCK_FUNCTION(task, t_base) extern struct thread *thread_alloc(void); -extern kern_status_t thread_init(struct thread *thr, uintptr_t ip); +extern kern_status_t thread_init_kernel(struct thread *thr, virt_addr_t ip); +extern kern_status_t 
thread_init_user( + struct thread *thr, + virt_addr_t ip, + virt_addr_t sp); extern int thread_priority(struct thread *thr); extern void idle(void); extern struct thread *create_kernel_thread(void (*fn)(void)); diff --git a/sched/core.c b/sched/core.c index b88432e..da0ff23 100644 --- a/sched/core.c +++ b/sched/core.c @@ -72,11 +72,21 @@ static void expire_timers(struct cpu_data *cpu) void context_switch(struct thread *old, struct thread *new) { - if (old->tr_parent->t_pmap != new->tr_parent->t_pmap) { - pmap_switch(new->tr_parent->t_pmap); + struct ml_cpu_block *this_cpu = ml_this_cpu(); + + old->tr_cpu_kernel_sp = ml_cpu_block_get_kstack(this_cpu); + old->tr_cpu_user_sp = ml_cpu_block_get_ustack(this_cpu); + + pmap_t old_pmap = old->tr_parent->t_pmap; + pmap_t new_pmap = new->tr_parent->t_pmap; + + if (old_pmap != new_pmap) { + pmap_switch(new_pmap); } - switch_to(old, new); + ml_cpu_block_set_kstack(this_cpu, new->tr_cpu_kernel_sp); + ml_cpu_block_set_ustack(this_cpu, new->tr_cpu_user_sp); + ml_thread_switch(old, new); } void __schedule(enum sched_mode mode) diff --git a/sched/task.c b/sched/task.c index 6b80ccf..5122ab3 100644 --- a/sched/task.c +++ b/sched/task.c @@ -145,7 +145,7 @@ kern_status_t setup_idle_task(void) idle_thread->tr_id = 0; idle_thread->tr_parent = __idle_task; - thread_init(idle_thread, (uintptr_t)idle); + thread_init_kernel(idle_thread, (uintptr_t)idle); queue_push_back(&__idle_task->t_threads, &idle_thread->tr_parent_entry); @@ -170,6 +170,50 @@ struct task *task_alloc(void) return t; } +struct task *task_create(struct task *parent, const char *name) +{ + struct task *task = task_alloc(); + if (!task) { + return NULL; + } + + pmap_t pmap = pmap_create(); + if (pmap == PMAP_INVALID) { + object_unref(&task->t_base); + return NULL; + } + + task->t_id = pid_alloc(); + task->t_pmap = pmap; + vm_region_create( + NULL, + "root", + VM_USER_BASE, + VM_USER_LIMIT - VM_USER_BASE, + VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_USER, + 
&task->t_address_space); + + task->t_address_space->vr_pmap = pmap; + task->t_state = TASK_STOPPED; + task->t_handles = handle_table_create(); + + if (name) { + strncpy(task->t_name, name, sizeof task->t_name); + task->t_name[sizeof task->t_name - 1] = '\0'; + } + + unsigned long flags; + task_lock_irqsave(parent, &flags); + queue_push_back(&parent->t_children, &task->t_child_entry); + task_unlock_irqrestore(parent, flags); + + spin_lock_irqsave(&task_list_lock, &flags); + task_list_insert(&task_list, task); + spin_unlock_irqrestore(&task_list_lock, flags); + + return task; +} + struct task *task_from_pid(unsigned int pid) { unsigned long flags; @@ -179,6 +223,20 @@ struct task *task_from_pid(unsigned int pid) return t; } +struct thread *task_create_thread(struct task *parent) +{ + struct thread *thread = thread_alloc(); + thread->tr_id = parent->t_next_thread_id++; + thread->tr_prio = PRIO_NORMAL; + thread->tr_state = THREAD_STOPPED; + thread->tr_parent = parent; + thread->tr_quantum_target = default_quantum(); + + queue_push_back(&parent->t_threads, &thread->tr_parent_entry); + + return thread; +} + struct task *current_task(void) { struct thread *thr = current_thread(); diff --git a/sched/thread.c b/sched/thread.c index 2486e9e..d2d637d 100644 --- a/sched/thread.c +++ b/sched/thread.c @@ -29,7 +29,7 @@ struct thread *thread_alloc(void) return t; } -kern_status_t thread_init(struct thread *thr, uintptr_t ip) +kern_status_t thread_init_kernel(struct thread *thr, virt_addr_t ip) { thr->tr_id = thr->tr_parent->t_next_thread_id++; @@ -46,7 +46,51 @@ kern_status_t thread_init(struct thread *thr, uintptr_t ip) + vm_page_order_to_bytes(THREAD_KSTACK_ORDER); thr->tr_bp = thr->tr_sp; - prepare_stack(ip, &thr->tr_sp); + ml_thread_prepare_kernel_context(ip, &thr->tr_sp); + + return KERN_OK; +} + +kern_status_t thread_init_user( + struct thread *thr, + virt_addr_t ip, + virt_addr_t sp) +{ + thr->tr_id = thr->tr_parent->t_next_thread_id++; + + thr->tr_prio = PRIO_NORMAL; + 
thr->tr_state = THREAD_READY; + thr->tr_quantum_target = default_quantum(); + + thr->tr_kstack = vm_page_alloc(THREAD_KSTACK_ORDER, VM_NORMAL); + if (!thr->tr_kstack) { + return KERN_NO_MEMORY; + } + + thr->tr_sp = (uintptr_t)vm_page_get_vaddr(thr->tr_kstack) + + vm_page_order_to_bytes(THREAD_KSTACK_ORDER); + thr->tr_bp = thr->tr_sp; + thr->tr_cpu_kernel_sp = thr->tr_sp; + + /* the new thread needs two contexts: + * 1) to get the thread running in kernel mode, so that it can + * execute ml_thread_switch_user + * 2) to allow ml_thread_switch_user to jump to the correct place + * in user mode (and with the correct stack). + * + * these two contexts are constructed on the thread's kernel stack + * in reverse order. + */ + + /* this context will be used by ml_user_return to jump to userspace + * with the specified instruction pointer and user stack */ + ml_thread_prepare_user_context(ip, sp, &thr->tr_sp); + /* this context will be used by the scheduler and ml_thread_switch to + * jump to ml_user_return in kernel mode with the thread's kernel stack. + */ + ml_thread_prepare_kernel_context( + (uintptr_t)ml_thread_switch_user, + &thr->tr_sp); return KERN_OK; } @@ -89,7 +133,7 @@ struct thread *create_kernel_thread(void (*fn)(void)) thr->tr_state = THREAD_READY; thr->tr_quantum_target = default_quantum(); - thread_init(thr, (uintptr_t)fn); + thread_init_kernel(thr, (uintptr_t)fn); unsigned long flags; task_lock_irqsave(kernel, &flags);