sched: implement user-mode task and thread creation

This commit is contained in:
2026-02-08 13:11:17 +00:00
parent d2f303680d
commit ee82097017
4 changed files with 129 additions and 9 deletions

View File

@@ -72,11 +72,21 @@ static void expire_timers(struct cpu_data *cpu)
/*
 * Switch execution from thread `old` to thread `new`.
 *
 * Saves the outgoing thread's per-CPU kernel/user stack pointers,
 * switches address spaces only when the two threads belong to tasks
 * with different pmaps (avoids a redundant pmap_switch between
 * sibling threads of the same task), installs the incoming thread's
 * stack pointers into the per-CPU block, and finally performs the
 * machine-level context switch.
 */
void context_switch(struct thread *old, struct thread *new)
{
	struct ml_cpu_block *this_cpu = ml_this_cpu();

	/* Save the outgoing thread's per-CPU stack pointers. */
	old->tr_cpu_kernel_sp = ml_cpu_block_get_kstack(this_cpu);
	old->tr_cpu_user_sp = ml_cpu_block_get_ustack(this_cpu);

	/* Only switch address spaces when crossing task boundaries. */
	pmap_t old_pmap = old->tr_parent->t_pmap;
	pmap_t new_pmap = new->tr_parent->t_pmap;
	if (old_pmap != new_pmap) {
		pmap_switch(new_pmap);
	}

	/* Install the incoming thread's stacks before switching to it. */
	ml_cpu_block_set_kstack(this_cpu, new->tr_cpu_kernel_sp);
	ml_cpu_block_set_ustack(this_cpu, new->tr_cpu_user_sp);
	ml_thread_switch(old, new);
}
void __schedule(enum sched_mode mode)

View File

@@ -145,7 +145,7 @@ kern_status_t setup_idle_task(void)
idle_thread->tr_id = 0;
idle_thread->tr_parent = __idle_task;
thread_init(idle_thread, (uintptr_t)idle);
thread_init_kernel(idle_thread, (uintptr_t)idle);
queue_push_back(&__idle_task->t_threads, &idle_thread->tr_parent_entry);
@@ -170,6 +170,50 @@ struct task *task_alloc(void)
return t;
}
/*
 * Create a new task as a child of `parent`.
 *
 * Allocates the task object and a fresh pmap, sets up the root
 * user-space VM region, a handle table and an optional name, then
 * links the task into both the parent's child list and the global
 * task list.  Returns NULL when the task object or its pmap cannot
 * be allocated.
 */
struct task *task_create(struct task *parent, const char *name)
{
	struct task *t = task_alloc();

	if (!t) {
		return NULL;
	}

	pmap_t map = pmap_create();
	if (map == PMAP_INVALID) {
		object_unref(&t->t_base);
		return NULL;
	}

	t->t_id = pid_alloc();
	t->t_pmap = map;

	/* Root region spanning the entire user address range. */
	vm_region_create(
	    NULL,
	    "root",
	    VM_USER_BASE,
	    VM_USER_LIMIT - VM_USER_BASE,
	    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_USER,
	    &t->t_address_space);
	t->t_address_space->vr_pmap = map;

	t->t_state = TASK_STOPPED;
	t->t_handles = handle_table_create();

	if (name) {
		/* Bounded copy; force NUL termination ourselves since
		 * strncpy does not guarantee it. */
		strncpy(t->t_name, name, sizeof t->t_name);
		t->t_name[sizeof t->t_name - 1] = '\0';
	}

	/* Publish: first under the parent, then on the global list. */
	unsigned long flags;
	task_lock_irqsave(parent, &flags);
	queue_push_back(&parent->t_children, &t->t_child_entry);
	task_unlock_irqrestore(parent, flags);

	spin_lock_irqsave(&task_list_lock, &flags);
	task_list_insert(&task_list, t);
	spin_unlock_irqrestore(&task_list_lock, flags);

	return t;
}
struct task *task_from_pid(unsigned int pid)
{
unsigned long flags;
@@ -179,6 +223,20 @@ struct task *task_from_pid(unsigned int pid)
return t;
}
struct thread *task_create_thread(struct task *parent)
{
struct thread *thread = thread_alloc();
thread->tr_id = parent->t_next_thread_id++;
thread->tr_prio = PRIO_NORMAL;
thread->tr_state = THREAD_STOPPED;
thread->tr_parent = parent;
thread->tr_quantum_target = default_quantum();
queue_push_back(&parent->t_threads, &thread->tr_parent_entry);
return thread;
}
struct task *current_task(void)
{
struct thread *thr = current_thread();

View File

@@ -29,7 +29,7 @@ struct thread *thread_alloc(void)
return t;
}
kern_status_t thread_init(struct thread *thr, uintptr_t ip)
kern_status_t thread_init_kernel(struct thread *thr, virt_addr_t ip)
{
thr->tr_id = thr->tr_parent->t_next_thread_id++;
@@ -46,7 +46,51 @@ kern_status_t thread_init(struct thread *thr, uintptr_t ip)
+ vm_page_order_to_bytes(THREAD_KSTACK_ORDER);
thr->tr_bp = thr->tr_sp;
prepare_stack(ip, &thr->tr_sp);
ml_thread_prepare_kernel_context(ip, &thr->tr_sp);
return KERN_OK;
}
/*
 * Initialize `thr` as a user-mode thread that starts executing at
 * user instruction pointer `ip` with user stack pointer `sp`.
 *
 * Allocates a kernel stack and builds two contexts on it, innermost
 * first:
 *   1) a user context consumed by ml_user_return, carrying the
 *      user-mode ip/sp;
 *   2) a kernel context consumed by the scheduler's
 *      ml_thread_switch, which resumes at ml_thread_switch_user so
 *      the thread can drop to user mode.
 *
 * Returns KERN_NO_MEMORY when the kernel stack cannot be allocated,
 * KERN_OK otherwise.
 */
kern_status_t thread_init_user(
	struct thread *thr,
	virt_addr_t ip,
	virt_addr_t sp)
{
	/* NOTE(review): task_create_thread() also assigns tr_id from
	 * t_next_thread_id; confirm callers do not run both paths, or
	 * ids are consumed twice per thread. */
	thr->tr_id = thr->tr_parent->t_next_thread_id++;
	thr->tr_prio = PRIO_NORMAL;
	thr->tr_state = THREAD_READY;
	thr->tr_quantum_target = default_quantum();

	thr->tr_kstack = vm_page_alloc(THREAD_KSTACK_ORDER, VM_NORMAL);
	if (!thr->tr_kstack) {
		return KERN_NO_MEMORY;
	}

	/* Kernel stack grows down from the top of the allocation. */
	thr->tr_sp = (uintptr_t)vm_page_get_vaddr(thr->tr_kstack)
	    + vm_page_order_to_bytes(THREAD_KSTACK_ORDER);
	thr->tr_bp = thr->tr_sp;
	thr->tr_cpu_kernel_sp = thr->tr_sp;
	/* Record the user stack as well: context_switch() loads
	 * tr_cpu_user_sp into the per-CPU block when this thread is
	 * first switched in, so it must not be left uninitialized. */
	thr->tr_cpu_user_sp = sp;

	/* The new thread needs two contexts:
	 * 1) to get the thread running in kernel mode, so that it can
	 *    execute ml_thread_switch_user
	 * 2) to allow ml_thread_switch_user to jump to the correct
	 *    place in user mode (and with the correct stack).
	 *
	 * These two contexts are constructed on the thread's kernel
	 * stack in reverse order.
	 */

	/* This context is used by ml_user_return to enter userspace
	 * with the specified instruction pointer and user stack. */
	ml_thread_prepare_user_context(ip, sp, &thr->tr_sp);

	/* This context is used by the scheduler and ml_thread_switch
	 * to jump to ml_thread_switch_user in kernel mode on the
	 * thread's kernel stack. */
	ml_thread_prepare_kernel_context(
	    (uintptr_t)ml_thread_switch_user,
	    &thr->tr_sp);

	return KERN_OK;
}
@@ -89,7 +133,7 @@ struct thread *create_kernel_thread(void (*fn)(void))
thr->tr_state = THREAD_READY;
thr->tr_quantum_target = default_quantum();
thread_init(thr, (uintptr_t)fn);
thread_init_kernel(thr, (uintptr_t)fn);
unsigned long flags;
task_lock_irqsave(kernel, &flags);