2026-02-19 19:21:50 +00:00
|
|
|
#include <kernel/machine/cpu.h>
|
|
|
|
|
#include <kernel/printk.h>
|
|
|
|
|
#include <kernel/sched.h>
|
|
|
|
|
#include <kernel/syscall.h>
|
|
|
|
|
#include <kernel/vm-region.h>
|
2026-02-08 16:17:11 +00:00
|
|
|
|
2026-02-19 19:21:50 +00:00
|
|
|
extern kern_status_t sys_task_exit(int status)
|
2026-02-08 16:17:11 +00:00
|
|
|
{
|
|
|
|
|
while (1) {
|
2026-02-19 19:21:50 +00:00
|
|
|
printk("sys_exit(%d)", status);
|
|
|
|
|
milli_sleep(1000);
|
2026-02-08 16:17:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return KERN_UNIMPLEMENTED;
|
|
|
|
|
}
|
2026-02-19 19:21:50 +00:00
|
|
|
|
|
|
|
|
kern_status_t sys_task_create(
|
|
|
|
|
kern_handle_t parent_handle,
|
|
|
|
|
const char *name,
|
|
|
|
|
size_t name_len,
|
|
|
|
|
kern_handle_t *out_task,
|
|
|
|
|
kern_handle_t *out_address_space)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
struct task *self = current_task();
|
|
|
|
|
|
|
|
|
|
if (name_len && !validate_access_r(self, name, name_len)) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!validate_access_w(self, out_task, sizeof *out_task)) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!validate_access_w(
|
|
|
|
|
self,
|
|
|
|
|
out_address_space,
|
|
|
|
|
sizeof *out_address_space)) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct object *parent_obj;
|
|
|
|
|
handle_flags_t parent_flags;
|
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
|
kern_status_t status = task_resolve_handle(
|
|
|
|
|
self,
|
|
|
|
|
parent_handle,
|
|
|
|
|
&parent_obj,
|
|
|
|
|
&parent_flags);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
object_ref(parent_obj);
|
|
|
|
|
struct task *parent = task_cast(parent_obj);
|
|
|
|
|
|
|
|
|
|
struct handle *child_handle_slot = NULL, *space_handle_slot = NULL;
|
|
|
|
|
kern_handle_t child_handle, space_handle;
|
|
|
|
|
status = handle_table_alloc_handle(
|
|
|
|
|
self->t_handles,
|
|
|
|
|
&child_handle_slot,
|
|
|
|
|
&child_handle);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
status = handle_table_alloc_handle(
|
|
|
|
|
self->t_handles,
|
|
|
|
|
&space_handle_slot,
|
|
|
|
|
&space_handle);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
handle_table_free_handle(self->t_handles, child_handle);
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
|
|
|
|
|
struct task *child = task_create(name, name_len);
|
|
|
|
|
if (!child) {
|
|
|
|
|
object_unref(parent_obj);
|
|
|
|
|
|
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
|
handle_table_free_handle(self->t_handles, child_handle);
|
|
|
|
|
handle_table_free_handle(self->t_handles, space_handle);
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
|
|
|
|
|
return KERN_NO_MEMORY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
task_lock_irqsave(parent, &flags);
|
|
|
|
|
task_add_child(parent, child);
|
|
|
|
|
task_unlock_irqrestore(parent, flags);
|
|
|
|
|
|
|
|
|
|
child_handle_slot->h_object = &child->t_base;
|
|
|
|
|
space_handle_slot->h_object = &child->t_address_space->vr_base;
|
|
|
|
|
|
|
|
|
|
object_add_handle(&child->t_base);
|
|
|
|
|
object_add_handle(&child->t_address_space->vr_base);
|
|
|
|
|
|
|
|
|
|
object_unref(parent_obj);
|
|
|
|
|
|
|
|
|
|
*out_task = child_handle;
|
|
|
|
|
*out_address_space = space_handle;
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
kern_status_t sys_task_create_thread(
|
|
|
|
|
kern_handle_t task,
|
|
|
|
|
virt_addr_t ip,
|
|
|
|
|
virt_addr_t sp,
|
|
|
|
|
uintptr_t *args,
|
|
|
|
|
size_t nr_args,
|
|
|
|
|
kern_handle_t *out_thread)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
struct task *self = current_task();
|
|
|
|
|
|
|
|
|
|
if (!validate_access_r(self, args, nr_args * sizeof(uintptr_t))) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!validate_access_w(self, out_thread, sizeof *out_thread)) {
|
|
|
|
|
return KERN_MEMORY_FAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct object *target_obj;
|
|
|
|
|
handle_flags_t target_flags;
|
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
|
kern_status_t status
|
|
|
|
|
= task_resolve_handle(self, task, &target_obj, &target_flags);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
object_ref(target_obj);
|
|
|
|
|
struct task *target = task_cast(target_obj);
|
|
|
|
|
|
|
|
|
|
struct handle *target_handle = NULL;
|
|
|
|
|
kern_handle_t out_handle;
|
|
|
|
|
status = handle_table_alloc_handle(
|
|
|
|
|
self->t_handles,
|
|
|
|
|
&target_handle,
|
|
|
|
|
&out_handle);
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
task_lock_irqsave(target, &flags);
|
|
|
|
|
|
|
|
|
|
struct thread *thread = task_create_thread(target);
|
|
|
|
|
if (!thread) {
|
|
|
|
|
object_unref(target_obj);
|
|
|
|
|
task_unlock_irqrestore(target, flags);
|
|
|
|
|
|
|
|
|
|
task_lock_irqsave(self, &flags);
|
|
|
|
|
handle_table_free_handle(self->t_handles, out_handle);
|
|
|
|
|
task_unlock_irqrestore(self, flags);
|
|
|
|
|
return KERN_NO_MEMORY;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
thread_init_user(thread, ip, sp, args, nr_args);
|
|
|
|
|
target_handle->h_object = &thread->thr_base;
|
|
|
|
|
object_add_handle(&thread->thr_base);
|
|
|
|
|
|
|
|
|
|
task_unlock_irqrestore(target, flags);
|
|
|
|
|
|
|
|
|
|
*out_thread = out_handle;
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * sys_thread_start - make a previously created thread runnable.
 *
 * Resolves @thread_handle in the caller's handle table, takes a
 * reference on the thread object, and hands the thread to the
 * scheduler.
 *
 * @thread_handle: handle naming the thread to start (see
 *                 sys_task_create_thread()).
 *
 * Returns: KERN_OK, or the error from handle resolution.
 */
kern_status_t sys_thread_start(kern_handle_t thread_handle)
{
	unsigned long flags;
	struct task *self = current_task();

	struct object *thread_obj;
	handle_flags_t thread_flags;
	task_lock_irqsave(self, &flags);
	kern_status_t status = task_resolve_handle(
		self,
		thread_handle,
		&thread_obj,
		&thread_flags);
	if (status != KERN_OK) {
		task_unlock_irqrestore(self, flags);
		return status;
	}

	/* NOTE(review): this reference is never dropped in this
	 * function — presumably ownership transfers to the scheduler
	 * via schedule_thread_on_cpu(), keeping the thread alive while
	 * runnable. Confirm that contract; otherwise every call leaks
	 * one reference. */
	object_ref(thread_obj);
	/* NOTE(review): thread_cast() is not NULL-checked; a handle to
	 * a non-thread object would be handed to the scheduler —
	 * confirm the resolver guarantees the type. */
	struct thread *thread = thread_cast(thread_obj);
	/* Drop the caller's task lock before entering the scheduler. */
	task_unlock_irqrestore(self, flags);

	schedule_thread_on_cpu(thread);

	return KERN_OK;
}
|