sched: add kernel-mode context switching

This commit is contained in:
2023-04-30 14:27:57 +01:00
parent 2cb2d9100a
commit 085c3d2a89
12 changed files with 340 additions and 27 deletions

View File

@@ -2,8 +2,10 @@
#include <socks/sched.h>
#include <socks/cpu.h>
#include <socks/printk.h>
#include <socks/machine/thread.h>
extern kern_status_t setup_kernel_task(void);
extern kern_status_t setup_idle_task(void);
extern kern_status_t task_object_type_init(void);
extern kern_status_t thread_object_type_init(void);
@@ -28,11 +30,18 @@ kern_status_t sched_init(void)
return status;
}
status = setup_idle_task();
if (status != KERN_OK) {
return status;
}
struct thread *this_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&kernel_task()->t_threads));
struct thread *idle_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&idle_task()->t_threads));
struct cpu_data *this_cpu = get_this_cpu();
runqueue_init(&this_cpu->c_rq);
this_cpu->c_current_thread = this_thread;
rq_init(&this_cpu->c_rq);
this_cpu->c_rq.rq_cur = this_thread;
this_cpu->c_rq.rq_idle = idle_thread;
put_cpu(this_cpu);
start_charge_period();
@@ -40,6 +49,58 @@ kern_status_t sched_init(void)
return status;
}
/*
 * Hand the CPU from @old to @new.  If the two threads live in tasks
 * with different address spaces, install @new's pmap first so the
 * incoming thread resumes under its own mappings.
 */
void context_switch(struct thread *old, struct thread *new)
{
	bool same_pmap = (old->tr_parent->t_pmap == new->tr_parent->t_pmap);

	if (!same_pmap) {
		pmap_switch(new->tr_parent->t_pmap);
	}
	switch_to(old, new);
}
/*
 * Core scheduler step for this CPU: re-queue the outgoing thread if it
 * is still runnable, pick the next ready thread (falling back to the
 * idle thread), and context-switch when the pick differs from the
 * current thread.  Runs with interrupts disabled; holds the run-queue
 * lock only around queue manipulation, never across the switch.
 */
void __schedule(void)
{
	ml_int_disable();
	struct cpu_data *this_cpu = get_this_cpu();
	/* NOTE(review): this_cpu is never released with put_cpu() here,
	 * unlike the get_this_cpu()/put_cpu() pairing in sched_init() --
	 * confirm whether get_this_cpu() takes a reference/pin. */
	struct runqueue *rq = &this_cpu->c_rq;
	unsigned long flags;
	rq_lock(rq, &flags);
	struct thread *prev = rq->rq_cur;
	/* The outgoing thread starts a fresh quantum next time it runs. */
	prev->tr_quantum_cycles = 0;
	prev->tr_flags &= ~THREAD_F_NEED_RESCHED;
	enum thread_state prev_state = READ_ONCE(prev->tr_state);
	/* Re-queue prev only while runnable; the idle thread is never
	 * put on a ready queue -- it is reached via the rq_idle fallback. */
	if (prev_state == THREAD_READY && prev != rq->rq_idle) {
		rq_enqueue(rq, prev);
	}
	struct thread *next = rq_dequeue(rq);
	if (!next) {
		next = rq->rq_idle;
	}
	rq->rq_cur = next;
	rq_unlock(rq, flags);
	if (prev != next) {
		/* NOTE(review): interrupts are only re-enabled on the
		 * no-switch path below.  Nothing in this function enables
		 * them after switch_to() eventually returns into prev --
		 * confirm the switch frame / resume path restores the
		 * interrupt flag. */
		context_switch(prev, next);
	} else {
		ml_int_enable();
	}
}
/*
 * Public reschedule entry point: run the core scheduler at least once
 * and keep going while the freshly selected thread already has its
 * resched flag raised again.
 */
void schedule(void)
{
	for (;;) {
		__schedule();
		if (!need_resched()) {
			break;
		}
	}
}
void start_charge_period(void)
{
struct thread *self = current_thread();

View File

@@ -1,8 +1,47 @@
#include <socks/sched.h>
#include <socks/percpu.h>
void runqueue_init(struct runqueue *rq)
/* Bit p set in rq_readybits means the priority-p ready queue is non-empty. */
#define PRIO_MASK(p) (((uint32_t)1) << (p))
/*
 * Index of the highest-priority non-empty queue, or -1 when the mask is
 * empty.  The argument is evaluated more than once; pass a plain lvalue.
 * (Fix: the parameter was previously unparenthesized in "m > 0", so an
 * expression argument such as FIRST_PRIO(a | b) would mis-parse.)
 *
 * NOTE(review): this treats PRIO_MAX as the number of priority levels
 * (highest index PRIO_MAX - 1), while rq_enqueue accepts prio == PRIO_MAX
 * as valid -- confirm which convention PRIO_MAX follows; one of the two
 * is off by one.
 */
#define FIRST_PRIO(m) ((m) > 0 ? (PRIO_MAX - __builtin_clz(m) - 1) : -1)

/* Reset @rq to an empty run queue with an unlocked spin lock. */
void rq_init(struct runqueue *rq)
{
	memset(rq, 0x00, sizeof *rq);
	rq->rq_lock = SPIN_LOCK_INIT;
}
/*
 * Pop the highest-priority ready thread off @rq, or return NULL when no
 * queue holds a runnable thread.  Caller must hold the run-queue lock.
 *
 * Fix: a set ready-bit whose queue turns out to be empty is a stale
 * bitmap entry.  The previous code cleared the bit and returned NULL,
 * hiding any ready threads at lower priorities; now we clear the stale
 * bit and retry at the next-highest priority instead.
 */
struct thread *rq_dequeue(struct runqueue *rq)
{
	for (;;) {
		int prio = FIRST_PRIO(rq->rq_readybits);
		if (prio == -1) {
			return NULL;
		}
		struct queue *q = &rq->rq_queues[prio];
		struct queue_entry *qe = queue_pop_front(q);
		if (!qe) {
			/* Stale bit: queue empty despite the mask. */
			rq->rq_readybits &= ~PRIO_MASK(prio);
			continue;
		}
		if (queue_empty(q)) {
			/* That was the last entry at this priority. */
			rq->rq_readybits &= ~PRIO_MASK(prio);
		}
		return QUEUE_CONTAINER(struct thread, tr_rqentry, qe);
	}
}
/*
 * Append @thr to the ready queue matching its priority and mark that
 * priority non-empty in the bitmap.  Caller must hold the run-queue lock.
 *
 * Fix: the bitmap update previously re-queried thread_priority(thr)
 * instead of using the already-validated local -- if the priority
 * changed between the two calls, the queue and the bitmap could
 * disagree.  Use the cached value for both.
 */
void rq_enqueue(struct runqueue *rq, struct thread *thr)
{
	int prio = thread_priority(thr);

	/*
	 * NOTE(review): an out-of-range priority silently drops the
	 * thread from scheduling; consider clamping or asserting.
	 */
	if (prio < 0 || prio > PRIO_MAX) {
		return;
	}
	queue_push_back(&rq->rq_queues[prio], &thr->tr_rqentry);
	rq->rq_readybits |= PRIO_MASK(prio);
}

View File

@@ -1,4 +1,6 @@
#include <socks/locks.h>
#include <socks/printk.h>
#include <socks/clock.h>
#include <socks/sched.h>
#include <socks/object.h>
#include <socks/cpu.h>
@@ -10,6 +12,7 @@ static struct object_type task_type = {
};
static struct task *__kernel_task;
static struct task *__idle_task;
static spin_lock_t task_list_lock;
static struct btree task_list;
@@ -22,6 +25,20 @@ struct task *kernel_task(void)
return __kernel_task;
}
/* Accessor for the singleton idle task created by setup_idle_task(). */
struct task *idle_task(void)
{
	struct task *t = __idle_task;

	return t;
}
/*
 * Body of the idle thread: sleep for one tick, log, pause the CPU --
 * forever.  Never returns.
 *
 * NOTE(review): the printk fires every HZ ticks and looks like debug
 * scaffolding -- confirm and remove before release.
 */
static void __idle_function(void)
{
	for (;;) {
		clock_wait(HZ);
		printk("idle");
		ml_cpu_pause();
	}
}
kern_status_t setup_kernel_task(void)
{
__kernel_task = task_alloc();
@@ -54,6 +71,69 @@ kern_status_t setup_kernel_task(void)
return KERN_OK;
}
/*
 * Create the singleton idle task and its single idle thread.
 *
 * Returns KERN_OK, or KERN_NO_MEMORY when the task, the thread, or the
 * thread's kernel stack cannot be allocated.  On failure no partial
 * state is left behind and __idle_task stays NULL.
 *
 * Fixes over the previous version:
 *  - removed the large commented-out copy of setup_kernel_task();
 *  - the thread_alloc() failure path used to return while the task
 *    lock was held with interrupts saved-off (task_unlock_irqrestore
 *    was never reached), leaving interrupts disabled;
 *  - thread_init()'s status (it can fail with KERN_NO_MEMORY) was
 *    silently ignored;
 *  - allocations are now done before taking the spin lock instead of
 *    under it with interrupts off.
 */
kern_status_t setup_idle_task(void)
{
	kern_status_t status;
	unsigned long flags;

	__idle_task = task_alloc();
	if (!__idle_task) {
		return KERN_NO_MEMORY;
	}

	/* Allocate and initialize the thread before locking the task. */
	struct thread *idle_thread = thread_alloc();
	if (!idle_thread) {
		status = KERN_NO_MEMORY;
		goto out_task;
	}
	idle_thread->tr_id = 0;
	idle_thread->tr_parent = __idle_task;
	status = thread_init(idle_thread, (uintptr_t)__idle_function);
	if (status != KERN_OK) {
		/* Previously ignored: kernel-stack allocation failure. */
		goto out_thread;
	}

	task_lock_irqsave(__idle_task, &flags);
	__idle_task->t_id = (unsigned int)-1;
	__idle_task->t_pmap = get_kernel_pmap();
	__idle_task->t_state = TASK_RUNNING;
	snprintf(__idle_task->t_name, sizeof __idle_task->t_name, "idle");
	queue_push_back(&__idle_task->t_threads, &idle_thread->tr_threads);
	task_unlock_irqrestore(__idle_task, flags);
	return KERN_OK;

out_thread:
	thread_free(idle_thread);
out_task:
	task_deref(__idle_task);
	__idle_task = NULL;
	return status;
}
kern_status_t task_object_type_init(void)
{
return object_type_register(&task_type);

View File

@@ -1,6 +1,7 @@
#include <socks/sched.h>
#include <socks/object.h>
#include <socks/cpu.h>
#include <socks/machine/thread.h>
static struct object_type thread_type = {
.ob_name = "thread",
@@ -24,6 +25,25 @@ struct thread *thread_alloc(void)
return t;
}
/*
 * Prepare @thr for its first run at instruction pointer @ip: default
 * priority, ready state, default quantum, a fresh kernel stack, and an
 * initial switch frame built by prepare_stack().
 *
 * Returns KERN_NO_MEMORY when the kernel stack cannot be allocated;
 * KERN_OK otherwise.
 */
kern_status_t thread_init(struct thread *thr, uintptr_t ip)
{
	thr->tr_prio = PRIO_NORMAL;
	thr->tr_state = THREAD_READY;
	thr->tr_quantum_target = default_quantum();

	thr->tr_kstack = vm_page_alloc(THREAD_KSTACK_ORDER, VM_NORMAL);
	if (!thr->tr_kstack) {
		return KERN_NO_MEMORY;
	}

	/* The stack grows down: sp and bp both start at the top of the
	 * freshly allocated region. */
	uintptr_t stack_top = (uintptr_t)vm_page_get_vaddr(thr->tr_kstack)
	    + vm_page_order_to_bytes(THREAD_KSTACK_ORDER);
	thr->tr_sp = stack_top;
	thr->tr_bp = stack_top;
	prepare_stack(ip, &thr->tr_sp);
	return KERN_OK;
}
void thread_free(struct thread *thr)
{
@@ -36,7 +56,7 @@ struct thread *current_thread(void)
return NULL;
}
struct thread *out = cpu->c_current_thread;
struct thread *out = cpu->c_rq.rq_cur;
put_cpu(cpu);
return out;
}
@@ -45,3 +65,8 @@ bool need_resched(void)
{
return (current_thread()->tr_flags & THREAD_F_NEED_RESCHED) != 0;
}
/* Current scheduling priority of @thr (index into the ready queues). */
int thread_priority(struct thread *thr)
{
	int prio = thr->tr_prio;

	return prio;
}