sched: add kernel-mode context switching

This commit is contained in:
2023-04-30 14:27:57 +01:00
parent 2cb2d9100a
commit 085c3d2a89
12 changed files with 340 additions and 27 deletions

View File

@@ -36,32 +36,32 @@ struct irq_hook {
};
struct cpu_context {
uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
uint64_t rdi, rsi, rbp, unused_rsp, rbx, rdx, rcx, rax;
uint64_t int_no, err_no;
uint64_t rip, cs, rflags, rsp, ss;
uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
uint64_t rdi, rsi, rbp, unused_rsp, rbx, rdx, rcx, rax;
uint64_t int_no, err_no;
uint64_t rip, cs, rflags, rsp, ss;
} __packed;
struct idt_entry {
uint16_t base_low;
uint16_t selector;
uint8_t always0;
uint8_t type : 4;
uint8_t zero : 1;
uint8_t dpl : 2;
uint8_t present : 1;
uint16_t base_middle;
uint32_t base_high;
uint32_t reserved;
uint16_t base_low;
uint16_t selector;
uint8_t always0;
uint8_t type : 4;
uint8_t zero : 1;
uint8_t dpl : 2;
uint8_t present : 1;
uint16_t base_middle;
uint32_t base_high;
uint32_t reserved;
} __packed;
struct idt {
struct idt_entry i_entries[NR_IDT_ENTRIES];
struct idt_entry i_entries[NR_IDT_ENTRIES];
};
struct idt_ptr {
uint16_t i_limit;
uintptr_t i_base;
uint16_t i_limit;
uintptr_t i_base;
} __packed;
/* Interrupt handler callback: invoked with the interrupted CPU's saved register frame. */
typedef void (*int_hook)(struct cpu_context *);

View File

@@ -0,0 +1,10 @@
#ifndef SOCKS_X86_64_THREAD_H_
#define SOCKS_X86_64_THREAD_H_
#include <socks/sched.h>
/* Arch (x86-64) thread switching entry points. */
/* Save 'from's register state on its stack and resume 'to'; returns when 'from' is next scheduled. */
extern void switch_to(struct thread *from, struct thread *to);
/* Seed a fresh kernel stack so the first switch_to "returns" to ip; *sp is adjusted downward. */
extern void prepare_stack(uintptr_t ip, uintptr_t *sp);
/* Enter user mode at ip with user stack sp — presumably does not return; confirm against implementation. */
extern void user_jump(uintptr_t ip, uintptr_t sp);
#endif

View File

@@ -96,7 +96,7 @@ static void gpf_handler(struct cpu_context *regs)
static void pf_handler(struct cpu_context *regs)
{
printk("page fault (%016llx %016llx)", pf_faultptr(), regs->rip);
printk("page fault (%016llx %016llx %016llx)", pf_faultptr(), regs->rip, regs->err_no);
ml_halt_cpu();
}
@@ -241,6 +241,10 @@ void irq_dispatch(struct cpu_context *regs)
hook->irq_callback();
}
if (need_resched()) {
schedule();
}
start_charge_period();
}

20
arch/x86_64/thread.c Normal file
View File

@@ -0,0 +1,20 @@
#include <socks/machine/thread.h>
/*
 * Saved-register frame as laid out by the switch_to stub: ascending
 * addresses hold r15..r8, rdi, rsi, rbp, a dummy rsp slot, rbx, rdx,
 * rcx, rax, then the saved RFLAGS.  This must mirror the push/pop
 * order in switch_to exactly — do not reorder fields independently.
 */
struct thread_ctx {
	uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
	uint64_t rdi, rsi, rbp, unused_rsp, rbx, rdx, rcx, rax;
	uint64_t rfl;
} __packed;
/*
 * Build an initial frame on a fresh kernel stack so the first
 * switch_to into this thread pops zeroed registers and "returns"
 * to ip.  *sp must point at the (empty, downward-growing) stack top
 * and is left pointing at the completed frame.
 */
void prepare_stack(uintptr_t ip, uintptr_t *sp)
{
	/* Return address consumed by switch_to's ret. */
	(*sp) -= sizeof(uintptr_t);
	uintptr_t *dest_ip = (uintptr_t *)(*sp);
	*dest_ip = ip;
	/* Register frame popped by switch_to before the ret. */
	(*sp) -= sizeof(struct thread_ctx);
	struct thread_ctx *ctx = (struct thread_ctx *)(*sp);
	memset(ctx, 0x0, sizeof *ctx);
	/* 0x202: reserved bit 1 always set, IF set so the new thread runs with interrupts enabled. */
	ctx->rfl = 0x202;
}

View File

@@ -0,0 +1,54 @@
.code64
.extern THREAD_sp
//TASK_threadsp:
//.long 32
.global switch_to
.type switch_to, @function
// %rdi = (struct thread *) current thread.
// %rsi = (struct thread *) next thread.
// Saves all GPRs plus RFLAGS on the current stack, stashes %rsp in
// the current thread (at offset THREAD_sp), adopts the next thread's
// saved %rsp, and unwinds the same frame in reverse.  The frame
// layout must match struct thread_ctx in arch/x86_64/thread.c.
switch_to:
pushfq                       // saved RFLAGS (thread_ctx.rfl)
push %rax
push %rcx
push %rdx
push %rbx
pushq $0                     // placeholder for the unused_rsp slot
push %rbp
push %rsi
push %rdi
push %r8
push %r9
push %r10
push %r11
push %r12
push %r13
push %r14
push %r15
movq %rsp, THREAD_sp(%rdi)   // save outgoing thread's stack pointer
movq THREAD_sp(%rsi), %rsp   // switch to incoming thread's stack
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
add $8, %rsp                 // skip the unused_rsp placeholder
pop %rbx
pop %rdx
pop %rcx
pop %rax
popfq                        // restores RFLAGS, including the incoming thread's IF state
ret                          // resumes at the incoming thread's saved return address

View File

@@ -19,7 +19,6 @@ struct cpu_data {
unsigned int c_id;
unsigned int c_preempt_count;
struct thread *c_current_thread;
struct runqueue c_rq;
};

View File

@@ -10,6 +10,7 @@
#define TASK_NAME_MAX 64
#define PRIO_MAX 32
#define THREAD_KSTACK_ORDER VM_PAGE_4K
#ifdef __cplusplus
extern "C" {
@@ -54,8 +55,8 @@ struct task {
};
struct thread {
enum thread_state tr_state : 8;
enum thread_flags tr_flags : 8;
enum thread_state tr_state;
enum thread_flags tr_flags;
struct task *tr_parent;
unsigned int tr_id;
@@ -65,15 +66,20 @@ struct thread {
cycles_t tr_quantum_cycles, tr_quantum_target;
cycles_t tr_total_cycles;
uintptr_t tr_sp, tr_bp;
struct queue_entry tr_threads;
struct queue_entry tr_rqentry;
void *tr_kstack;
struct vm_page *tr_kstack;
};
struct runqueue {
struct queue rq_queues[PRIO_MAX];
uint32_t rq_readybits;
spin_lock_t rq_lock;
struct thread *rq_cur, *rq_idle;
};
extern kern_status_t sched_init(void);
@@ -81,13 +87,24 @@ extern void schedule(void);
extern void preempt_disable(void);
extern void preempt_enable(void);
extern void runqueue_init(struct runqueue *rq);
extern void rq_init(struct runqueue *rq);
extern struct thread *rq_dequeue(struct runqueue *rq);
extern void rq_enqueue(struct runqueue *rq, struct thread *thr);
/* Acquire rq's spinlock with interrupts disabled; prior IRQ state is saved in *flags. */
static inline void rq_lock(struct runqueue *rq, unsigned long *flags)
{
	spin_lock_irqsave(&rq->rq_lock, flags);
}
/* Release rq's spinlock and restore the IRQ state captured by the matching rq_lock(). */
static inline void rq_unlock(struct runqueue *rq, unsigned long flags)
{
	spin_unlock_irqrestore(&rq->rq_lock, flags);
}
extern struct task *task_alloc(void);
static inline struct task *task_ref(struct task *task) { return (struct task *)object_data(object_ref(object_header(task))); }
static inline void task_deref(struct task *task) { object_deref(object_header(task)); }
extern struct task *task_from_pid(unsigned int pid);
extern struct task *kernel_task(void);
extern struct task *idle_task(void);
extern cycles_t default_quantum(void);
extern bool need_resched(void);
@@ -108,6 +125,8 @@ static inline void task_unlock_irqrestore(struct task *task, unsigned long flags
}
extern struct thread *thread_alloc(void);
extern kern_status_t thread_init(struct thread *thr, uintptr_t ip);
extern int thread_priority(struct thread *thr);
#ifdef __cplusplus
}

View File

@@ -37,6 +37,8 @@ void kernel_init(uintptr_t arg)
run_all_tests();
current_thread()->tr_state = THREAD_SLEEPING;
while (1) {
ml_cpu_pause();
}

View File

@@ -2,8 +2,10 @@
#include <socks/sched.h>
#include <socks/cpu.h>
#include <socks/printk.h>
#include <socks/machine/thread.h>
extern kern_status_t setup_kernel_task(void);
extern kern_status_t setup_idle_task(void);
extern kern_status_t task_object_type_init(void);
extern kern_status_t thread_object_type_init(void);
@@ -28,11 +30,18 @@ kern_status_t sched_init(void)
return status;
}
status = setup_idle_task();
if (status != KERN_OK) {
return status;
}
struct thread *this_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&kernel_task()->t_threads));
struct thread *idle_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&idle_task()->t_threads));
struct cpu_data *this_cpu = get_this_cpu();
runqueue_init(&this_cpu->c_rq);
this_cpu->c_current_thread = this_thread;
rq_init(&this_cpu->c_rq);
this_cpu->c_rq.rq_cur = this_thread;
this_cpu->c_rq.rq_idle = idle_thread;
put_cpu(this_cpu);
start_charge_period();
@@ -40,6 +49,58 @@ kern_status_t sched_init(void)
return status;
}
/*
 * Hand the CPU from old to new.  Switches the address space first
 * when the two threads' parent tasks use different pmaps, then lets
 * the arch switch_to stub swap register state and stacks.  Returns
 * on old's stack the next time old is scheduled back in.
 */
void context_switch(struct thread *old, struct thread *new)
{
	if (old->tr_parent->t_pmap != new->tr_parent->t_pmap) {
		pmap_switch(new->tr_parent->t_pmap);
	}
	switch_to(old, new);
}
/*
 * Core scheduler: pick the next thread on this CPU's runqueue and
 * switch to it.  Runs with interrupts disabled for the whole pick.
 * On an actual switch, interrupt state is restored by the RFLAGS
 * popped inside switch_to; on the no-switch path we re-enable here.
 */
void __schedule(void)
{
	ml_int_disable();
	/* NOTE(review): no matching put_cpu() for this get_this_cpu() —
	 * confirm that is intentional across the context switch. */
	struct cpu_data *this_cpu = get_this_cpu();
	struct runqueue *rq = &this_cpu->c_rq;
	unsigned long flags;
	rq_lock(rq, &flags);
	struct thread *prev = rq->rq_cur;
	/* prev's timeslice is over: reset accounting and the resched mark. */
	prev->tr_quantum_cycles = 0;
	prev->tr_flags &= ~THREAD_F_NEED_RESCHED;
	enum thread_state prev_state = READ_ONCE(prev->tr_state);
	/* A still-runnable prev goes back on the queue; the idle thread never does. */
	if (prev_state == THREAD_READY && prev != rq->rq_idle) {
		rq_enqueue(rq, prev);
	}
	struct thread *next = rq_dequeue(rq);
	if (!next) {
		/* Nothing ready: fall back to this CPU's idle thread. */
		next = rq->rq_idle;
	}
	rq->rq_cur = next;
	/* Drop the lock before switching; IRQs remain off because flags
	 * was captured after ml_int_disable(), so the switch itself
	 * cannot be preempted. */
	rq_unlock(rq, flags);
	if (prev != next) {
		context_switch(prev, next);
	} else {
		ml_int_enable();
	}
}
/*
 * Public scheduling entry point.  Performs at least one pass of
 * __schedule(), then keeps rescheduling while a wakeup has re-marked
 * the running thread with THREAD_F_NEED_RESCHED.
 */
void schedule(void)
{
	__schedule();
	while (need_resched()) {
		__schedule();
	}
}
void start_charge_period(void)
{
struct thread *self = current_thread();

View File

@@ -1,8 +1,47 @@
#include <socks/sched.h>
#include <socks/percpu.h>
void runqueue_init(struct runqueue *rq)
/* Bit i of rq_readybits set <=> rq_queues[i] is (believed) non-empty. */
#define PRIO_MASK(p) (((uint32_t)1) << (p))
/*
 * Index of the highest set bit of m, or -1 when m == 0 (the zero
 * guard also avoids __builtin_clz(0), which is undefined).  NOTE:
 * evaluates m twice — pass a side-effect-free expression.  Arguments
 * are parenthesized so compound expressions bind correctly (e.g.
 * without parens, "a | b > 0" would parse as "a | (b > 0)").
 */
#define FIRST_PRIO(m) ((m) > 0 ? (PRIO_MAX - __builtin_clz((m)) - 1) : -1)
/* Reset a runqueue: zero all per-priority queues and the ready bitmap, re-arm the lock. */
void rq_init(struct runqueue *rq)
{
	memset(rq, 0x00, sizeof *rq);
	rq->rq_lock = SPIN_LOCK_INIT;
}
/*
 * Pop and return the highest-priority ready thread, or NULL when no
 * thread is queued.  Caller must hold rq_lock.
 *
 * Fix: a stale ready bit (bit set while the per-priority queue is
 * empty) previously aborted the whole lookup and returned NULL even
 * though lower-priority threads were queued, spuriously idling the
 * CPU.  Stale bits are now cleared and the scan continues.
 */
struct thread *rq_dequeue(struct runqueue *rq)
{
	int prio;

	while ((prio = FIRST_PRIO(rq->rq_readybits)) != -1) {
		struct queue *q = &rq->rq_queues[prio];
		struct queue_entry *qe = queue_pop_front(q);
		if (!qe) {
			/* Bookkeeping drifted: bit set but queue empty.  Repair and retry. */
			rq->rq_readybits &= ~PRIO_MASK(prio);
			continue;
		}
		if (queue_empty(q)) {
			/* That was the last thread at this priority. */
			rq->rq_readybits &= ~PRIO_MASK(prio);
		}
		return QUEUE_CONTAINER(struct thread, tr_rqentry, qe);
	}
	return NULL;
}
/*
 * Append thr to the ready queue for its priority and mark that level
 * ready.  Caller must hold rq_lock.  Out-of-range priorities are
 * silently ignored.
 *
 * Fix: the bounds check was "prio > PRIO_MAX", letting prio ==
 * PRIO_MAX index one past the end of rq_queues[PRIO_MAX]; the valid
 * range is [0, PRIO_MAX).  Also reuse the already-fetched priority
 * instead of calling thread_priority() a second time.
 */
void rq_enqueue(struct runqueue *rq, struct thread *thr)
{
	int prio = thread_priority(thr);

	if (prio < 0 || prio >= PRIO_MAX) {
		return;
	}
	queue_push_back(&rq->rq_queues[prio], &thr->tr_rqentry);
	rq->rq_readybits |= PRIO_MASK(prio);
}

View File

@@ -1,4 +1,6 @@
#include <socks/locks.h>
#include <socks/printk.h>
#include <socks/clock.h>
#include <socks/sched.h>
#include <socks/object.h>
#include <socks/cpu.h>
@@ -10,6 +12,7 @@ static struct object_type task_type = {
};
static struct task *__kernel_task;
static struct task *__idle_task;
static spin_lock_t task_list_lock;
static struct btree task_list;
@@ -22,6 +25,20 @@ struct task *kernel_task(void)
return __kernel_task;
}
/* Return the singleton idle task created by setup_idle_task() (NULL before setup). */
struct task *idle_task(void)
{
	return __idle_task;
}
/*
 * Body of the idle thread: periodically wakes to log a heartbeat
 * (clock_wait(HZ) — presumably about one second; confirm HZ units),
 * otherwise pauses the CPU.  Never returns.
 */
static void __idle_function(void)
{
	while (1) {
		clock_wait(HZ);
		printk("idle");
		ml_cpu_pause();
	}
}
kern_status_t setup_kernel_task(void)
{
__kernel_task = task_alloc();
@@ -54,6 +71,69 @@ kern_status_t setup_kernel_task(void)
return KERN_OK;
}
/*
 * Create the system idle task and its single thread.  The idle
 * thread runs __idle_function and is only dispatched when a CPU's
 * runqueue is empty (sched_init installs it as rq_idle).
 *
 * Fixes: dropped the large commented-out copy of setup_kernel_task
 * that was left in the body; the thread_init() return value is now
 * checked (it allocates the kernel stack and can fail); the task
 * lock is narrowed to the t_threads insertion — the task is freshly
 * allocated and unpublished, so plain field initialization needs no
 * lock, and allocations no longer happen with IRQs disabled.
 */
kern_status_t setup_idle_task(void)
{
	__idle_task = task_alloc();
	if (!__idle_task) {
		return KERN_NO_MEMORY;
	}
	__idle_task->t_id = (unsigned int)-1;	/* reserved id, never a real pid */
	__idle_task->t_pmap = get_kernel_pmap();
	__idle_task->t_state = TASK_RUNNING;
	snprintf(__idle_task->t_name, sizeof __idle_task->t_name, "idle");
	struct thread *idle_thread = thread_alloc();
	if (!idle_thread) {
		task_deref(__idle_task);
		__idle_task = NULL;
		return KERN_NO_MEMORY;
	}
	idle_thread->tr_id = 0;
	idle_thread->tr_parent = __idle_task;
	kern_status_t status = thread_init(idle_thread, (uintptr_t)__idle_function);
	if (status != KERN_OK) {
		/* NOTE(review): idle_thread itself is not released here —
		 * confirm whether thread_free() is safe on a thread whose
		 * kstack allocation failed. */
		task_deref(__idle_task);
		__idle_task = NULL;
		return status;
	}
	/* Only the thread-list insertion needs the task lock. */
	unsigned long flags;
	task_lock_irqsave(__idle_task, &flags);
	queue_push_back(&__idle_task->t_threads, &idle_thread->tr_threads);
	task_unlock_irqrestore(__idle_task, flags);
	return KERN_OK;
}
kern_status_t task_object_type_init(void)
{
return object_type_register(&task_type);

View File

@@ -1,6 +1,7 @@
#include <socks/sched.h>
#include <socks/object.h>
#include <socks/cpu.h>
#include <socks/machine/thread.h>
static struct object_type thread_type = {
.ob_name = "thread",
@@ -24,6 +25,25 @@ struct thread *thread_alloc(void)
return t;
}
kern_status_t thread_init(struct thread *thr, uintptr_t ip)
{
thr->tr_prio = PRIO_NORMAL;
thr->tr_state = THREAD_READY;
thr->tr_quantum_target = default_quantum();
thr->tr_kstack = vm_page_alloc(THREAD_KSTACK_ORDER, VM_NORMAL);
if (!thr->tr_kstack) {
return KERN_NO_MEMORY;
}
thr->tr_sp = (uintptr_t)vm_page_get_vaddr(thr->tr_kstack) + vm_page_order_to_bytes(THREAD_KSTACK_ORDER);
thr->tr_bp = thr->tr_sp;
prepare_stack(ip, &thr->tr_sp);
return KERN_OK;
}
void thread_free(struct thread *thr)
{
@@ -36,7 +56,7 @@ struct thread *current_thread(void)
return NULL;
}
struct thread *out = cpu->c_current_thread;
struct thread *out = cpu->c_rq.rq_cur;
put_cpu(cpu);
return out;
}
@@ -45,3 +65,8 @@ bool need_resched(void)
{
return (current_thread()->tr_flags & THREAD_F_NEED_RESCHED) != 0;
}
/* Current scheduling priority of thr, used as the runqueue level index. */
int thread_priority(struct thread *thr)
{
	return thr->tr_prio;
}