sched: add timer tasks and schedule_timeout()

This commit is contained in:
2023-04-30 21:09:36 +01:00
parent 085c3d2a89
commit 8a0291c9b3
8 changed files with 132 additions and 18 deletions

View File

@@ -242,7 +242,7 @@ void irq_dispatch(struct cpu_context *regs)
}
if (need_resched()) {
schedule();
schedule(SCHED_IRQ);
}
start_charge_period();

View File

@@ -1,8 +1,6 @@
.code64
.extern THREAD_sp
//TASK_threadsp:
//.long 32
.global switch_to
.type switch_to, @function

View File

@@ -20,6 +20,7 @@ struct cpu_data {
unsigned int c_preempt_count;
struct runqueue c_rq;
struct queue c_timers;
};
/* maximum number of processor cores that the kernel can support.

View File

@@ -41,6 +41,17 @@ enum sched_priority {
PRIO_REALTIME = 24,
};
enum sched_mode {
/* used when calling from non-interrupt context.
threads that aren't in state THREAD_READY are
removed from the runqueue. */
SCHED_NORMAL = 0,
/* used when calling from interrupt context.
threads that aren't in state THREAD_READY are
still added to the runqueue. */
SCHED_IRQ = 1,
};
struct task {
struct task *t_parent;
unsigned int t_id;
@@ -82,8 +93,26 @@ struct runqueue {
struct thread *rq_cur, *rq_idle;
};
struct timer {
struct queue_entry t_entry;
struct cpu_data *t_cpu;
struct thread *t_owner;
unsigned long t_expiry;
void(*t_callback)(struct timer *);
};
struct wait_item {
struct thread *w_thread;
struct queue_entry w_entry;
};
struct waitqueue {
struct queue wq_waiters;
spin_lock_t wq_lock;
};
extern kern_status_t sched_init(void);
extern void schedule(void);
extern void schedule(enum sched_mode mode);
extern void preempt_disable(void);
extern void preempt_enable(void);
@@ -128,6 +157,11 @@ extern struct thread *thread_alloc(void);
extern kern_status_t thread_init(struct thread *thr, uintptr_t ip);
extern int thread_priority(struct thread *thr);
extern void add_timer(struct timer *timer);
extern void remove_timer(struct timer *timer);
extern unsigned long schedule_timeout(unsigned long clock_ticks);
extern unsigned long milli_sleep(unsigned long ms);
#ifdef __cplusplus
}
#endif

View File

@@ -1,5 +1,6 @@
#include <stdint.h>
#include <socks/init.h>
#include <socks/clock.h>
#include <socks/panic.h>
#include <socks/test.h>
#include <socks/printk.h>
@@ -37,9 +38,8 @@ void kernel_init(uintptr_t arg)
run_all_tests();
current_thread()->tr_state = THREAD_SLEEPING;
while (1) {
ml_cpu_pause();
schedule_timeout(HZ);
printk("tick");
}
}

View File

@@ -1,5 +1,6 @@
#include <socks/object.h>
#include <socks/sched.h>
#include <socks/clock.h>
#include <socks/cpu.h>
#include <socks/printk.h>
#include <socks/machine/thread.h>
@@ -49,6 +50,15 @@ kern_status_t sched_init(void)
return status;
}
/* Walk CPU's pending-timer queue and fire the callback of every timer
 * whose expiry tick has been reached (t_expiry <= clock_ticks).
 * Called from __schedule() after interrupts have been disabled.
 * NOTE(review): expired timers are NOT unlinked here, so a timer whose
 * callback does not remove it will fire again on every subsequent
 * schedule() until remove_timer() is called — confirm this is the
 * intended contract (schedule_timeout() does remove its own timer). */
static void expire_timers(struct cpu_data *cpu)
{
queue_foreach(struct timer, timer, &cpu->c_timers, t_entry) {
if (timer->t_expiry <= clock_ticks) {
timer->t_callback(timer);
}
}
}
void context_switch(struct thread *old, struct thread *new)
{
if (old->tr_parent->t_pmap != new->tr_parent->t_pmap) {
@@ -58,23 +68,27 @@ void context_switch(struct thread *old, struct thread *new)
switch_to(old, new);
}
void __schedule(void)
void __schedule(enum sched_mode mode)
{
ml_int_disable();
struct cpu_data *this_cpu = get_this_cpu();
struct runqueue *rq = &this_cpu->c_rq;
expire_timers(this_cpu);
unsigned long flags;
rq_lock(rq, &flags);
put_cpu(this_cpu);
struct thread *prev = rq->rq_cur;
prev->tr_quantum_cycles = 0;
prev->tr_flags &= ~THREAD_F_NEED_RESCHED;
if (prev->tr_quantum_cycles >= prev->tr_quantum_target) {
prev->tr_quantum_cycles = 0;
}
enum thread_state prev_state = READ_ONCE(prev->tr_state);
if (prev_state == THREAD_READY && prev != rq->rq_idle) {
if ((mode == SCHED_IRQ || prev_state == THREAD_READY) && prev != rq->rq_idle) {
rq_enqueue(rq, prev);
}
@@ -84,20 +98,22 @@ void __schedule(void)
next = rq->rq_idle;
}
if (mode == SCHED_NORMAL) {
next->tr_state = THREAD_READY;
}
rq->rq_cur = next;
rq_unlock(rq, flags);
if (prev != next) {
context_switch(prev, next);
} else {
ml_int_enable();
}
}
void schedule(void)
void schedule(enum sched_mode mode)
{
do {
__schedule();
__schedule(mode);
} while (need_resched());
}

View File

@@ -33,8 +33,6 @@ struct task *idle_task(void)
static void __idle_function(void)
{
while (1) {
clock_wait(HZ);
printk("idle");
ml_cpu_pause();
}
}

67
sched/timer.c Normal file
View File

@@ -0,0 +1,67 @@
#include <socks/sched.h>
#include <socks/printk.h>
#include <socks/cpu.h>
#include <socks/clock.h>
/* Timer callback installed by schedule_timeout(): wake the timer's
 * owner thread by marking it runnable and placing it back on this
 * CPU's runqueue.  Runs from expire_timers() on the CPU that owns the
 * timer queue. */
static void timeout_expiry(struct timer *timer)
{
	struct thread *thread = timer->t_owner;
	struct cpu_data *cpu = get_this_cpu();
	struct runqueue *rq = &cpu->c_rq;
	unsigned long flags;

	thread->tr_state = THREAD_READY;
	rq_lock(rq, &flags);
	rq_enqueue(rq, thread);
	rq_unlock(rq, flags);
	/* Balance the get_this_cpu() above — add_timer() and __schedule()
	 * follow the same get/put pairing; the original leaked the
	 * reference. */
	put_cpu(cpu);
}
/* Arm TIMER on the calling CPU: record which CPU owns it, then append
 * it to that CPU's pending-timer queue so expire_timers() will see it. */
void add_timer(struct timer *timer)
{
	struct cpu_data *owner = get_this_cpu();

	timer->t_cpu = owner;
	queue_push_back(&owner->c_timers, &timer->t_entry);
	put_cpu(owner);
}
/* Detach TIMER from the timer queue of the CPU it was armed on.
 * Safe to call on a timer that was never armed or was already removed
 * (t_cpu == NULL).
 *
 * Preemption is disabled *before* the t_cpu test: the original checked
 * first and disabled afterwards, leaving a window in which the thread
 * could be preempted between the check and the queue_delete(). */
void remove_timer(struct timer *timer)
{
	preempt_disable();
	if (timer->t_cpu) {
		queue_delete(&timer->t_cpu->c_timers, &timer->t_entry);
		timer->t_cpu = NULL;
	}
	preempt_enable();
}
/* Put the calling thread to sleep for at most TICKS clock ticks.
 * A stack-allocated timer is armed on this CPU; timeout_expiry() wakes
 * the thread when the expiry tick is reached.
 *
 * Returns the number of ticks that had NOT yet elapsed when the thread
 * woke (0 when the full timeout expired).  The original returned a
 * hard-coded 0, which made milli_sleep()'s remaining-time result
 * always 0 even on an early wakeup. */
unsigned long schedule_timeout(unsigned long ticks)
{
	struct timer timer;
	struct thread *self = current_thread();

	timer.t_entry = QUEUE_ENTRY_INIT;
	timer.t_expiry = clock_ticks + ticks;
	timer.t_owner = self;
	timer.t_callback = timeout_expiry;

	/* Mark ourselves sleeping before arming the timer so that the
	 * subsequent schedule() takes us off the CPU. */
	self->tr_state = THREAD_SLEEPING;
	add_timer(&timer);
	schedule(SCHED_NORMAL);
	remove_timer(&timer);

	/* Remaining ticks: positive if we were woken before expiry. */
	return timer.t_expiry > clock_ticks ? timer.t_expiry - clock_ticks : 0;
}
/* Sleep for at least MS milliseconds.  Returns the number of
 * milliseconds that had not yet elapsed when the thread woke (0 when
 * the full interval passed).
 *
 * The ms->tick conversion rounds UP: the original truncated, so a
 * nonzero MS shorter than one tick (e.g. 1 ms at HZ=100) collapsed to
 * a zero-tick, i.e. no-op, timeout. */
unsigned long milli_sleep(unsigned long ms)
{
	unsigned long ticks = (ms * HZ + 1000 - 1) / 1000;

	ticks = schedule_timeout(ticks);
	return (ticks * 1000) / HZ;
}