2023-03-06 11:08:26 +00:00
|
|
|
#include <socks/object.h>
|
|
|
|
|
#include <socks/sched.h>
|
2023-04-30 21:09:36 +01:00
|
|
|
#include <socks/clock.h>
|
2023-03-18 19:35:23 +00:00
|
|
|
#include <socks/cpu.h>
|
2023-03-06 11:08:26 +00:00
|
|
|
#include <socks/printk.h>
|
2023-04-30 14:27:57 +01:00
|
|
|
#include <socks/machine/thread.h>
|
2023-03-06 11:08:26 +00:00
|
|
|
|
2023-03-09 19:55:52 +00:00
|
|
|
extern kern_status_t setup_kernel_task(void);
|
2023-04-30 14:27:57 +01:00
|
|
|
extern kern_status_t setup_idle_task(void);
|
2023-03-09 19:55:52 +00:00
|
|
|
extern kern_status_t task_object_type_init(void);
|
|
|
|
|
extern kern_status_t thread_object_type_init(void);
|
2023-03-06 11:08:26 +00:00
|
|
|
|
2023-04-28 21:05:48 +01:00
|
|
|
/* System-wide default scheduling quantum, in cycles.  Zero here at
   load time; presumably set during boot elsewhere — read via
   default_quantum() below. */
static cycles_t __default_quantum = 0;
|
|
|
|
|
|
2023-03-06 11:08:26 +00:00
|
|
|
kern_status_t sched_init(void)
|
|
|
|
|
{
|
|
|
|
|
kern_status_t status = KERN_OK;
|
|
|
|
|
|
2023-03-09 19:55:52 +00:00
|
|
|
status = task_object_type_init();
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
status = thread_object_type_init();
|
2023-03-06 11:08:26 +00:00
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-09 19:55:52 +00:00
|
|
|
status = setup_kernel_task();
|
2023-03-06 11:08:26 +00:00
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
2023-03-18 19:35:23 +00:00
|
|
|
|
2023-04-30 14:27:57 +01:00
|
|
|
status = setup_idle_task();
|
|
|
|
|
if (status != KERN_OK) {
|
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct thread *this_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&kernel_task()->t_threads));
|
2023-04-30 14:27:57 +01:00
|
|
|
struct thread *idle_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&idle_task()->t_threads));
|
2023-03-28 21:39:59 +01:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct cpu_data *this_cpu = get_this_cpu();
|
2023-04-30 14:27:57 +01:00
|
|
|
rq_init(&this_cpu->c_rq);
|
|
|
|
|
this_cpu->c_rq.rq_cur = this_thread;
|
|
|
|
|
this_cpu->c_rq.rq_idle = idle_thread;
|
2023-03-18 19:35:23 +00:00
|
|
|
put_cpu(this_cpu);
|
|
|
|
|
|
2023-04-28 21:05:48 +01:00
|
|
|
start_charge_period();
|
|
|
|
|
|
2023-03-06 11:08:26 +00:00
|
|
|
return status;
|
|
|
|
|
}
|
2023-04-28 21:05:48 +01:00
|
|
|
|
2023-04-30 21:09:36 +01:00
|
|
|
static void expire_timers(struct cpu_data *cpu)
|
|
|
|
|
{
|
|
|
|
|
queue_foreach(struct timer, timer, &cpu->c_timers, t_entry) {
|
|
|
|
|
if (timer->t_expiry <= clock_ticks) {
|
|
|
|
|
timer->t_callback(timer);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-30 14:27:57 +01:00
|
|
|
void context_switch(struct thread *old, struct thread *new)
|
|
|
|
|
{
|
|
|
|
|
if (old->tr_parent->t_pmap != new->tr_parent->t_pmap) {
|
|
|
|
|
pmap_switch(new->tr_parent->t_pmap);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
switch_to(old, new);
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-30 21:09:36 +01:00
|
|
|
void __schedule(enum sched_mode mode)
|
2023-04-30 14:27:57 +01:00
|
|
|
{
|
2023-05-01 08:27:18 +01:00
|
|
|
ml_int_disable();
|
|
|
|
|
|
2023-04-30 14:27:57 +01:00
|
|
|
struct cpu_data *this_cpu = get_this_cpu();
|
|
|
|
|
struct runqueue *rq = &this_cpu->c_rq;
|
|
|
|
|
|
2023-04-30 21:09:36 +01:00
|
|
|
expire_timers(this_cpu);
|
|
|
|
|
|
2023-04-30 14:27:57 +01:00
|
|
|
unsigned long flags;
|
|
|
|
|
rq_lock(rq, &flags);
|
|
|
|
|
|
2023-05-01 08:27:18 +01:00
|
|
|
/* subtrace one to compensate for the fact that get_this_cpu()
|
|
|
|
|
increases preempt_count */
|
|
|
|
|
int preempt = READ_ONCE(this_cpu->c_preempt_count) - 1;
|
|
|
|
|
|
2023-04-30 21:09:36 +01:00
|
|
|
put_cpu(this_cpu);
|
|
|
|
|
|
2023-04-30 14:27:57 +01:00
|
|
|
struct thread *prev = rq->rq_cur;
|
|
|
|
|
prev->tr_flags &= ~THREAD_F_NEED_RESCHED;
|
2023-04-30 21:09:36 +01:00
|
|
|
if (prev->tr_quantum_cycles >= prev->tr_quantum_target) {
|
|
|
|
|
prev->tr_quantum_cycles = 0;
|
|
|
|
|
}
|
2023-04-30 14:27:57 +01:00
|
|
|
|
2023-05-01 08:27:18 +01:00
|
|
|
if (preempt > 0) {
|
|
|
|
|
rq_unlock(rq, flags);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-30 14:27:57 +01:00
|
|
|
enum thread_state prev_state = READ_ONCE(prev->tr_state);
|
|
|
|
|
|
2023-04-30 21:09:36 +01:00
|
|
|
if ((mode == SCHED_IRQ || prev_state == THREAD_READY) && prev != rq->rq_idle) {
|
2023-04-30 14:27:57 +01:00
|
|
|
rq_enqueue(rq, prev);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct thread *next = rq_dequeue(rq);
|
|
|
|
|
|
|
|
|
|
if (!next) {
|
|
|
|
|
next = rq->rq_idle;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-30 21:09:36 +01:00
|
|
|
if (mode == SCHED_NORMAL) {
|
|
|
|
|
next->tr_state = THREAD_READY;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-30 14:27:57 +01:00
|
|
|
rq->rq_cur = next;
|
|
|
|
|
rq_unlock(rq, flags);
|
|
|
|
|
|
|
|
|
|
if (prev != next) {
|
|
|
|
|
context_switch(prev, next);
|
|
|
|
|
}
|
2023-05-01 08:27:18 +01:00
|
|
|
|
|
|
|
|
ml_int_enable();
|
2023-04-30 14:27:57 +01:00
|
|
|
}
|
|
|
|
|
|
2023-04-30 21:09:36 +01:00
|
|
|
/*
 * schedule - reschedule, repeating for as long as another reschedule
 * is still pending afterwards.
 */
void schedule(enum sched_mode mode)
{
    for (;;) {
        __schedule(mode);
        if (!need_resched()) {
            break;
        }
    }
}
|
|
|
|
|
|
2023-04-28 21:05:48 +01:00
|
|
|
void start_charge_period(void)
|
|
|
|
|
{
|
|
|
|
|
struct thread *self = current_thread();
|
|
|
|
|
if (!self) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
self->tr_charge_period_start = get_cycles();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void end_charge_period(void)
|
|
|
|
|
{
|
|
|
|
|
preempt_disable();
|
|
|
|
|
struct thread *self = current_thread();
|
|
|
|
|
if (!self) {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cycles_t end = get_cycles();
|
|
|
|
|
preempt_enable();
|
|
|
|
|
|
|
|
|
|
cycles_t charge = cycles_diff(self->tr_charge_period_start, end);
|
|
|
|
|
|
|
|
|
|
self->tr_quantum_cycles += charge;
|
|
|
|
|
self->tr_total_cycles += charge;
|
|
|
|
|
|
|
|
|
|
if (self->tr_quantum_cycles >= self->tr_quantum_target) {
|
|
|
|
|
self->tr_flags |= THREAD_F_NEED_RESCHED;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
self->tr_charge_period_start = 0;
|
|
|
|
|
|
|
|
|
|
//printk("%llu cycles charged to %s/%u", charge, self->tr_parent->t_name, self->tr_parent->t_id);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cycles_t default_quantum(void)
|
|
|
|
|
{
|
|
|
|
|
return __default_quantum;
|
|
|
|
|
}
|