#include <mango/object.h>
|
|
#include <mango/sched.h>
|
|
#include <mango/clock.h>
|
|
#include <mango/cpu.h>
|
|
#include <mango/printk.h>
|
|
#include <mango/machine/thread.h>
|
|
|
|
extern kern_status_t setup_kernel_task(void);
|
|
extern kern_status_t setup_idle_task(void);
|
|
extern kern_status_t task_object_type_init(void);
|
|
extern kern_status_t thread_object_type_init(void);
|
|
extern kern_status_t global_wq_init(void);
|
|
|
|
static cycles_t __default_quantum = 0;
|
|
|
|
kern_status_t sched_init(void)
|
|
{
|
|
kern_status_t status = KERN_OK;
|
|
|
|
status = task_object_type_init();
|
|
if (status != KERN_OK) {
|
|
return status;
|
|
}
|
|
|
|
status = thread_object_type_init();
|
|
if (status != KERN_OK) {
|
|
return status;
|
|
}
|
|
|
|
status = setup_kernel_task();
|
|
if (status != KERN_OK) {
|
|
return status;
|
|
}
|
|
|
|
status = setup_idle_task();
|
|
if (status != KERN_OK) {
|
|
return status;
|
|
}
|
|
|
|
struct thread *this_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&kernel_task()->t_threads));
|
|
struct thread *idle_thread = QUEUE_CONTAINER(struct thread, tr_threads, queue_first(&idle_task()->t_threads));
|
|
|
|
struct cpu_data *this_cpu = get_this_cpu();
|
|
rq_init(&this_cpu->c_rq);
|
|
this_cpu->c_rq.rq_cur = this_thread;
|
|
this_cpu->c_rq.rq_idle = idle_thread;
|
|
put_cpu(this_cpu);
|
|
|
|
global_wq_init();
|
|
|
|
start_charge_period();
|
|
|
|
return status;
|
|
}
|
|
|
|
/*
 * expire_timers - run callbacks for all expired timers on @cpu.
 *
 * Walks the per-CPU timer list and invokes t_callback on every timer
 * whose expiry tick has passed (t_expiry <= clock_ticks).
 *
 * NOTE(review): the list is walked with queue_foreach while callbacks
 * run; if a callback removes its own timer from c_timers this is only
 * safe if queue_foreach tolerates removal of the current entry —
 * confirm. Expired timers are not dequeued here, so presumably the
 * callback re-arms or removes the timer itself — verify, otherwise an
 * expired timer fires on every call.
 */
static void expire_timers(struct cpu_data *cpu)
{
	queue_foreach(struct timer, timer, &cpu->c_timers, t_entry) {
		if (timer->t_expiry <= clock_ticks) {
			timer->t_callback(timer);
		}
	}
}
|
|
|
|
/*
 * context_switch - switch execution from @old to @new.
 *
 * Switches the address space first — but only when the two threads'
 * parent tasks use different pmaps, avoiding a redundant pmap switch
 * for threads of the same task — then performs the machine-level
 * register/stack switch via switch_to().
 */
void context_switch(struct thread *old, struct thread *new)
{
	if (old->tr_parent->t_pmap != new->tr_parent->t_pmap) {
		pmap_switch(new->tr_parent->t_pmap);
	}

	switch_to(old, new);
}
|
|
|
|
void __schedule(enum sched_mode mode)
|
|
{
|
|
ml_int_disable();
|
|
|
|
struct cpu_data *this_cpu = get_this_cpu();
|
|
struct runqueue *rq = &this_cpu->c_rq;
|
|
|
|
expire_timers(this_cpu);
|
|
|
|
unsigned long flags;
|
|
rq_lock(rq, &flags);
|
|
|
|
/* subtrace one to compensate for the fact that get_this_cpu()
|
|
increases preempt_count */
|
|
int preempt = READ_ONCE(this_cpu->c_preempt_count) - 1;
|
|
|
|
put_cpu(this_cpu);
|
|
|
|
struct thread *prev = rq->rq_cur;
|
|
prev->tr_flags &= ~THREAD_F_NEED_RESCHED;
|
|
if (prev->tr_quantum_cycles >= prev->tr_quantum_target) {
|
|
prev->tr_quantum_cycles = 0;
|
|
}
|
|
|
|
if (preempt > 0) {
|
|
rq_unlock(rq, flags);
|
|
return;
|
|
}
|
|
|
|
enum thread_state prev_state = READ_ONCE(prev->tr_state);
|
|
|
|
if ((mode == SCHED_IRQ || prev_state == THREAD_READY) && prev != rq->rq_idle) {
|
|
rq_enqueue(rq, prev);
|
|
}
|
|
|
|
struct thread *next = rq_dequeue(rq);
|
|
|
|
if (!next) {
|
|
next = rq->rq_idle;
|
|
}
|
|
|
|
if (mode == SCHED_NORMAL) {
|
|
next->tr_state = THREAD_READY;
|
|
}
|
|
|
|
rq->rq_cur = next;
|
|
rq_unlock(rq, flags);
|
|
|
|
if (prev != next) {
|
|
context_switch(prev, next);
|
|
}
|
|
|
|
ml_int_enable();
|
|
}
|
|
|
|
/*
 * schedule - invoke the core scheduler, repeating as long as the
 * thread left running still has a pending reschedule request.
 */
void schedule(enum sched_mode mode)
{
	for (;;) {
		__schedule(mode);
		if (!need_resched()) {
			break;
		}
	}
}
|
|
|
|
/*
 * select_rq_for_thread - pick the least-loaded runqueue for placement.
 * @thr: the thread to be placed (currently unused by the heuristic).
 *
 * Scans every available and online CPU and returns the runqueue with
 * the fewest runnable threads, counting the currently running thread
 * as load unless it is the idle thread. Returns NULL only if no CPU
 * has a runqueue.
 *
 * NOTE(review): each runqueue's count is sampled under its lock but
 * compared after the lock is dropped, so the chosen runqueue's load
 * may be stale by the time the caller enqueues — acceptable for a
 * placement heuristic, not a guarantee.
 */
struct runqueue *select_rq_for_thread(struct thread *thr)
{
	struct runqueue *best_rq = NULL;
	unsigned int best_nthreads = 0;
	unsigned long flags;

	unsigned int nr_cpu = cpu_get_highest_available() + 1;
	for (unsigned int i = 0; i < nr_cpu; i++) {
		if (!cpu_is_available(i) || !cpu_is_online(i)) {
			continue;
		}

		struct runqueue *rq = cpu_rq(i);
		if (!rq) {
			continue;
		}

		rq_lock(rq, &flags);
		unsigned int nthreads = rq->rq_nthreads;
		/* A non-idle running thread counts as one unit of load. */
		if (rq->rq_cur && rq->rq_cur != rq->rq_idle) {
			nthreads++;
		}
		rq_unlock(rq, flags);

		if (!best_rq || nthreads < best_nthreads) {
			best_rq = rq;
			best_nthreads = nthreads;
		}
	}

	return best_rq;
}
|
|
|
|
/*
 * schedule_thread_on_cpu - enqueue @thr on a runqueue.
 *
 * Uses the thread's existing runqueue binding (tr_rq) when set;
 * otherwise selects the least-loaded online runqueue.
 *
 * NOTE(review): if no runqueue can be found, the thread is silently
 * not enqueued and the caller gets no error. Also, thr->tr_rq is not
 * updated here after selection — presumably rq_enqueue() records the
 * binding; verify.
 */
void schedule_thread_on_cpu(struct thread *thr)
{
	struct runqueue *rq = thr->tr_rq;
	if (!rq) {
		rq = select_rq_for_thread(thr);
	}

	if (rq) {
		unsigned long flags;
		rq_lock(rq, &flags);
		rq_enqueue(rq, thr);
		rq_unlock(rq, flags);
	}
}
|
|
|
|
void start_charge_period(void)
|
|
{
|
|
struct thread *self = current_thread();
|
|
if (!self) {
|
|
return;
|
|
}
|
|
|
|
self->tr_charge_period_start = get_cycles();
|
|
}
|
|
|
|
void end_charge_period(void)
|
|
{
|
|
preempt_disable();
|
|
struct thread *self = current_thread();
|
|
if (!self) {
|
|
return;
|
|
}
|
|
|
|
cycles_t end = get_cycles();
|
|
preempt_enable();
|
|
|
|
cycles_t charge = cycles_diff(self->tr_charge_period_start, end);
|
|
|
|
self->tr_quantum_cycles += charge;
|
|
self->tr_total_cycles += charge;
|
|
|
|
if (self->tr_quantum_cycles >= self->tr_quantum_target) {
|
|
self->tr_flags |= THREAD_F_NEED_RESCHED;
|
|
}
|
|
|
|
self->tr_charge_period_start = 0;
|
|
|
|
//printk("%llu cycles charged to %s/%u", charge, self->tr_parent->t_name, self->tr_parent->t_id);
|
|
}
|
|
|
|
/*
 * default_quantum - return the default scheduling quantum in cycles.
 *
 * NOTE(review): __default_quantum is initialized to 0 and no writer is
 * visible in this file — confirm it is set during clock/scheduler
 * calibration elsewhere.
 */
cycles_t default_quantum(void)
{
	return __default_quantum;
}
|