/*
 * mango/sched/core.c — scheduler core: init, context switch, the main
 * __schedule() loop, runqueue selection, and CPU-time charging.
 */
#include <kernel/clock.h>
#include <kernel/cpu.h>
#include <kernel/machine/thread.h>
#include <kernel/object.h>
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/vm-region.h>
/* Sub-initializers defined in sibling translation units of the scheduler. */
extern kern_status_t setup_kernel_task(void);
extern kern_status_t setup_idle_task(void);
extern kern_status_t task_object_type_init(void);
extern kern_status_t thread_object_type_init(void);
extern kern_status_t global_wq_init(void);
/* Default per-thread quantum in cycles; 0 until configured — presumably set
 * elsewhere in this file or at clock calibration time (TODO confirm). */
static cycles_t __default_quantum = 0;
/*
 * sched_init - bring up the scheduler on the boot CPU.
 *
 * Initializes the task and thread object types, creates the kernel and
 * idle tasks, installs their first threads as the boot CPU's current and
 * idle threads, initializes the global workqueue, and opens the first
 * charge period.
 *
 * Returns KERN_OK on success, or the status of the first sub-initializer
 * that fails.
 */
kern_status_t sched_init(void)
{
	kern_status_t status = KERN_OK;
	status = task_object_type_init();
	if (status != KERN_OK) {
		return status;
	}
	status = thread_object_type_init();
	if (status != KERN_OK) {
		return status;
	}
	status = setup_kernel_task();
	if (status != KERN_OK) {
		return status;
	}
	status = setup_idle_task();
	if (status != KERN_OK) {
		return status;
	}
	/* The kernel/idle tasks each hold exactly one thread at this point:
	 * the first entry of their t_threads list. */
	struct thread *this_thread = QUEUE_CONTAINER(
		struct thread,
		tr_parent_entry,
		queue_first(&kernel_task()->t_threads));
	struct thread *idle_thread = QUEUE_CONTAINER(
		struct thread,
		tr_parent_entry,
		queue_first(&idle_task()->t_threads));
	struct cpu_data *this_cpu = get_this_cpu();
	rq_init(&this_cpu->c_rq);
	this_cpu->c_rq.rq_cur = this_thread;
	this_cpu->c_rq.rq_idle = idle_thread;
	put_cpu(this_cpu);
	/* Fix: the original discarded global_wq_init()'s status while
	 * checking every other initializer — propagate failure instead. */
	status = global_wq_init();
	if (status != KERN_OK) {
		return status;
	}
	start_charge_period();
	return status;
}
static void expire_timers(struct cpu_data *cpu)
{
2026-02-08 12:17:27 +00:00
queue_foreach(struct timer, timer, &cpu->c_timers, t_entry)
{
if (timer->t_expiry <= clock_ticks) {
timer->t_callback(timer);
}
}
}
/*
 * context_switch - switch this CPU from thread @old to thread @new.
 *
 * Saves the CPU block's kernel/user stack pointers into @old, switches the
 * address space if the two threads' parent tasks use different pmaps, loads
 * @new's stack pointers into the CPU block, and finally performs the
 * machine-level register switch.  Statement order is load-bearing: stacks
 * must be saved before pmap_switch() and restored before ml_thread_switch().
 *
 * NOTE(review): execution resumes after ml_thread_switch() only when @old
 * is scheduled back in — presumably on the same call stack; confirm against
 * the machine-layer contract.
 */
void context_switch(struct thread *old, struct thread *new)
{
	struct ml_cpu_block *this_cpu = ml_this_cpu();
	old->tr_cpu_kernel_sp = ml_cpu_block_get_kstack(this_cpu);
	old->tr_cpu_user_sp = ml_cpu_block_get_ustack(this_cpu);
	pmap_t old_pmap = old->tr_parent->t_pmap;
	pmap_t new_pmap = new->tr_parent->t_pmap;
	/* Skip the (expensive) address-space switch when both threads share
	 * the same pmap, e.g. two threads of one task. */
	if (old_pmap != new_pmap) {
		pmap_switch(new_pmap);
	}
	ml_cpu_block_set_kstack(this_cpu, new->tr_cpu_kernel_sp);
	ml_cpu_block_set_ustack(this_cpu, new->tr_cpu_user_sp);
	ml_thread_switch(old, new);
}
void __schedule(enum sched_mode mode)
{
ml_int_disable();
struct cpu_data *this_cpu = get_this_cpu();
struct runqueue *rq = &this_cpu->c_rq;
expire_timers(this_cpu);
unsigned long flags;
rq_lock(rq, &flags);
/* subtrace one to compensate for the fact that get_this_cpu()
increases preempt_count */
int preempt = READ_ONCE(this_cpu->c_preempt_count) - 1;
put_cpu(this_cpu);
struct thread *prev = rq->rq_cur;
prev->tr_flags &= ~THREAD_F_NEED_RESCHED;
if (prev->tr_quantum_cycles >= prev->tr_quantum_target) {
prev->tr_quantum_cycles = 0;
}
if (preempt > 0) {
rq_unlock(rq, flags);
return;
}
enum thread_state prev_state = READ_ONCE(prev->tr_state);
2026-02-08 12:17:27 +00:00
if ((mode == SCHED_IRQ || prev_state == THREAD_READY)
&& prev != rq->rq_idle) {
rq_enqueue(rq, prev);
}
struct thread *next = rq_dequeue(rq);
if (!next) {
next = rq->rq_idle;
}
if (mode == SCHED_NORMAL) {
next->tr_state = THREAD_READY;
}
rq->rq_cur = next;
rq_unlock(rq, flags);
if (prev != next) {
context_switch(prev, next);
}
ml_int_enable();
}
/*
 * schedule - reschedule, repeating until no further reschedule is pending.
 * @mode: forwarded to __schedule() on every iteration.
 */
void schedule(enum sched_mode mode)
{
	__schedule(mode);
	while (need_resched()) {
		__schedule(mode);
	}
}
/*
 * select_rq_for_thread - pick the least-loaded online runqueue.
 * @thr: thread being placed (currently unused by the heuristic; kept for
 *       future affinity-aware placement).
 *
 * Scans every available, online CPU and returns the runqueue with the
 * fewest threads, counting a non-idle current thread as load.  Returns
 * NULL when no eligible runqueue exists.
 */
struct runqueue *select_rq_for_thread(struct thread *thr)
{
	struct runqueue *best = NULL;
	unsigned int best_load = 0;
	unsigned long flags;
	unsigned int nr_cpu = cpu_get_highest_available() + 1;
	for (unsigned int cpu = 0; cpu < nr_cpu; cpu++) {
		if (!cpu_is_available(cpu) || !cpu_is_online(cpu)) {
			continue;
		}
		struct runqueue *rq = cpu_rq(cpu);
		if (rq == NULL) {
			continue;
		}
		/* Sample the load under the lock; the thread currently on
		 * the CPU counts unless it is the idle thread. */
		rq_lock(rq, &flags);
		unsigned int load = rq->rq_nthreads;
		if (rq->rq_cur != NULL && rq->rq_cur != rq->rq_idle) {
			load++;
		}
		rq_unlock(rq, flags);
		if (best == NULL || load < best_load) {
			best = rq;
			best_load = load;
		}
	}
	return best;
}
void schedule_thread_on_cpu(struct thread *thr)
{
struct runqueue *rq = thr->tr_rq;
if (!rq) {
rq = select_rq_for_thread(thr);
}
if (rq) {
unsigned long flags;
rq_lock(rq, &flags);
rq_enqueue(rq, thr);
rq_unlock(rq, flags);
}
}
void start_charge_period(void)
{
struct thread *self = current_thread();
if (!self) {
return;
}
self->tr_charge_period_start = get_cycles();
}
/*
 * end_charge_period - close the accounting period opened by
 * start_charge_period() and charge the elapsed cycles to the current
 * thread's quantum and lifetime totals.  Marks the thread for
 * rescheduling once its quantum target is consumed.
 */
void end_charge_period(void)
{
	preempt_disable();
	struct thread *self = current_thread();
	if (!self) {
		/* Fix: the original leaked the preempt_disable() on this
		 * early-return path, permanently inflating preempt_count. */
		preempt_enable();
		return;
	}
	cycles_t end = get_cycles();
	preempt_enable();
	cycles_t charge = cycles_diff(self->tr_charge_period_start, end);
	self->tr_quantum_cycles += charge;
	self->tr_total_cycles += charge;
	if (self->tr_quantum_cycles >= self->tr_quantum_target) {
		/* Quantum exhausted: request a switch at the next
		 * scheduling point. */
		self->tr_flags |= THREAD_F_NEED_RESCHED;
	}
	self->tr_charge_period_start = 0;
}
/* default_quantum - return the system-wide default thread quantum, in
 * cycles.  Simply reads the file-scope __default_quantum value. */
cycles_t default_quantum(void)
{
return __default_quantum;
}