2023-03-06 11:08:26 +00:00
|
|
|
#ifndef SOCKS_SCHED_H_
|
|
|
|
|
#define SOCKS_SCHED_H_
|
|
|
|
|
|
|
|
|
|
#include <socks/pmap.h>
|
2023-03-09 19:55:52 +00:00
|
|
|
#include <socks/locks.h>
|
2023-03-06 11:08:26 +00:00
|
|
|
#include <socks/queue.h>
|
2023-03-09 19:55:52 +00:00
|
|
|
#include <socks/object.h>
|
2023-03-06 11:08:26 +00:00
|
|
|
#include <socks/btree.h>
|
|
|
|
|
#include <socks/status.h>
|
|
|
|
|
|
2023-03-09 19:55:52 +00:00
|
|
|
#define TASK_NAME_MAX 64
|
|
|
|
|
#define PRIO_MAX 32
|
2023-04-30 14:27:57 +01:00
|
|
|
#define THREAD_KSTACK_ORDER VM_PAGE_4K
|
2023-03-09 19:55:52 +00:00
|
|
|
|
2023-03-20 20:41:39 +00:00
|
|
|
#ifdef __cplusplus
|
|
|
|
|
extern "C" {
|
|
|
|
|
#endif
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* Lifecycle state of a task (see struct task::t_state). */
enum task_state {
	TASK_RUNNING,	/* task is live and may have runnable threads */
	TASK_STOPPED,	/* task has been stopped — TODO confirm whether this means exited or suspended */
};
|
2023-03-06 11:08:26 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/*
 * Lifecycle state of a thread (see struct thread::tr_state).
 * Values start at 1 so that a zeroed thread has no valid state.
 */
enum thread_state {
	THREAD_READY = 1,	/* runnable; eligible for a runqueue */
	THREAD_SLEEPING = 2,	/* blocked (e.g. on a waitqueue/timer) */
	THREAD_STOPPED = 3,	/* not runnable; will not be scheduled */
};
|
2023-03-09 19:55:52 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* Per-thread flag bits (OR-able; see struct thread::tr_flags). */
enum thread_flags {
	THREAD_F_NEED_RESCHED = 0x01u,	/* a reschedule is pending; see need_resched() */
	THREAD_F_NO_PREEMPT = 0x02u,	/* preemption disabled; see preempt_disable()/preempt_enable() */
};
|
|
|
|
|
|
|
|
|
|
/*
 * Canonical scheduling priorities. Higher value = higher priority.
 * All values are < PRIO_MAX and index struct runqueue::rq_queues[].
 * Values are spaced apart, presumably to leave room for dynamic
 * priority adjustment — TODO confirm against the scheduler.
 */
enum sched_priority {
	PRIO_IDLE = 4,		/* idle threads (see create_idle_thread()) */
	PRIO_SUBNORMAL = 6,	/* below-normal work */
	PRIO_NORMAL = 10,	/* default priority */
	PRIO_SUPERNORMAL = 14,	/* above-normal work */
	PRIO_HIGH = 18,		/* high-priority work */
	PRIO_REALTIME = 24,	/* highest class defined here */
};
|
|
|
|
|
|
2023-04-30 21:09:36 +01:00
|
|
|
/* Caller context passed to schedule(); controls runqueue handling. */
enum sched_mode {
	/* Used when calling from non-interrupt context.
	   Threads that aren't in state THREAD_READY are
	   removed from the runqueue. */
	SCHED_NORMAL = 0,

	/* Used when calling from interrupt context.
	   Threads that aren't in state THREAD_READY are
	   still added to the runqueue. */
	SCHED_IRQ = 1,
};
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/*
 * A task (process): an address space plus a set of threads.
 * Reference-counted and lockable via the embedded t_base object
 * (see task_ref()/task_deref() and task_lock_irqsave()).
 */
struct task {
	/* refcount/lock base; must stay first for OBJECT_CAST — TODO confirm layout requirement */
	struct object t_base;

	struct task *t_parent;		/* parent task; presumably NULL for the root task — confirm */
	unsigned int t_id;		/* task (process) id; see task_from_pid() */
	enum task_state t_state;	/* lifecycle state */
	char t_name[TASK_NAME_MAX];	/* human-readable name, NUL-terminated */

	pmap_t t_pmap;			/* physical map / address space (<socks/pmap.h>) */

	struct btree_node t_tasklist;	/* linkage in the global task btree — presumably keyed by t_id; confirm */
	struct queue t_threads;		/* this task's threads (thread::tr_threads entries) */
	struct queue t_children;	/* child tasks — NOTE(review): no sibling queue_entry is visible in this struct; confirm how children are linked */
};
|
2023-03-06 11:08:26 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/*
 * A schedulable thread of execution belonging to a task.
 * Reference-counted via the embedded base object.
 */
struct thread {
	/* refcount/lock base object.
	 * NOTE(review): prefix "thr_" differs from the "tr_" used by every
	 * other member — confirm this is intentional before renaming. */
	struct object thr_base;

	enum thread_state tr_state;	/* READY / SLEEPING / STOPPED */
	enum thread_flags tr_flags;	/* NEED_RESCHED / NO_PREEMPT bits */

	struct task *tr_parent;		/* owning task */

	unsigned int tr_id;		/* thread id */
	unsigned int tr_prio;		/* scheduling priority; presumably < PRIO_MAX (indexes rq_queues) — confirm */

	/* CPU-time accounting in cycles; maintained by
	 * start_charge_period()/end_charge_period() — TODO confirm exact semantics. */
	cycles_t tr_charge_period_start;
	cycles_t tr_quantum_cycles, tr_quantum_target;	/* used vs. allotted quantum, presumably; see default_quantum() */
	cycles_t tr_total_cycles;	/* lifetime cycles consumed — TODO confirm */

	/* saved stack/base pointer for context switch */
	uintptr_t tr_sp, tr_bp;

	struct queue_entry tr_threads;	/* linkage on tr_parent->t_threads */
	struct queue_entry tr_rqentry;	/* linkage on a runqueue priority queue */

	/* kernel stack backing page(s); size given by THREAD_KSTACK_ORDER */
	struct vm_page *tr_kstack;
};
|
2023-03-09 19:55:52 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/*
 * Per-CPU run queue: one FIFO per priority level, protected by a
 * spin lock (use rq_lock()/rq_unlock()). Obtain a CPU's runqueue
 * with cpu_rq().
 */
struct runqueue {
	struct queue rq_queues[PRIO_MAX];	/* one queue per priority level */
	/* presumably a bitmap of non-empty priority queues (bit i <->
	 * rq_queues[i]); uint32_t matches PRIO_MAX == 32 — confirm */
	uint32_t rq_readybits;
	spin_lock_t rq_lock;		/* protects this runqueue; IRQ-safe (irqsave) */
	unsigned int rq_nthreads;	/* number of threads on this runqueue — TODO confirm whether rq_cur is counted */

	struct thread *rq_cur, *rq_idle;	/* currently running thread / this CPU's idle thread */
};
|
2023-03-06 11:08:26 +00:00
|
|
|
|
2023-04-30 21:09:36 +01:00
|
|
|
/*
 * A software timer; armed with add_timer() and cancelled with
 * remove_timer(). t_callback fires when the timer expires.
 */
struct timer {
	struct queue_entry t_entry;	/* linkage on a per-CPU timer queue — presumably; confirm */
	struct cpu_data *t_cpu;		/* CPU this timer is queued on */
	struct thread *t_owner;		/* thread that armed the timer */
	unsigned long t_expiry;		/* expiry time; units look like clock ticks (cf. schedule_timeout) — TODO confirm absolute vs. relative */
	void(*t_callback)(struct timer *);	/* invoked on expiry; context (IRQ?) unspecified here — confirm */
};
|
|
|
|
|
|
|
|
|
|
/* One waiter on a waitqueue: a thread plus its queue linkage. */
struct wait_item {
	struct thread *w_thread;	/* the sleeping thread */
	struct queue_entry w_entry;	/* linkage on waitqueue::wq_waiters */
};
|
|
|
|
|
|
|
|
|
|
/* A queue of sleeping threads (wait_item entries) plus its lock. */
struct waitqueue {
	struct queue wq_waiters;	/* wait_item::w_entry list */
	spin_lock_t wq_lock;		/* protects wq_waiters */
};
|
|
|
|
|
|
2023-03-06 11:08:26 +00:00
|
|
|
/* One-time scheduler initialisation; returns a kernel status code. */
extern kern_status_t sched_init(void);

/* Pick and switch to the next runnable thread. The mode selects the
 * runqueue policy for non-READY threads (see enum sched_mode). */
extern void schedule(enum sched_mode mode);

/* Disable/re-enable preemption of the current thread
 * (cf. THREAD_F_NO_PREEMPT). Presumably nestable — confirm. */
extern void preempt_disable(void);
extern void preempt_enable(void);

/* Runqueue primitives. Locking discipline (caller-held vs. internal)
 * is not stated here — confirm against the implementation. */
extern void rq_init(struct runqueue *rq);
extern struct thread *rq_dequeue(struct runqueue *rq);
extern void rq_enqueue(struct runqueue *rq, struct thread *thr);
|
|
|
|
|
/*
 * Acquire rq's spin lock with local interrupts disabled; the previous
 * interrupt state is saved through *flags for rq_unlock().
 */
static inline void rq_lock(struct runqueue *rq, unsigned long *flags)
{
	spin_lock_irqsave(&rq->rq_lock, flags);
}
|
|
|
|
|
/*
 * Release rq's spin lock and restore the interrupt state previously
 * saved by rq_lock().
 */
static inline void rq_unlock(struct runqueue *rq, unsigned long flags)
{
	spin_unlock_irqrestore(&rq->rq_lock, flags);
}
|
2023-05-04 21:42:51 +01:00
|
|
|
/* The runqueue belonging to the given CPU index. */
extern struct runqueue *cpu_rq(unsigned int cpu);

/* Allocate a new task. Initialisation state on return is not
 * specified here — confirm before use. */
extern struct task *task_alloc(void);

/* Take a reference on task; returns task for call chaining. */
static inline struct task *task_ref(struct task *task) { return OBJECT_CAST(struct task, t_base, object_ref(&task->t_base)); }

/* Drop a reference previously taken with task_ref(). */
static inline void task_deref(struct task *task) { object_deref(&task->t_base); }

/* Look up a task by pid — TODO confirm whether a reference is taken
 * on behalf of the caller. */
extern struct task *task_from_pid(unsigned int pid);

/* Well-known singleton tasks. */
extern struct task *kernel_task(void);
extern struct task *idle_task(void);

/* Default scheduling quantum, in cycles (cf. thread::tr_quantum_target). */
extern cycles_t default_quantum(void);
|
2023-03-09 19:55:52 +00:00
|
|
|
|
2023-03-28 21:39:59 +01:00
|
|
|
/* Whether a reschedule is pending — presumably tests
 * THREAD_F_NEED_RESCHED on the current thread; confirm. */
extern bool need_resched(void);

/* The task/thread currently executing on this CPU. */
extern struct task *current_task(void);
extern struct thread *current_thread(void);

/* Pick a runqueue for thr / place thr on a CPU's runqueue.
 * Selection policy (load balancing?) is not visible here — confirm. */
extern struct runqueue *select_rq_for_thread(struct thread *thr);
extern void schedule_thread_on_cpu(struct thread *thr);

/* Begin/end a CPU-time accounting period for the current thread;
 * presumably feeds the tr_*_cycles counters in struct thread. */
extern void start_charge_period(void);
extern void end_charge_period(void);
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/*
 * Lock the task's base object, saving the interrupt state through
 * *flags for task_unlock_irqrestore().
 */
static inline void task_lock_irqsave(struct task *task, unsigned long *flags)
{
	object_lock(&task->t_base, flags);
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/*
 * Unlock the task's base object and restore the interrupt state
 * previously saved by task_lock_irqsave().
 */
static inline void task_unlock_irqrestore(struct task *task, unsigned long flags)
{
	object_unlock(&task->t_base, flags);
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* Allocate a new thread (pair with thread_init()). */
extern struct thread *thread_alloc(void);

/* Initialise thr to begin execution at instruction pointer ip;
 * returns a kernel status code. Presumably also sets up the kernel
 * stack (tr_kstack) — confirm. */
extern kern_status_t thread_init(struct thread *thr, uintptr_t ip);

/* Effective scheduling priority of thr. */
extern int thread_priority(struct thread *thr);

/* The idle loop run by a CPU's idle thread. */
extern void idle(void);

/* Create kernel-owned threads; fn is the thread's entry point. */
extern struct thread *create_kernel_thread(void(*fn)(void));
extern struct thread *create_idle_thread(void);

/* Arm / cancel a software timer (see struct timer). */
extern void add_timer(struct timer *timer);
extern void remove_timer(struct timer *timer);

/* Sleep for up to clock_ticks ticks; the return value presumably is
 * the ticks remaining if woken early — confirm. */
extern unsigned long schedule_timeout(unsigned long clock_ticks);

/* Sleep for ms milliseconds; return semantics presumably mirror
 * schedule_timeout() — confirm. */
extern unsigned long milli_sleep(unsigned long ms);
|
|
|
|
|
|
2023-03-20 20:41:39 +00:00
|
|
|
#ifdef __cplusplus
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2023-03-06 11:08:26 +00:00
|
|
|
#endif
|