#ifndef KERNEL_SCHED_H_
|
|
#define KERNEL_SCHED_H_
|
|
|
|
#include <kernel/btree.h>
|
|
#include <kernel/handle.h>
|
|
#include <kernel/locks.h>
|
|
#include <kernel/msg.h>
|
|
#include <kernel/object.h>
|
|
#include <kernel/pmap.h>
|
|
#include <kernel/queue.h>
|
|
#include <mango/status.h>
|
|
|
|
/* Maximum length of a task name, including the NUL terminator. */
#define TASK_NAME_MAX 64
/* Number of priority levels; also sizes runqueue.rq_queues[]. */
#define PRIO_MAX 32
/* Largest process id handed out (inclusive upper bound, presumably —
 * TODO confirm against the allocator). */
#define PID_MAX 99999
/* Allocation order for a thread's kernel stack (one 4K page). */
#define THREAD_KSTACK_ORDER VM_PAGE_4K
/* System-wide cap on the number of threads. */
#define THREAD_MAX 65536
|
|
|
|
/*
 * wait_event() - block the calling thread on @wq until @cond is true.
 *
 * The condition is re-tested only after the waiter has been registered
 * via thread_wait_begin(), so a wakeup racing with the test is not
 * lost.  @cond may be evaluated multiple times and must therefore be
 * side-effect free.  Calls schedule(SCHED_NORMAL), so this must run in
 * a context that may sleep (non-interrupt).
 *
 * Locals carry the _we_ prefix so they cannot shadow identifiers that
 * the caller references inside @cond (the previous names `self` and
 * `waiter` could silently capture same-named caller variables).
 */
#define wait_event(wq, cond) \
	({ \
		struct thread *_we_self = current_thread(); \
		struct wait_item _we_waiter; \
		wait_item_init(&_we_waiter, _we_self); \
		for (;;) { \
			thread_wait_begin(&_we_waiter, (wq)); \
			if (cond) { \
				break; \
			} \
			schedule(SCHED_NORMAL); \
		} \
		thread_wait_end(&_we_waiter, (wq)); \
	})
|
|
|
|
#ifdef __cplusplus
|
|
extern "C" {
|
|
#endif
|
|
|
|
/* Opaque types referenced by pointer only; defined elsewhere. */
struct channel;
struct runqueue;
struct work_item;
|
|
|
|
/* Lifecycle state of a task (see struct task::t_state). */
enum task_state {
	TASK_RUNNING,
	TASK_STOPPED,
};
|
|
|
|
/* Scheduler-visible state of a thread (struct thread::tr_state).
 * Values start at 1, so 0 is never a valid state — presumably to catch
 * zero-initialized/uninitialized threads; confirm with users. */
enum thread_state {
	THREAD_READY = 1,
	THREAD_SLEEPING = 2,
	THREAD_STOPPED = 3,
};
|
|
|
|
/* Bit flags for struct thread::tr_flags (OR-able). */
enum thread_flags {
	THREAD_F_NEED_RESCHED = 0x01u, /* a reschedule is pending for this thread */
	THREAD_F_NO_PREEMPT = 0x02u,   /* thread must not be preempted right now */
};
|
|
|
|
/* Canonical priority levels; higher value = higher priority.
 * All values lie in [0, PRIO_MAX) and index runqueue.rq_queues[].
 * Gaps between levels leave room for fine-grained adjustment. */
enum sched_priority {
	PRIO_IDLE = 4,
	PRIO_SUBNORMAL = 6,
	PRIO_NORMAL = 10,
	PRIO_SUPERNORMAL = 14,
	PRIO_HIGH = 18,
	PRIO_REALTIME = 24,
};
|
|
|
|
/* Mode argument for schedule(), selecting how the outgoing thread is
 * treated with respect to the runqueue. */
enum sched_mode {
	/* Used when calling from non-interrupt context:
	 * threads that aren't in state THREAD_READY are
	 * removed from the runqueue. */
	SCHED_NORMAL = 0,
	/* Used when calling from interrupt context:
	 * threads that aren't in state THREAD_READY are
	 * still added to the runqueue. */
	SCHED_IRQ = 1,
};
|
|
|
|
/*
 * A task: a refcounted kernel object owning an address space, handle
 * table, channels, and a set of threads.  Field prefix: t_.
 */
struct task {
	struct object t_base; /* refcounted object header; see task_ref() */

	struct task *t_parent;          /* parent task, linked via t_child_entry */
	long t_id;                      /* task id, bounded by PID_MAX presumably */
	enum task_state t_state;
	char t_name[TASK_NAME_MAX];     /* NUL-terminated display name */

	pmap_t t_pmap;                  /* physical map / MMU context */
	struct vm_region *t_address_space;
	struct handle_table *t_handles; /* handles resolved by task_resolve_handle() */
	/* NOTE(review): b_channels breaks the t_ field-prefix convention used by
	 * every other member; renaming would break existing users, so left as-is.
	 * Keyed by the channel id used in task_add_channel()/task_get_channel(). */
	struct btree b_channels;

	struct btree_node t_tasklist;     /* linkage in the global task tree */
	struct queue_entry t_child_entry; /* linkage in parent's t_children */

	size_t t_next_thread_id;        /* next tr_id to hand to a new thread */
	struct queue t_threads;         /* threads of this task (tr_parent_entry) */
	struct queue t_children;        /* child tasks (t_child_entry) */
};
|
|
|
|
/*
 * A schedulable thread belonging to a task.  Field prefix: tr_ (except
 * thr_base).  Refcounted via its embedded object header.
 */
struct thread {
	struct object thr_base; /* refcounted object header */

	enum thread_state tr_state;
	enum thread_flags tr_flags; /* OR of THREAD_F_* bits */
	struct task *tr_parent;     /* owning task */

	unsigned int tr_id;   /* per-task thread id (task::t_next_thread_id) */
	unsigned int tr_prio; /* priority, indexes runqueue.rq_queues[] */

	/* CPU-time accounting; see start_charge_period()/end_charge_period().
	 * Exact quantum semantics are defined by the scheduler implementation —
	 * presumably tr_quantum_cycles counts use against tr_quantum_target. */
	cycles_t tr_charge_period_start;
	cycles_t tr_quantum_cycles, tr_quantum_target;
	cycles_t tr_total_cycles;

	/* Saved execution context (instruction/stack/base pointers). */
	virt_addr_t tr_ip, tr_sp, tr_bp;
	virt_addr_t tr_cpu_user_sp, tr_cpu_kernel_sp;

	struct runqueue *tr_rq; /* runqueue this thread is assigned to, if any */
	struct kmsg tr_msg;     /* in-kernel message buffer */

	struct queue_entry tr_parent_entry; /* linkage in task::t_threads */
	struct queue_entry tr_rqentry;      /* linkage in runqueue::rq_queues[] */

	struct vm_page *tr_kstack;   /* kernel stack page(s), THREAD_KSTACK_ORDER */
	struct vm_object *tr_ustack; /* user stack backing object */
};
|
|
|
|
/*
 * Per-CPU runqueue: one FIFO per priority level plus a ready bitmap.
 * Protected by rq_lock (acquire via rq_lock()/rq_unlock() helpers).
 */
struct runqueue {
	struct queue rq_queues[PRIO_MAX]; /* one queue per priority level */
	uint32_t rq_readybits;            /* bit i set => rq_queues[i] non-empty,
	                                     presumably — confirm in rq_enqueue() */
	spin_lock_t rq_lock;
	unsigned int rq_nthreads;         /* threads currently queued */

	struct thread *rq_cur, *rq_idle;  /* running thread and idle thread */
};
|
|
|
|
/*
 * One-shot timer; armed with add_timer(), cancelled with remove_timer().
 * t_callback fires when t_expiry is reached (clock units not visible
 * here — presumably the same ticks as schedule_timeout()).
 */
struct timer {
	struct queue_entry t_entry;        /* linkage in the per-CPU timer list */
	struct cpu_data *t_cpu;            /* CPU the timer is queued on */
	struct thread *t_owner;            /* thread that armed the timer */
	unsigned long t_expiry;            /* absolute expiry time */
	void (*t_callback)(struct timer *);/* invoked on expiry */
};
|
|
|
|
/* A single thread's registration on a waitqueue; stack-allocated by
 * wait_event() and initialized with wait_item_init(). */
struct wait_item {
	struct thread *w_thread;   /* the waiting thread */
	struct queue_entry w_entry;/* linkage in waitqueue::wq_waiters */
};
|
|
|
|
/* Queue of sleeping threads; woken via wakeup_queue()/wakeup_one(). */
struct waitqueue {
	struct queue wq_waiters; /* list of struct wait_item */
	spin_lock_t wq_lock;
};
|
|
|
|
/* Callback type for deferred work; receives its own work_item, from
 * which w_data can be recovered. */
typedef void (*work_func_t)(struct work_item *);

/* A unit of deferred work, queued on a workqueue via schedule_work*(). */
struct work_item {
	void *w_data;              /* opaque context for w_func */
	work_func_t w_func;        /* function to run */
	struct queue_entry w_head; /* linkage in workqueue::wq_queue */
};
|
|
|
|
/* A set of worker threads that service workqueues; created with
 * worker_pool_create(), woken with wake_workers(). */
struct worker_pool {
	struct thread **wp_workers; /* array of wp_nworkers worker threads */
	size_t wp_nworkers;
};
|
|
|
|
/* FIFO of pending work items, drained by a worker pool. */
struct workqueue {
	spin_lock_t wq_lock;
	struct queue wq_queue; /* list of struct work_item */
};
|
|
|
|
/* --- Core scheduler entry points --- */

/* One-time scheduler initialization; returns a kernel status code. */
extern kern_status_t sched_init(void);
/* Pick and switch to the next thread; see enum sched_mode for the
 * runqueue treatment of the outgoing thread. */
extern void schedule(enum sched_mode mode);
/* Preemption control — presumably nestable; confirm in implementation. */
extern void preempt_disable(void);
extern void preempt_enable(void);

/* --- Runqueue manipulation --- */

extern void rq_init(struct runqueue *rq);
/* Remove and return the next runnable thread, or presumably NULL/idle
 * when empty — confirm against implementation. */
extern struct thread *rq_dequeue(struct runqueue *rq);
extern void rq_enqueue(struct runqueue *rq, struct thread *thr);
|
|
/* Acquire @rq's lock, saving the interrupt state into *@flags for the
 * matching rq_unlock().  Note spin_lock_irqsave here takes the flags
 * pointer directly. */
static inline void rq_lock(struct runqueue *rq, unsigned long *flags)
{
	spin_lock_irqsave(&rq->rq_lock, flags);
}
|
|
/* Release @rq's lock and restore the interrupt state saved by the
 * paired rq_lock(). */
static inline void rq_unlock(struct runqueue *rq, unsigned long flags)
{
	spin_unlock_irqrestore(&rq->rq_lock, flags);
}
|
|
/* Remove @thr from @rq regardless of its queue position. */
extern void rq_remove_thread(struct runqueue *rq, struct thread *thr);
/* Runqueue of the given CPU index. */
extern struct runqueue *cpu_rq(unsigned int cpu);
|
|
|
|
/* --- Task lifecycle --- */

/* Allocate a bare task object (no name/id setup). */
extern struct task *task_alloc(void);
/* Downcast a generic object to a task; presumably NULL when @obj is not
 * a task — confirm in implementation. */
extern struct task *task_cast(struct object *obj);
/* Allocate and initialize a task named from @name (@name_len bytes,
 * bounded by TASK_NAME_MAX). */
extern struct task *task_create(const char *name, size_t name_len);
|
|
/* Take a reference on @task and return it, for call-chaining. */
static inline struct task *task_ref(struct task *task)
{
	return OBJECT_CAST(struct task, t_base, object_ref(&task->t_base));
}
|
|
/* Drop a reference on @task; presumably frees it at refcount zero —
 * see object_unref(). */
static inline void task_unref(struct task *task)
{
	object_unref(&task->t_base);
}
|
|
/* Link @child under @parent (task::t_children). */
extern kern_status_t task_add_child(struct task *parent, struct task *child);
/* Register @channel under @id in @task's channel tree (b_channels). */
extern kern_status_t task_add_channel(
	struct task *task,
	struct channel *channel,
	unsigned int id);
/* Look up the channel registered under @id, or presumably NULL. */
extern struct channel *task_get_channel(struct task *task, unsigned int id);
/* Look up a task by id. */
extern struct task *task_from_tid(tid_t id);

/* --- Handle table operations --- */

/* Insert @obj into @task's handle table; the new handle is returned
 * through @out. */
extern kern_status_t task_open_handle(
	struct task *task,
	struct object *obj,
	handle_flags_t flags,
	kern_handle_t *out);
/* Resolve @handle to its object and flags.  Ownership/refcount of
 * *out_obj is defined by the implementation — TODO confirm whether a
 * reference is taken. */
extern kern_status_t task_resolve_handle(
	struct task *task,
	kern_handle_t handle,
	struct object **out_obj,
	handle_flags_t *out_flags);
extern kern_status_t task_close_handle(struct task *task, kern_handle_t handle);

/* Create a new thread owned by @parent. */
extern struct thread *task_create_thread(struct task *parent);
/* Well-known singleton tasks. */
extern struct task *kernel_task(void);
extern struct task *idle_task(void);
/* Default scheduling quantum, in cycles. */
extern cycles_t default_quantum(void);

/* True when the current thread should yield (THREAD_F_NEED_RESCHED,
 * presumably — confirm in implementation). */
extern bool need_resched(void);
extern struct task *current_task(void);
extern struct thread *current_thread(void);

/* Choose a runqueue for @thr and place it there. */
extern struct runqueue *select_rq_for_thread(struct thread *thr);
extern void schedule_thread_on_cpu(struct thread *thr);

/* CPU-time accounting window for the current thread; see the cycles_t
 * fields in struct thread. */
extern void start_charge_period(void);
extern void end_charge_period(void);

/* Generates task_lock()/task_unlock() over t_base (see object.h). */
DEFINE_OBJECT_LOCK_FUNCTION(task, t_base)
|
|
|
|
/* --- Thread lifecycle --- */

extern struct thread *thread_alloc(void);
/* Downcast a generic object to a thread; presumably NULL on mismatch. */
extern struct thread *thread_cast(struct object *obj);
/* Initialize @thr to run in kernel mode starting at @ip. */
extern kern_status_t thread_init_kernel(struct thread *thr, virt_addr_t ip);
/* Initialize @thr for user mode with entry @ip, stack @sp, and
 * @nr_args startup arguments copied from @args. */
extern kern_status_t thread_init_user(
	struct thread *thr,
	virt_addr_t ip,
	virt_addr_t sp,
	const uintptr_t *args,
	size_t nr_args);
extern int thread_priority(struct thread *thr);
/* Make a sleeping thread runnable again. */
extern void thread_awaken(struct thread *thr);
/* Idle loop entered when no thread is runnable. */
extern void idle(void);
extern struct thread *create_kernel_thread(void (*fn)(void));
extern struct thread *create_idle_thread(void);

/* --- Timers and sleeping --- */

extern void add_timer(struct timer *timer);
extern void remove_timer(struct timer *timer);
/* Sleep for up to @clock_ticks; presumably returns the ticks remaining
 * if woken early — confirm in implementation. */
extern unsigned long schedule_timeout(unsigned long clock_ticks);
extern unsigned long milli_sleep(unsigned long ms);
/* Sleep with no timeout; only an explicit wakeup resumes the thread. */
extern void sleep_forever(void);

/* --- Waitqueues (see wait_event() above) --- */

extern void wait_item_init(struct wait_item *item, struct thread *thr);
extern void thread_wait_begin(struct wait_item *waiter, struct waitqueue *q);
extern void thread_wait_end(struct wait_item *waiter, struct waitqueue *q);
extern void wait_on_queue(struct waitqueue *q);
/* Wake all waiters / exactly one waiter. */
extern void wakeup_queue(struct waitqueue *q);
extern void wakeup_one(struct waitqueue *q);

/* --- Deferred work --- */

/* NOTE(review): output parameter is last here, unlike the *_init
 * functions above which take the object first. */
extern void work_item_init(work_func_t func, void *data, struct work_item *out);
extern void workqueue_init(struct workqueue *wq);
extern struct worker_pool *worker_pool_create(size_t nworkers);
extern struct worker_pool *global_worker_pool(void);
/* Queue @work on @wq / on the default queue; the bool result presumably
 * reports whether the item was newly queued — confirm. */
extern bool schedule_work_on(struct workqueue *wq, struct work_item *work);
extern bool schedule_work(struct work_item *work);

extern void wake_workers(struct workqueue *wq, struct worker_pool *pool);
|
|
|
|
#ifdef __cplusplus
|
|
}
|
|
#endif
|
|
|
|
#endif
|