kernel: add support for getting percpu variables that belong to other CPUs

This commit is contained in:
2023-05-04 21:42:51 +01:00
parent 3f91c96050
commit bb524c1576
6 changed files with 49 additions and 4 deletions

View File

@@ -30,8 +30,12 @@ struct cpu_data {
#define this_cpu() (ml_cpu_block_get_id(ml_this_cpu())) #define this_cpu() (ml_cpu_block_get_id(ml_this_cpu()))
extern struct cpu_data *get_this_cpu(void); extern struct cpu_data *get_this_cpu(void);
extern struct cpu_data *get_cpu(unsigned int id);
extern void put_cpu(struct cpu_data *cpu); extern void put_cpu(struct cpu_data *cpu);
extern bool cpu_is_available(unsigned int cpu_id);
extern bool cpu_is_online(unsigned int cpu_id);
extern void cpu_set_available(unsigned int cpu_id); extern void cpu_set_available(unsigned int cpu_id);
extern void cpu_set_online(unsigned int cpu_id); extern void cpu_set_online(unsigned int cpu_id);

View File

@@ -15,13 +15,19 @@ extern "C" {
#define percpu_get(var) \ #define percpu_get(var) \
__extension__({ \ __extension__({ \
preempt_disable(); \ preempt_disable(); \
__percpu_get(var); \ __percpu_get(this_cpu(), var); \
})
/*
 * percpu_get_from(cpu, var) - resolve @var's slot in the per-cpu area of an
 * arbitrary CPU (not necessarily the caller's).
 *
 * Disables preemption before the lookup; the caller must release with
 * percpu_put() when done.  Evaluates to the resolved pointer via a GNU
 * statement expression.
 */
#define percpu_get_from(cpu, var) \
	__extension__({ \
		preempt_disable(); \
		__percpu_get(cpu, var); \
	})
#define percpu_put(var) preempt_enable(); #define percpu_put(var) preempt_enable();
extern kern_status_t init_per_cpu_areas(void); extern kern_status_t init_per_cpu_areas(void);
extern void *__percpu_get(void *var); extern void *__percpu_get(unsigned int cpu, void *var);
#ifdef __cplusplus #ifdef __cplusplus
} }

View File

@@ -127,6 +127,7 @@ static inline void rq_unlock(struct runqueue *rq, unsigned long flags)
{ {
spin_unlock_irqrestore(&rq->rq_lock, flags); spin_unlock_irqrestore(&rq->rq_lock, flags);
} }
extern struct runqueue *cpu_rq(unsigned int cpu);
extern struct task *task_alloc(void); extern struct task *task_alloc(void);
static inline struct task *task_ref(struct task *task) { return (struct task *)object_data(object_ref(object_header(task))); } static inline struct task *task_ref(struct task *task) { return (struct task *)object_data(object_ref(object_header(task))); }

View File

@@ -12,11 +12,34 @@ struct cpu_data *get_this_cpu(void)
return percpu_get(&cpu_data); return percpu_get(&cpu_data);
} }
/*
 * get_cpu() - return the per-cpu data block belonging to CPU @id.
 *
 * Disables preemption via percpu_get_from(); the caller must pair this
 * with put_cpu() to re-enable preemption.
 * NOTE(review): @id is passed straight through to the per-cpu lookup —
 * presumably callers guarantee id < CPU_MAX; confirm at call sites.
 */
struct cpu_data *get_cpu(unsigned int id)
{
	return percpu_get_from(id, &cpu_data);
}
void put_cpu(struct cpu_data *cpu) void put_cpu(struct cpu_data *cpu)
{ {
percpu_put(cpu); percpu_put(cpu);
} }
/*
 * cpu_is_available() - test whether @cpu_id is marked in the availability
 * bitmap.  Ids at or beyond CPU_MAX are never available.
 */
bool cpu_is_available(unsigned int cpu_id)
{
	/* Out-of-range ids short-circuit before the bitmap probe. */
	return cpu_id < CPU_MAX && bitmap_check(cpu_available, cpu_id);
}
/*
 * cpu_is_online() - test whether @cpu_id is marked in the online bitmap.
 * Ids at or beyond CPU_MAX are never online.
 */
bool cpu_is_online(unsigned int cpu_id)
{
	/* Out-of-range ids short-circuit before the bitmap probe. */
	return cpu_id < CPU_MAX && bitmap_check(cpu_online, cpu_id);
}
void cpu_set_available(unsigned int cpu_id) void cpu_set_available(unsigned int cpu_id)
{ {
if (cpu_id >= CPU_MAX) { if (cpu_id >= CPU_MAX) {

View File

@@ -24,7 +24,7 @@ extern kern_status_t init_per_cpu_areas(void)
return KERN_OK; return KERN_OK;
} }
extern void *__percpu_get(void *var) extern void *__percpu_get(unsigned int cpu, void *var)
{ {
uintptr_t pvar = (uintptr_t)var; uintptr_t pvar = (uintptr_t)var;
uintptr_t percpu_start = (uintptr_t)__percpu_start; uintptr_t percpu_start = (uintptr_t)__percpu_start;
@@ -36,5 +36,5 @@ extern void *__percpu_get(void *var)
size_t var_offset = pvar - percpu_start; size_t var_offset = pvar - percpu_start;
return (char *)percpu_buffer + (this_cpu() * percpu_stride) + var_offset; return (char *)percpu_buffer + (cpu * percpu_stride) + var_offset;
} }

View File

@@ -1,5 +1,6 @@
#include <socks/sched.h> #include <socks/sched.h>
#include <socks/percpu.h> #include <socks/percpu.h>
#include <socks/cpu.h>
#define PRIO_MASK(p) (((uint32_t)1) << (p)) #define PRIO_MASK(p) (((uint32_t)1) << (p))
#define FIRST_PRIO(m) (m > 0 ? (PRIO_MAX - __builtin_clz(m) - 1) : -1) #define FIRST_PRIO(m) (m > 0 ? (PRIO_MAX - __builtin_clz(m) - 1) : -1)
@@ -42,6 +43,16 @@ void rq_enqueue(struct runqueue *rq, struct thread *thr)
struct queue *q = &rq->rq_queues[prio]; struct queue *q = &rq->rq_queues[prio];
queue_push_back(q, &thr->tr_rqentry); queue_push_back(q, &thr->tr_rqentry);
rq->rq_nthreads++;
rq->rq_readybits |= PRIO_MASK(thread_priority(thr)); rq->rq_readybits |= PRIO_MASK(thread_priority(thr));
} }
/*
 * cpu_rq() - look up the runqueue embedded in @cpu's per-cpu data block.
 *
 * The returned address remains usable after the reference is dropped:
 * put_cpu() only re-enables preemption (it expands to preempt_enable());
 * nothing is freed, and the per-cpu area itself is statically allocated.
 */
struct runqueue *cpu_rq(unsigned int cpu)
{
	struct cpu_data *cd = get_cpu(cpu);
	struct runqueue *ret = &cd->c_rq;

	put_cpu(cd);
	return ret;
}