Compare commits
68 Commits
409725f9d4
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| de520cdd2d | |||
| e84ed6057d | |||
| 1d4cb882a8 | |||
| 18b281debf | |||
| 09d292fd09 | |||
| 36c5ac7837 | |||
| b1bdb89ca4 | |||
| f8a7a4285f | |||
| f9bf4c618a | |||
| e4de3af00d | |||
| b59d0d8948 | |||
| 8cc877c251 | |||
| 2073cad97b | |||
| eb8758bc5e | |||
| 1cdde0d32e | |||
| 1c7c90ef39 | |||
| 11c741bd68 | |||
| 34bd6e479c | |||
| 5f0654430d | |||
| fd1bc0ad5f | |||
| b1ffdcf2bc | |||
| 5690dd5b9c | |||
| 37ae7aeef7 | |||
| dbe117135b | |||
| 273557fa9f | |||
| fe107fbad3 | |||
| b2d04c5983 | |||
| 6c2ca888ee | |||
| 044b3688aa | |||
| 77936e3511 | |||
| 08c78bd6e7 | |||
| 2537ca46de | |||
| 3190035086 | |||
| 7f049293f4 | |||
| 9b2c2f6b29 | |||
| 6e39dd45a4 | |||
| 855440f584 | |||
| e1e025ab6a | |||
| 0680b73461 | |||
| aa0933be10 | |||
| 8b188a0ac4 | |||
| ed25ee6761 | |||
| 0bae39e550 | |||
| 9a90662eaa | |||
| 1d4fd4f586 | |||
| dbc7b8fc59 | |||
| aa9439c392 | |||
| 8e072945d8 | |||
| 821246bc16 | |||
| fc8cdf62d3 | |||
| b2dbb88778 | |||
| 9424e7bcd6 | |||
| 4c35723959 | |||
| 2b7e5368c9 | |||
| 85006411bd | |||
| f2e128c57e | |||
| c6e1ba21dd | |||
| 2f413c603d | |||
| 291a5f677e | |||
| b188573eea | |||
| c69aed254f | |||
| 44c2904c11 | |||
| f89e3cb12c | |||
| 6019c9307d | |||
| e3dd48a0fa | |||
| 9f7b7bdd2d | |||
| c424e8127e | |||
| fb7d7635c2 |
@@ -12,12 +12,11 @@ set(kernel_arch x86_64)
|
||||
set(kernel_name "Mango")
|
||||
set(kernel_exe_name "mango_kernel")
|
||||
|
||||
set(generic_src_dirs ds init kernel libc sched util vm)
|
||||
set(generic_src_dirs ds init kernel libc sched util vm syscall)
|
||||
set(kernel_sources "")
|
||||
set(kernel_headers "")
|
||||
|
||||
foreach (dir ${generic_src_dirs})
|
||||
message(STATUS ${dir})
|
||||
file(GLOB_RECURSE dir_sources ${dir}/*.c)
|
||||
file(GLOB_RECURSE dir_headers ${dir}/*.h)
|
||||
|
||||
@@ -41,6 +40,7 @@ add_executable(${kernel_exe_name}
|
||||
target_include_directories(${kernel_exe_name} PRIVATE
|
||||
include
|
||||
libc/include
|
||||
libmango/include
|
||||
arch/${kernel_arch}/include)
|
||||
target_compile_options(${kernel_exe_name} PRIVATE
|
||||
-nostdlib -ffreestanding
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#include <unistd.h>
|
||||
#include <mango/machine/cpu.h>
|
||||
#include <kernel/machine/cpu.h>
|
||||
|
||||
int ml_init_bootcpu(void)
|
||||
{
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#include <mango/machine/hwlock.h>
|
||||
#include <mango/compiler.h>
|
||||
#include <kernel/compiler.h>
|
||||
#include <kernel/machine/hwlock.h>
|
||||
|
||||
void ml_hwlock_lock(ml_hwlock_t *lck)
|
||||
{
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <mango/init.h>
|
||||
#include <mango/memblock.h>
|
||||
#include <mango/vm.h>
|
||||
#include <mango/object.h>
|
||||
#include <mango/printk.h>
|
||||
#include <kernel/init.h>
|
||||
#include <kernel/memblock.h>
|
||||
#include <kernel/vm.h>
|
||||
#include <kernel/object.h>
|
||||
#include <kernel/printk.h>
|
||||
#include <arch/stdcon.h>
|
||||
#include <sys/mman.h>
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#include <mango/init.h>
|
||||
#include <kernel/init.h>
|
||||
|
||||
#ifdef __APPLE__
|
||||
extern char __start_initcall0[] __asm("section$start$__DATA$__initcall0.init");
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
#include <mango/libc/string.h>
|
||||
#include <mango/libc/ctype.h>
|
||||
#include <kernel/libc/string.h>
|
||||
#include <kernel/libc/ctype.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <mango/console.h>
|
||||
#include <mango/vm.h>
|
||||
#include <mango/printk.h>
|
||||
#include <kernel/console.h>
|
||||
#include <kernel/vm.h>
|
||||
#include <kernel/printk.h>
|
||||
|
||||
static void stdcon_write(struct console *con, const char *s, unsigned int len)
|
||||
{
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#include <mango/sched.h>
|
||||
#include <mango/compiler.h>
|
||||
#include <kernel/sched.h>
|
||||
#include <kernel/compiler.h>
|
||||
|
||||
//size_t THREAD_sp = offsetof(struct thread, tr_sp);
|
||||
|
||||
|
||||
@@ -2,9 +2,3 @@ target_compile_options(${kernel_exe_name} PRIVATE
|
||||
-z max-page-size=0x1000 -m64 -mcmodel=large -mno-red-zone -mno-mmx
|
||||
-mno-sse -mno-sse2 -D_64BIT -DBYTE_ORDER=1234)
|
||||
target_link_libraries(${kernel_exe_name} "-z max-page-size=0x1000" "-T ${CMAKE_CURRENT_SOURCE_DIR}/arch/x86_64/layout.ld")
|
||||
|
||||
add_custom_command(TARGET ${kernel_exe_name} POST_BUILD
|
||||
COMMAND ${BUILD_TOOLS_DIR}/e64patch $<TARGET_FILE:${kernel_exe_name}>
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
|
||||
COMMENT "Patching kernel elf64 image"
|
||||
)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#include <arch/msr.h>
|
||||
#include <mango/machine/cpu.h>
|
||||
#include <kernel/machine/cpu.h>
|
||||
|
||||
int ml_cpu_block_init(ml_cpu_block *p)
|
||||
{
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#include "mango/types.h"
|
||||
#include <mango/memblock.h>
|
||||
#include <mango/printk.h>
|
||||
#include <mango/util.h>
|
||||
#include <arch/e820.h>
|
||||
#include <kernel/memblock.h>
|
||||
#include <kernel/printk.h>
|
||||
#include <kernel/types.h>
|
||||
#include <kernel/util.h>
|
||||
|
||||
void e820_scan(multiboot_memory_map_t *mmap, size_t len)
|
||||
{
|
||||
@@ -36,7 +36,9 @@ void e820_scan(multiboot_memory_map_t *mmap, size_t len)
|
||||
}
|
||||
|
||||
printk("e820: [mem 0x%016llx-0x%016llx] %s",
|
||||
entry->addr, entry->addr + entry->len - 1, type);
|
||||
entry->addr,
|
||||
entry->addr + entry->len - 1,
|
||||
type);
|
||||
|
||||
memblock_add(entry->addr, entry->len);
|
||||
|
||||
@@ -53,7 +55,12 @@ void e820_scan(multiboot_memory_map_t *mmap, size_t len)
|
||||
|
||||
char str_mem_total[64], str_mem_reserved[64];
|
||||
data_size_to_string(mem_total, str_mem_total, sizeof str_mem_total);
|
||||
data_size_to_string(mem_reserved, str_mem_reserved, sizeof str_mem_reserved);
|
||||
data_size_to_string(
|
||||
mem_reserved,
|
||||
str_mem_reserved,
|
||||
sizeof str_mem_reserved);
|
||||
|
||||
printk("e820: total memory: %s, hw reserved: %s", str_mem_total, str_mem_reserved);
|
||||
printk("e820: total memory: %s, hw reserved: %s",
|
||||
str_mem_total,
|
||||
str_mem_reserved);
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#include <arch/gdt.h>
|
||||
#include <arch/tss.h>
|
||||
#include <mango/libc/string.h>
|
||||
#include <mango/types.h>
|
||||
#include <kernel/libc/string.h>
|
||||
#include <kernel/types.h>
|
||||
#include <stddef.h>
|
||||
|
||||
static void init_entry(struct gdt_entry *entry, int access, int flags)
|
||||
@@ -28,11 +28,11 @@ int gdt_init(struct gdt *gdt, struct gdt_ptr *gdtp)
|
||||
GDT_F_64BIT);
|
||||
init_entry(
|
||||
&gdt->g_entries[3],
|
||||
GDT_A_PRESENT | GDT_A_USER | GDT_A_CODEREAD | GDT_A_CODE,
|
||||
GDT_A_PRESENT | GDT_A_USER | GDT_A_DATAWRITE | GDT_A_DATA,
|
||||
GDT_F_64BIT);
|
||||
init_entry(
|
||||
&gdt->g_entries[4],
|
||||
GDT_A_PRESENT | GDT_A_USER | GDT_A_DATAWRITE | GDT_A_DATA,
|
||||
GDT_A_PRESENT | GDT_A_USER | GDT_A_CODEREAD | GDT_A_CODE,
|
||||
GDT_F_64BIT);
|
||||
|
||||
gdtp->g_ptr = (uint64_t)gdt;
|
||||
|
||||
@@ -11,6 +11,41 @@ ml_hwlock_lock:
|
||||
|
||||
mov $1, %ecx
|
||||
|
||||
mfence
|
||||
|
||||
1: mov $0, %eax
|
||||
lock cmpxchg %ecx, (%rdi)
|
||||
jne 1b
|
||||
|
||||
pop %rbp
|
||||
ret
|
||||
|
||||
|
||||
.global ml_hwlock_unlock
|
||||
.type ml_hwlock_unlock, @function
|
||||
|
||||
/* %rdi = pointer to ml_hwlock_t (int) */
|
||||
ml_hwlock_unlock:
|
||||
push %rbp
|
||||
mov %rsp, %rbp
|
||||
|
||||
movl $0, (%rdi)
|
||||
mfence
|
||||
|
||||
pop %rbp
|
||||
ret
|
||||
|
||||
|
||||
.global ml_hwlock_lock_irq
|
||||
.type ml_hwlock_lock_irq, @function
|
||||
|
||||
/* %rdi = pointer to ml_hwlock_t (int) */
|
||||
ml_hwlock_lock_irq:
|
||||
push %rbp
|
||||
mov %rsp, %rbp
|
||||
|
||||
mov $1, %ecx
|
||||
|
||||
cli
|
||||
mfence
|
||||
|
||||
@@ -21,11 +56,12 @@ ml_hwlock_lock:
|
||||
pop %rbp
|
||||
ret
|
||||
|
||||
.global ml_hwlock_unlock
|
||||
.type ml_hwlock_unlock, @function
|
||||
|
||||
.global ml_hwlock_unlock_irq
|
||||
.type ml_hwlock_unlock_irq, @function
|
||||
|
||||
/* %rdi = pointer to ml_hwlock_t (int) */
|
||||
ml_hwlock_unlock:
|
||||
ml_hwlock_unlock_irq:
|
||||
push %rbp
|
||||
mov %rsp, %rbp
|
||||
|
||||
@@ -42,7 +78,7 @@ ml_hwlock_unlock:
|
||||
|
||||
/* %rdi = pointer to ml_hwlock_t (int)
|
||||
%rsi = pointer to quadword to store rflags in */
|
||||
ml_hwlock_lock_irqsave:
|
||||
ml_hwlock_lock_irqsave:
|
||||
push %rbp
|
||||
mov %rsp, %rbp
|
||||
|
||||
@@ -62,6 +98,7 @@ ml_hwlock_lock_irqsave:
|
||||
pop %rbp
|
||||
ret
|
||||
|
||||
|
||||
.global ml_hwlock_unlock_irqrestore
|
||||
.type ml_hwlock_unlock_irqrestore, @function
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
#define ARCH_GDT_H_
|
||||
|
||||
#include <arch/tss.h>
|
||||
#include <mango/compiler.h>
|
||||
#include <kernel/compiler.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#ifndef ARCH_IRQ_H_
|
||||
#define ARCH_IRQ_H_
|
||||
|
||||
#include <mango/compiler.h>
|
||||
#include <mango/queue.h>
|
||||
#include <kernel/compiler.h>
|
||||
#include <kernel/queue.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
@@ -3,7 +3,8 @@
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#define MSR_GS_BASE 0xC0000101
|
||||
#define MSR_GS_BASE 0xC0000101
|
||||
#define MSR_KERNEL_GS_BASE 0xC0000102
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#ifndef ARCH_PAGING_H_
|
||||
#define ARCH_PAGING_H_
|
||||
|
||||
#include <mango/types.h>
|
||||
#include <mango/compiler.h>
|
||||
#include <kernel/types.h>
|
||||
#include <kernel/compiler.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#ifndef ARCH_TSS_H_
|
||||
#define ARCH_TSS_H_
|
||||
|
||||
#include <mango/compiler.h>
|
||||
#include <mango/types.h>
|
||||
#include <kernel/compiler.h>
|
||||
#include <kernel/types.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#define TSS_GDT_INDEX 5
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
#ifndef MANGO_X86_64_CPU_H_
|
||||
#define MANGO_X86_64_CPU_H_
|
||||
#ifndef KERNEL_X86_64_CPU_H_
|
||||
#define KERNEL_X86_64_CPU_H_
|
||||
|
||||
#include <arch/gdt.h>
|
||||
#include <arch/irq.h>
|
||||
#include <arch/tss.h>
|
||||
#include <mango/types.h>
|
||||
#include <kernel/types.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@@ -38,6 +38,10 @@ typedef struct ml_cpu_block {
|
||||
struct cpu_data *c_data;
|
||||
} ml_cpu_block;
|
||||
|
||||
struct ml_int_context {
|
||||
uint64_t rip, cs, rflags, rsp, ss;
|
||||
};
|
||||
|
||||
struct ml_cpu_context {
|
||||
uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
|
||||
uint64_t rdi, rsi, rbp, unused_rsp, rbx, rdx, rcx, rax;
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_X86_64_HWLOCK_H_
|
||||
#define MANGO_X86_64_HWLOCK_H_
|
||||
#ifndef KERNEL_X86_64_HWLOCK_H_
|
||||
#define KERNEL_X86_64_HWLOCK_H_
|
||||
|
||||
#define ML_HWLOCK_INIT (0)
|
||||
|
||||
@@ -12,6 +12,9 @@ typedef int ml_hwlock_t;
|
||||
extern void ml_hwlock_lock(ml_hwlock_t *lck);
|
||||
extern void ml_hwlock_unlock(ml_hwlock_t *lck);
|
||||
|
||||
extern void ml_hwlock_lock_irq(ml_hwlock_t *lck);
|
||||
extern void ml_hwlock_unlock_irq(ml_hwlock_t *lck);
|
||||
|
||||
extern void ml_hwlock_lock_irqsave(ml_hwlock_t *lck, unsigned long *flags);
|
||||
extern void ml_hwlock_unlock_irqrestore(ml_hwlock_t *lck, unsigned long flags);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_X86_64_INIT_H_
|
||||
#define MANGO_X86_64_INIT_H_
|
||||
#ifndef KERNEL_X86_64_INIT_H_
|
||||
#define KERNEL_X86_64_INIT_H_
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
5
arch/x86_64/include/kernel/machine/irq.h
Normal file
5
arch/x86_64/include/kernel/machine/irq.h
Normal file
@@ -0,0 +1,5 @@
|
||||
#ifndef KERNEL_X86_64_IRQ_H_
|
||||
#define KERNEL_X86_64_IRQ_H_
|
||||
|
||||
|
||||
#endif
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_X86_64_PANIC_H_
|
||||
#define MANGO_X86_64_PANIC_H_
|
||||
#ifndef KERNEL_X86_64_PANIC_H_
|
||||
#define KERNEL_X86_64_PANIC_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_X86_64_PMAP_H_
|
||||
#define MANGO_X86_64_PMAP_H_
|
||||
#ifndef KERNEL_X86_64_PMAP_H_
|
||||
#define KERNEL_X86_64_PMAP_H_
|
||||
|
||||
#include <arch/paging.h>
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#ifndef MANGO_X86_64_THREAD_H_
|
||||
#define MANGO_X86_64_THREAD_H_
|
||||
#ifndef KERNEL_X86_64_THREAD_H_
|
||||
#define KERNEL_X86_64_THREAD_H_
|
||||
|
||||
#include <mango/sched.h>
|
||||
#include <kernel/sched.h>
|
||||
|
||||
struct ml_cpu_context;
|
||||
|
||||
@@ -21,9 +21,11 @@ extern void ml_thread_switch_user(void);
|
||||
extern void ml_thread_prepare_kernel_context(uintptr_t ip, uintptr_t *sp);
|
||||
/* prepare the stack so that ml_thread_switch_user can jump to usermode
|
||||
* with the specified IP/user SP */
|
||||
extern void ml_thread_prepare_user_context(
|
||||
extern kern_status_t ml_thread_prepare_user_context(
|
||||
virt_addr_t ip,
|
||||
virt_addr_t user_sp,
|
||||
virt_addr_t *kernel_sp);
|
||||
virt_addr_t *kernel_sp,
|
||||
const uintptr_t *args,
|
||||
size_t nr_args);
|
||||
|
||||
#endif
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_X86_64_VM_H_
|
||||
#define MANGO_X86_64_VM_H_
|
||||
#ifndef KERNEL_X86_64_VM_H_
|
||||
#define KERNEL_X86_64_VM_H_
|
||||
|
||||
/* kernel higher-half base virtual address. */
|
||||
#define VM_KERNEL_VOFFSET 0xFFFFFFFF80000000
|
||||
@@ -1,5 +0,0 @@
|
||||
#ifndef MANGO_X86_64_IRQ_H_
|
||||
#define MANGO_X86_64_IRQ_H_
|
||||
|
||||
|
||||
#endif
|
||||
@@ -2,23 +2,36 @@
|
||||
#include <arch/pit.h>
|
||||
#include <arch/serial.h>
|
||||
#include <arch/vgacon.h>
|
||||
#include <mango/arg.h>
|
||||
#include <mango/bsp.h>
|
||||
#include <mango/clock.h>
|
||||
#include <mango/console.h>
|
||||
#include <mango/cpu.h>
|
||||
#include <mango/init.h>
|
||||
#include <mango/libc/stdio.h>
|
||||
#include <mango/machine/cpu.h>
|
||||
#include <mango/memblock.h>
|
||||
#include <mango/object.h>
|
||||
#include <mango/percpu.h>
|
||||
#include <mango/pmap.h>
|
||||
#include <mango/printk.h>
|
||||
#include <mango/types.h>
|
||||
#include <mango/vm.h>
|
||||
#include <kernel/arg.h>
|
||||
#include <kernel/bsp.h>
|
||||
#include <kernel/clock.h>
|
||||
#include <kernel/console.h>
|
||||
#include <kernel/cpu.h>
|
||||
#include <kernel/init.h>
|
||||
#include <kernel/libc/stdio.h>
|
||||
#include <kernel/machine/cpu.h>
|
||||
#include <kernel/memblock.h>
|
||||
#include <kernel/object.h>
|
||||
#include <kernel/percpu.h>
|
||||
#include <kernel/pmap.h>
|
||||
#include <kernel/printk.h>
|
||||
#include <kernel/types.h>
|
||||
#include <kernel/vm.h>
|
||||
|
||||
#define PTR32(x) ((void *)((uintptr_t)(x)))
|
||||
#define PTR32(x) ((void *)((uintptr_t)(x)))
|
||||
|
||||
/* the physical address of the start of the memblock heap.
|
||||
* this is an arbirary value; the heap can start anywhere in memory.
|
||||
* any reserved areas of memory (the kernel, bsp, bios data, etc) are
|
||||
* automatically taken into account.
|
||||
* HOWEVER, this value will dictate how much physical memory is required for
|
||||
* the kernel to boot successfully.
|
||||
* the value of 16MiB (0x1000000) means that all heap allocations will be
|
||||
* above 16MiB, leaving the area below free for DMA operations.
|
||||
* this value CAN be reduced all the way to zero to minimise the amount of
|
||||
* memory required to boot, but this may leave you with no DMA memory available.
|
||||
*/
|
||||
#define MEMBLOCK_HEAP_START 0x1000000
|
||||
|
||||
static ml_cpu_block g_bootstrap_cpu = {0};
|
||||
|
||||
@@ -33,7 +46,7 @@ static void bootstrap_cpu_init(void)
|
||||
|
||||
static void early_vm_init(uintptr_t reserve_end)
|
||||
{
|
||||
uintptr_t alloc_start = VM_KERNEL_VOFFSET;
|
||||
uintptr_t alloc_start = VM_KERNEL_VOFFSET + MEMBLOCK_HEAP_START;
|
||||
/* boot code mapped 2 GiB of memory from
|
||||
VM_KERNEL_VOFFSET */
|
||||
uintptr_t alloc_end = VM_KERNEL_VOFFSET + 0x7fffffff;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#include <mango/init.h>
|
||||
#include <kernel/init.h>
|
||||
|
||||
extern char __initcall0_start[];
|
||||
extern char __initcall1_start[];
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
#include <arch/irq.h>
|
||||
#include <arch/msr.h>
|
||||
#include <arch/ports.h>
|
||||
#include <mango/cpu.h>
|
||||
#include <mango/libc/string.h>
|
||||
#include <mango/machine/cpu.h>
|
||||
#include <mango/machine/irq.h>
|
||||
#include <mango/panic.h>
|
||||
#include <mango/sched.h>
|
||||
#include <mango/syscall.h>
|
||||
#include <kernel/cpu.h>
|
||||
#include <kernel/libc/string.h>
|
||||
#include <kernel/machine/cpu.h>
|
||||
#include <kernel/machine/irq.h>
|
||||
#include <kernel/panic.h>
|
||||
#include <kernel/sched.h>
|
||||
#include <kernel/syscall.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#define MAX_ISR_HANDLERS 16
|
||||
@@ -31,7 +31,7 @@ static uintptr_t int_entry_points[NR_IDT_ENTRIES];
|
||||
static void set_syscall_gate(uintptr_t rip)
|
||||
{
|
||||
uint64_t user_cs = 0x13;
|
||||
uint64_t kernel_cs = 0x8;
|
||||
uint64_t kernel_cs = 0x08;
|
||||
|
||||
uintptr_t star_reg = 0xC0000081;
|
||||
uintptr_t lstar_reg = 0xC0000082;
|
||||
@@ -97,11 +97,7 @@ static void pf_handler(struct ml_cpu_context *regs)
|
||||
|
||||
virt_addr_t fault_ptr = pf_faultptr();
|
||||
|
||||
kern_status_t status = KERN_FATAL_ERROR;
|
||||
|
||||
if (regs->err_no & PF_USER) {
|
||||
status = pmap_handle_fault(fault_ptr, fault_flags);
|
||||
}
|
||||
kern_status_t status = pmap_handle_fault(fault_ptr, fault_flags);
|
||||
|
||||
if (status == KERN_OK) {
|
||||
return;
|
||||
@@ -206,7 +202,7 @@ void irq_dispatch(struct ml_cpu_context *regs)
|
||||
void syscall_dispatch(struct ml_cpu_context *regs)
|
||||
{
|
||||
unsigned int sysid = regs->rax;
|
||||
virt_addr_t syscall_impl = syscall_get_func(sysid);
|
||||
virt_addr_t syscall_impl = syscall_get_function(sysid);
|
||||
|
||||
if (syscall_impl == 0) {
|
||||
regs->rax = KERN_UNSUPPORTED;
|
||||
@@ -226,6 +222,7 @@ void syscall_dispatch(struct ml_cpu_context *regs)
|
||||
|
||||
SYSCALL_SIGNATURE(fn) = (SYSCALL_SIGNATURE())syscall_impl;
|
||||
|
||||
ml_int_enable();
|
||||
regs->rax
|
||||
= fn(regs->rdi,
|
||||
regs->rsi,
|
||||
@@ -235,6 +232,7 @@ void syscall_dispatch(struct ml_cpu_context *regs)
|
||||
regs->r9,
|
||||
regs->r13,
|
||||
regs->r14);
|
||||
ml_int_disable();
|
||||
}
|
||||
|
||||
void hook_irq(enum irq_vector vec, struct irq_hook *hook)
|
||||
|
||||
@@ -332,80 +332,115 @@ IRQ 223, 255
|
||||
|
||||
isr_common_stub:
|
||||
PUSH_REGS
|
||||
|
||||
|
||||
# When ISR occurs in Ring 3, CPU sets %ss (and other non-code selectors)
|
||||
# to 0.
|
||||
mov %ss, %ax
|
||||
cmp $0, %ax
|
||||
jne isr_skipgs1
|
||||
|
||||
mov $0x10, %ax
|
||||
mov %ax, %ss
|
||||
swapgs
|
||||
|
||||
isr_skipgs1:
|
||||
mov %rsp, %rdi
|
||||
call isr_dispatch
|
||||
|
||||
|
||||
POP_REGS
|
||||
add $16, %rsp
|
||||
|
||||
cmpq $0x1b, 32(%rsp)
|
||||
jne isr_skipgs2
|
||||
|
||||
swapgs
|
||||
|
||||
isr_skipgs2:
|
||||
|
||||
iretq
|
||||
|
||||
|
||||
|
||||
.global irq_common_stub
|
||||
.type irq_common_stub, @function
|
||||
|
||||
irq_common_stub:
|
||||
PUSH_REGS
|
||||
|
||||
|
||||
# When IRQ occurs in Ring 3, CPU sets %ss (and other non-code selectors)
|
||||
# to 0.
|
||||
mov %ss, %ax
|
||||
cmp $0, %ax
|
||||
jne irq_skipgs1
|
||||
|
||||
mov $0x10, %ax
|
||||
mov %ax, %ss
|
||||
swapgs
|
||||
|
||||
irq_skipgs1:
|
||||
mov %rsp, %rdi
|
||||
call irq_dispatch
|
||||
|
||||
|
||||
|
||||
POP_REGS
|
||||
add $16, %rsp
|
||||
|
||||
cmpq $0x1b, 32(%rsp)
|
||||
jne isr_skipgs2
|
||||
|
||||
swapgs
|
||||
|
||||
irq_skipgs2:
|
||||
|
||||
iretq
|
||||
|
||||
|
||||
|
||||
.global syscall_gate
|
||||
.type syscall_gate, @function
|
||||
|
||||
|
||||
.extern syscall_dispatch
|
||||
.type syscall_dispatch, @function
|
||||
|
||||
|
||||
syscall_gate:
|
||||
swapgs
|
||||
movq %rsp, %gs:20 # GS+20 = rsp2 in the current TSS block (user stack storage)
|
||||
movq %gs:4, %rsp # GS+4 = rsp0 in the current TSS block (per-thread kstack)
|
||||
|
||||
# start building a pf_cpu_context
|
||||
movq %rsp, %gs:94 # GS+20 = rsp2 in the current TSS block (user stack storage)
|
||||
movq %gs:78, %rsp # GS+4 = rsp0 in the current TSS block (per-thread kstack)
|
||||
|
||||
# start building a ml_cpu_context
|
||||
pushq $0x1b
|
||||
pushq %gs:20
|
||||
pushq %gs:94
|
||||
push %r11
|
||||
push $0x23
|
||||
push %rcx
|
||||
|
||||
|
||||
pushq $0
|
||||
pushq $0x80
|
||||
|
||||
|
||||
PUSH_REGS
|
||||
|
||||
|
||||
mov %rsp, %rdi
|
||||
|
||||
# switch back to user gs while in syscall_dispatch. Interrupts are enabled in syscall_dispatch,
|
||||
# and if the task gets pre-empted, the incoming task will expect %gs to have its usermode value.
|
||||
swapgs
|
||||
|
||||
|
||||
call syscall_dispatch
|
||||
|
||||
|
||||
POP_REGS
|
||||
|
||||
|
||||
add $16, %rsp
|
||||
pop %rcx
|
||||
add $8, %rsp
|
||||
pop %r11
|
||||
add $16, %rsp
|
||||
|
||||
|
||||
movq %gs:94, %rsp # GS+20 = rsp2 in the current TSS block
|
||||
|
||||
swapgs
|
||||
movq %gs:20, %rsp # GS+20 = rsp2 in the current TSS block
|
||||
swapgs
|
||||
|
||||
|
||||
# back to usermode
|
||||
sysretq
|
||||
|
||||
|
||||
|
||||
.global pf_faultptr
|
||||
.type pf_faultptr, @function
|
||||
pf_faultptr:
|
||||
mov %cr2, %rax
|
||||
ret
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
#include <arch/irq.h>
|
||||
#include <mango/libc/stdio.h>
|
||||
#include <mango/machine/cpu.h>
|
||||
#include <mango/machine/panic.h>
|
||||
#include <mango/printk.h>
|
||||
#include <mango/vm.h>
|
||||
#include <kernel/libc/stdio.h>
|
||||
#include <kernel/machine/cpu.h>
|
||||
#include <kernel/machine/panic.h>
|
||||
#include <kernel/printk.h>
|
||||
#include <kernel/vm.h>
|
||||
|
||||
#define R_CF 0
|
||||
#define R_PF 2
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#include <arch/irq.h>
|
||||
#include <arch/ports.h>
|
||||
#include <mango/clock.h>
|
||||
#include <mango/cpu.h>
|
||||
#include <mango/printk.h>
|
||||
#include <kernel/clock.h>
|
||||
#include <kernel/cpu.h>
|
||||
#include <kernel/printk.h>
|
||||
|
||||
#define PIT_COUNTER0 0x40
|
||||
#define PIT_CMD 0x43
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
#include <mango/compiler.h>
|
||||
#include <mango/libc/stdio.h>
|
||||
#include <mango/memblock.h>
|
||||
#include <mango/pmap.h>
|
||||
#include <mango/printk.h>
|
||||
#include <mango/sched.h>
|
||||
#include <kernel/compiler.h>
|
||||
#include <kernel/libc/stdio.h>
|
||||
#include <kernel/memblock.h>
|
||||
#include <kernel/pmap.h>
|
||||
#include <kernel/printk.h>
|
||||
#include <kernel/sched.h>
|
||||
#include <kernel/types.h>
|
||||
#include <kernel/vm-object.h>
|
||||
#include <kernel/vm-region.h>
|
||||
#include <kernel/vm.h>
|
||||
#include <mango/status.h>
|
||||
#include <mango/types.h>
|
||||
#include <mango/vm-object.h>
|
||||
#include <mango/vm-region.h>
|
||||
#include <mango/vm.h>
|
||||
|
||||
/* some helpful datasize constants */
|
||||
#define C_1GiB 0x40000000ULL
|
||||
@@ -43,7 +43,7 @@ static pmap_t alloc_pmap(void)
|
||||
return vm_virt_to_phys(p);
|
||||
}
|
||||
|
||||
static pte_t make_pte(pfn_t pfn, enum vm_prot prot, enum page_size size)
|
||||
static pte_t make_pte(pfn_t pfn, vm_prot_t prot, enum page_size size)
|
||||
{
|
||||
pte_t v = pfn;
|
||||
|
||||
@@ -139,7 +139,7 @@ static kern_status_t do_pmap_add(
|
||||
pmap_t pmap,
|
||||
virt_addr_t pv,
|
||||
pfn_t pfn,
|
||||
enum vm_prot prot,
|
||||
vm_prot_t prot,
|
||||
enum page_size size)
|
||||
{
|
||||
unsigned int pml4t_index = BAD_INDEX, pdpt_index = BAD_INDEX,
|
||||
@@ -364,14 +364,19 @@ kern_status_t pmap_handle_fault(
|
||||
struct task *task = current_task();
|
||||
struct vm_region *space = task->t_address_space;
|
||||
|
||||
return vm_region_demand_map(space, fault_addr, flags);
|
||||
unsigned long lock_flags;
|
||||
vm_region_lock_irqsave(space, &lock_flags);
|
||||
kern_status_t status = vm_region_demand_map(space, fault_addr, flags);
|
||||
vm_region_unlock_irqrestore(space, lock_flags);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
kern_status_t pmap_add(
|
||||
pmap_t pmap,
|
||||
virt_addr_t p,
|
||||
pfn_t pfn,
|
||||
enum vm_prot prot,
|
||||
vm_prot_t prot,
|
||||
enum pmap_flags flags)
|
||||
{
|
||||
enum page_size ps = PS_4K;
|
||||
@@ -387,18 +392,18 @@ kern_status_t pmap_add_block(
|
||||
virt_addr_t p,
|
||||
pfn_t pfn,
|
||||
size_t len,
|
||||
enum vm_prot prot,
|
||||
vm_prot_t prot,
|
||||
enum pmap_flags flags)
|
||||
{
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
kern_status_t pmap_remove(pmap_t pmap, void *p)
|
||||
kern_status_t pmap_remove(pmap_t pmap, virt_addr_t p)
|
||||
{
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
kern_status_t pmap_remove_range(pmap_t pmap, void *p, size_t len)
|
||||
kern_status_t pmap_remove_range(pmap_t pmap, virt_addr_t p, size_t len)
|
||||
{
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#include <arch/irq.h>
|
||||
#include <arch/ports.h>
|
||||
#include <arch/serial.h>
|
||||
#include <mango/libc/stdio.h>
|
||||
#include <mango/printk.h>
|
||||
#include <kernel/libc/stdio.h>
|
||||
#include <kernel/printk.h>
|
||||
|
||||
#define COM1 0x3F8
|
||||
#define COM2 0x2F8
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
#include <mango/machine/cpu.h>
|
||||
#include <mango/machine/thread.h>
|
||||
#include <kernel/machine/cpu.h>
|
||||
#include <kernel/machine/thread.h>
|
||||
|
||||
#define MAX_REG_ARGS 6
|
||||
#define REG_ARG_0 rdi
|
||||
#define REG_ARG_1 rsi
|
||||
#define REG_ARG_2 rdx
|
||||
#define REG_ARG_3 rcx
|
||||
#define REG_ARG_4 r8
|
||||
#define REG_ARG_5 r9
|
||||
|
||||
/* this is the context information restored by ml_thread_switch.
|
||||
* since ml_thread_switch only jumps to kernel-mode, IRETQ isn't used,
|
||||
@@ -23,19 +31,49 @@ void ml_thread_prepare_kernel_context(uintptr_t ip, uintptr_t *sp)
|
||||
ctx->rfl = 0x202;
|
||||
}
|
||||
|
||||
extern void ml_thread_prepare_user_context(
|
||||
extern kern_status_t ml_thread_prepare_user_context(
|
||||
virt_addr_t ip,
|
||||
virt_addr_t user_sp,
|
||||
virt_addr_t *kernel_sp)
|
||||
virt_addr_t *kernel_sp,
|
||||
const uintptr_t *args,
|
||||
size_t nr_args)
|
||||
{
|
||||
(*kernel_sp) -= sizeof(struct ml_cpu_context);
|
||||
|
||||
struct ml_cpu_context *ctx = (struct ml_cpu_context *)(*kernel_sp);
|
||||
memset(ctx, 0x0, sizeof *ctx);
|
||||
ctx->rip = ip;
|
||||
ctx->rsp = user_sp;
|
||||
ctx->ss = 0x23;
|
||||
ctx->cs = 0x1B;
|
||||
ctx->ss = 0x1b;
|
||||
ctx->cs = 0x23;
|
||||
ctx->rflags = 0x202;
|
||||
ctx->rdi = 0; // arg 0
|
||||
ctx->rsi = 0; // arg 1
|
||||
|
||||
for (size_t i = 0; i < nr_args; i++) {
|
||||
switch (i) {
|
||||
case 0:
|
||||
ctx->REG_ARG_0 = args[i];
|
||||
break;
|
||||
case 1:
|
||||
ctx->REG_ARG_1 = args[i];
|
||||
break;
|
||||
case 2:
|
||||
ctx->REG_ARG_2 = args[i];
|
||||
break;
|
||||
case 3:
|
||||
ctx->REG_ARG_3 = args[i];
|
||||
break;
|
||||
case 4:
|
||||
ctx->REG_ARG_4 = args[i];
|
||||
break;
|
||||
case 5:
|
||||
ctx->REG_ARG_5 = args[i];
|
||||
break;
|
||||
default:
|
||||
return KERN_INVALID_ARGUMENT;
|
||||
}
|
||||
}
|
||||
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
@@ -73,4 +73,5 @@ ml_thread_switch_user:
|
||||
pop %rax
|
||||
|
||||
add $16, %rsp
|
||||
swapgs
|
||||
iretq
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#include "arch/msr.h"
|
||||
|
||||
#include <arch/gdt.h>
|
||||
#include <arch/tss.h>
|
||||
#include <mango/libc/string.h>
|
||||
#include <kernel/libc/string.h>
|
||||
|
||||
static void tss_flush(int index)
|
||||
{
|
||||
@@ -22,9 +20,6 @@ void tss_init(struct tss *tss, struct tss_ptr *ptr)
|
||||
void tss_load(struct tss *tss)
|
||||
{
|
||||
tss_flush(TSS_GDT_INDEX);
|
||||
|
||||
uintptr_t kernel_gs_base_reg = 0xC0000102;
|
||||
wrmsr(kernel_gs_base_reg, (uintptr_t)tss);
|
||||
}
|
||||
|
||||
virt_addr_t tss_get_kstack(struct tss *tss)
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
#include <arch/irq.h>
|
||||
#include <arch/ports.h>
|
||||
#include <arch/serial.h>
|
||||
#include <mango/libc/stdio.h>
|
||||
#include <mango/machine/vm.h>
|
||||
#include <mango/printk.h>
|
||||
#include <kernel/libc/stdio.h>
|
||||
#include <kernel/machine/vm.h>
|
||||
#include <kernel/printk.h>
|
||||
|
||||
struct vga_console {
|
||||
uint16_t *vga_framebuffer;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#include <mango/bitmap.h>
|
||||
#include <mango/libc/string.h>
|
||||
#include <kernel/bitmap.h>
|
||||
#include <kernel/libc/string.h>
|
||||
|
||||
void bitmap_zero(unsigned long *map, unsigned long nbits)
|
||||
{
|
||||
|
||||
@@ -57,7 +57,7 @@
|
||||
provide a comparator function.
|
||||
*/
|
||||
|
||||
#include <mango/btree.h>
|
||||
#include <kernel/btree.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#define MAX(a, b) ((a) > (b) ? (a) : (b))
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#include <mango/queue.h>
|
||||
#include <kernel/queue.h>
|
||||
|
||||
size_t queue_length(struct queue *q)
|
||||
{
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#include <mango/ringbuffer.h>
|
||||
#include <mango/sched.h>
|
||||
#include <kernel/ringbuffer.h>
|
||||
#include <kernel/sched.h>
|
||||
|
||||
size_t ringbuffer_unread(struct ringbuffer *ring_buffer)
|
||||
{
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#ifndef MANGO_ARG_H_
|
||||
#define MANGO_ARG_H_
|
||||
#ifndef KERNEL_ARG_H_
|
||||
#define KERNEL_ARG_H_
|
||||
|
||||
#include <mango/types.h>
|
||||
#include <stdbool.h>
|
||||
#include <mango/status.h>
|
||||
|
||||
#define CMDLINE_MAX 4096
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_BITMAP_H_
|
||||
#define MANGO_BITMAP_H_
|
||||
#ifndef KERNEL_BITMAP_H_
|
||||
#define KERNEL_BITMAP_H_
|
||||
|
||||
#include <stdbool.h>
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
#ifndef MANGO_BSP_H_
|
||||
#define MANGO_BSP_H_
|
||||
#ifndef KERNEL_BSP_H_
|
||||
#define KERNEL_BSP_H_
|
||||
|
||||
#include <mango/compiler.h>
|
||||
#include <kernel/compiler.h>
|
||||
#include <mango/status.h>
|
||||
#include <mango/types.h>
|
||||
#include <kernel/types.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
@@ -20,99 +20,130 @@
|
||||
software without specific prior written permission.
|
||||
*/
|
||||
|
||||
#ifndef MANGO_BTREE_H_
|
||||
#define MANGO_BTREE_H_
|
||||
#ifndef KERNEL_BTREE_H_
|
||||
#define KERNEL_BTREE_H_
|
||||
|
||||
#include <stdbool.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* if your custom structure contains a struct btree_node (i.e. it can be part of a btree),
|
||||
you can use this macro to convert a struct btree_node* to a your_type*
|
||||
/* if your custom structure contains a struct btree_node (i.e. it can be part of
|
||||
a btree), you can use this macro to convert a struct btree_node* to a
|
||||
your_type*
|
||||
|
||||
@param t the name of your custom type (something that can be passed to offsetof)
|
||||
@param m the name of the struct btree_node member variable within your custom type.
|
||||
@param v the struct btree_node pointer that you wish to convert. if this is NULL, NULL will be returned.
|
||||
@param t the name of your custom type (something that can be passed to
|
||||
offsetof)
|
||||
@param m the name of the struct btree_node member variable within your custom
|
||||
type.
|
||||
@param v the struct btree_node pointer that you wish to convert. if this is
|
||||
NULL, NULL will be returned.
|
||||
*/
|
||||
#define BTREE_CONTAINER(t, m, v) ((void *)((v) ? (uintptr_t)(v) - (offsetof(t, m)) : 0))
|
||||
#define BTREE_CONTAINER(t, m, v) \
|
||||
((void *)((v) ? (uintptr_t)(v) - (offsetof(t, m)) : 0))
|
||||
|
||||
/* defines a simple node insertion function.
|
||||
this function assumes that your nodes have simple integer keys that can be compared with the usual operators.
|
||||
this function assumes that your nodes have simple integer keys that can be
|
||||
compared with the usual operators.
|
||||
|
||||
EXAMPLE:
|
||||
if you have a tree node type like this:
|
||||
|
||||
struct my_tree_node {
|
||||
int key;
|
||||
struct btree_node base;
|
||||
struct btree_node base;
|
||||
}
|
||||
|
||||
You would use the following call to generate an insert function for a tree with this node type:
|
||||
You would use the following call to generate an insert function for a tree
|
||||
with this node type:
|
||||
|
||||
BTREE_DEFINE_SIMPLE_INSERT(struct my_tree_node, base, key, my_tree_node_insert);
|
||||
BTREE_DEFINE_SIMPLE_INSERT(struct my_tree_node, base, key,
|
||||
my_tree_node_insert);
|
||||
|
||||
Which would emit a function defined like:
|
||||
|
||||
static void my_tree_node_insert(struct btree *tree, struct my_tree_node *node);
|
||||
static void my_tree_node_insert(struct btree *tree, struct my_tree_node
|
||||
*node);
|
||||
|
||||
@param node_type your custom tree node type. usually a structure that contains a struct btree_node member.
|
||||
@param container_node_member the name of the struct btree_node member variable within your custom type.
|
||||
@param container_key_member the name of the key member variable within your custom type.
|
||||
@param node_type your custom tree node type. usually a structure that
|
||||
contains a struct btree_node member.
|
||||
@param container_node_member the name of the struct btree_node member
|
||||
variable within your custom type.
|
||||
@param container_key_member the name of the key member variable within your
|
||||
custom type.
|
||||
@param function_name the name of the function to generate.
|
||||
*/
|
||||
#define BTREE_DEFINE_SIMPLE_INSERT(node_type, container_node_member, container_key_member, function_name) \
|
||||
void function_name(struct btree *tree, node_type *node) \
|
||||
{ \
|
||||
if (!tree->b_root) { \
|
||||
tree->b_root = &node->container_node_member; \
|
||||
btree_insert_fixup(tree, &node->container_node_member); \
|
||||
return; \
|
||||
} \
|
||||
\
|
||||
struct btree_node *cur = tree->b_root; \
|
||||
while (1) { \
|
||||
node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
|
||||
struct btree_node *next = NULL; \
|
||||
\
|
||||
if (node->container_key_member > cur_node->container_key_member) { \
|
||||
next = btree_right(cur); \
|
||||
\
|
||||
if (!next) { \
|
||||
btree_put_right(cur, &node->container_node_member); \
|
||||
break; \
|
||||
} \
|
||||
} else if (node->container_key_member < cur_node->container_key_member) { \
|
||||
next = btree_left(cur); \
|
||||
\
|
||||
if (!next) { \
|
||||
btree_put_left(cur, &node->container_node_member); \
|
||||
break; \
|
||||
} \
|
||||
} else { \
|
||||
return; \
|
||||
} \
|
||||
\
|
||||
cur = next; \
|
||||
} \
|
||||
\
|
||||
btree_insert_fixup(tree, &node->container_node_member); \
|
||||
#define BTREE_DEFINE_SIMPLE_INSERT( \
|
||||
node_type, \
|
||||
container_node_member, \
|
||||
container_key_member, \
|
||||
function_name) \
|
||||
void function_name(struct btree *tree, node_type *node) \
|
||||
{ \
|
||||
if (!tree->b_root) { \
|
||||
tree->b_root = &node->container_node_member; \
|
||||
btree_insert_fixup( \
|
||||
tree, \
|
||||
&node->container_node_member); \
|
||||
return; \
|
||||
} \
|
||||
\
|
||||
struct btree_node *cur = tree->b_root; \
|
||||
while (1) { \
|
||||
node_type *cur_node = BTREE_CONTAINER( \
|
||||
node_type, \
|
||||
container_node_member, \
|
||||
cur); \
|
||||
struct btree_node *next = NULL; \
|
||||
\
|
||||
if (node->container_key_member \
|
||||
> cur_node->container_key_member) { \
|
||||
next = btree_right(cur); \
|
||||
\
|
||||
if (!next) { \
|
||||
btree_put_right( \
|
||||
cur, \
|
||||
&node->container_node_member); \
|
||||
break; \
|
||||
} \
|
||||
} else if ( \
|
||||
node->container_key_member \
|
||||
< cur_node->container_key_member) { \
|
||||
next = btree_left(cur); \
|
||||
\
|
||||
if (!next) { \
|
||||
btree_put_left( \
|
||||
cur, \
|
||||
&node->container_node_member); \
|
||||
break; \
|
||||
} \
|
||||
} else { \
|
||||
return; \
|
||||
} \
|
||||
\
|
||||
cur = next; \
|
||||
} \
|
||||
\
|
||||
btree_insert_fixup(tree, &node->container_node_member); \
|
||||
}
|
||||
|
||||
/* defines a node insertion function.
|
||||
this function should be used for trees with complex node keys that cannot be directly compared.
|
||||
a comparator for your keys must be supplied.
|
||||
this function should be used for trees with complex node keys that cannot be
|
||||
directly compared. a comparator for your keys must be supplied.
|
||||
|
||||
EXAMPLE:
|
||||
if you have a tree node type like this:
|
||||
|
||||
struct my_tree_node {
|
||||
complex_key_t key;
|
||||
struct btree_node base;
|
||||
struct btree_node base;
|
||||
}
|
||||
|
||||
You would need to define a comparator function or macro with the following signature:
|
||||
You would need to define a comparator function or macro with the following
|
||||
signature:
|
||||
|
||||
int my_comparator(struct my_tree_node *a, struct my_tree_node *b);
|
||||
|
||||
@@ -122,102 +153,136 @@ extern "C" {
|
||||
return 0 if a == b
|
||||
return 1 if a > b
|
||||
|
||||
You would use the following call to generate an insert function for a tree with this node type:
|
||||
You would use the following call to generate an insert function for a tree
|
||||
with this node type:
|
||||
|
||||
BTREE_DEFINE_INSERT(struct my_tree_node, base, key, my_tree_node_insert, my_comparator);
|
||||
BTREE_DEFINE_INSERT(struct my_tree_node, base, key, my_tree_node_insert,
|
||||
my_comparator);
|
||||
|
||||
Which would emit a function defined like:
|
||||
|
||||
static void my_tree_node_insert(struct btree *tree, struct my_tree_node *node);
|
||||
static void my_tree_node_insert(struct btree *tree, struct my_tree_node
|
||||
*node);
|
||||
|
||||
@param node_type your custom tree node type. usually a structure that contains a struct btree_node member.
|
||||
@param container_node_member the name of the struct btree_node member variable within your custom type.
|
||||
@param container_key_member the name of the key member variable within your custom type.
|
||||
@param node_type your custom tree node type. usually a structure that
|
||||
contains a struct btree_node member.
|
||||
@param container_node_member the name of the struct btree_node member
|
||||
variable within your custom type.
|
||||
@param container_key_member the name of the key member variable within your
|
||||
custom type.
|
||||
@param function_name the name of the function to generate.
|
||||
@param comparator the name of a comparator function or functional-macro that conforms to the
|
||||
requirements listed above.
|
||||
@param comparator the name of a comparator function or functional-macro that
|
||||
conforms to the requirements listed above.
|
||||
*/
|
||||
#define BTREE_DEFINE_INSERT(node_type, container_node_member, container_key_member, function_name, comparator) \
|
||||
void function_name(struct btree *tree, node_type *node) \
|
||||
{ \
|
||||
if (!tree->b_root) { \
|
||||
tree->b_root = &node->container_node_member; \
|
||||
btree_insert_fixup(tree, &node->container_node_member); \
|
||||
return; \
|
||||
} \
|
||||
\
|
||||
struct btree_node *cur = tree->b_root; \
|
||||
while (1) { \
|
||||
node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
|
||||
struct btree_node *next = NULL; \
|
||||
int cmp = comparator(node, cur_node); \
|
||||
\
|
||||
if (cmp == 1) { \
|
||||
next = btree_right(cur); \
|
||||
\
|
||||
if (!next) { \
|
||||
btree_put_right(cur, &node->container_node_member); \
|
||||
break; \
|
||||
} \
|
||||
} else if (cmp == -1) { \
|
||||
next = btree_left(cur); \
|
||||
\
|
||||
if (!next) { \
|
||||
btree_put_left(cur, &node->container_node_member); \
|
||||
break; \
|
||||
} \
|
||||
} else { \
|
||||
return; \
|
||||
} \
|
||||
\
|
||||
cur = next; \
|
||||
} \
|
||||
\
|
||||
btree_insert_fixup(tree, &node->container_node_member); \
|
||||
#define BTREE_DEFINE_INSERT( \
|
||||
node_type, \
|
||||
container_node_member, \
|
||||
container_key_member, \
|
||||
function_name, \
|
||||
comparator) \
|
||||
void function_name(struct btree *tree, node_type *node) \
|
||||
{ \
|
||||
if (!tree->b_root) { \
|
||||
tree->b_root = &node->container_node_member; \
|
||||
btree_insert_fixup( \
|
||||
tree, \
|
||||
&node->container_node_member); \
|
||||
return; \
|
||||
} \
|
||||
\
|
||||
struct btree_node *cur = tree->b_root; \
|
||||
while (1) { \
|
||||
node_type *cur_node = BTREE_CONTAINER( \
|
||||
node_type, \
|
||||
container_node_member, \
|
||||
cur); \
|
||||
struct btree_node *next = NULL; \
|
||||
int cmp = comparator(node, cur_node); \
|
||||
\
|
||||
if (cmp == 1) { \
|
||||
next = btree_right(cur); \
|
||||
\
|
||||
if (!next) { \
|
||||
btree_put_right( \
|
||||
cur, \
|
||||
&node->container_node_member); \
|
||||
break; \
|
||||
} \
|
||||
} else if (cmp == -1) { \
|
||||
next = btree_left(cur); \
|
||||
\
|
||||
if (!next) { \
|
||||
btree_put_left( \
|
||||
cur, \
|
||||
&node->container_node_member); \
|
||||
break; \
|
||||
} \
|
||||
} else { \
|
||||
return; \
|
||||
} \
|
||||
\
|
||||
cur = next; \
|
||||
} \
|
||||
\
|
||||
btree_insert_fixup(tree, &node->container_node_member); \
|
||||
}
|
||||
|
||||
/* defines a simple tree search function.
|
||||
this function assumes that your nodes have simple integer keys that can be compared with the usual operators.
|
||||
this function assumes that your nodes have simple integer keys that can be
|
||||
compared with the usual operators.
|
||||
|
||||
EXAMPLE:
|
||||
if you have a tree node type like this:
|
||||
|
||||
struct my_tree_node {
|
||||
int key;
|
||||
struct btree_node base;
|
||||
struct btree_node base;
|
||||
}
|
||||
|
||||
You would use the following call to generate a search function for a tree with this node type:
|
||||
You would use the following call to generate a search function for a tree
|
||||
with this node type:
|
||||
|
||||
BTREE_DEFINE_SIMPLE_GET(struct my_tree_node, int, base, key, my_tree_node_get);
|
||||
BTREE_DEFINE_SIMPLE_GET(struct my_tree_node, int, base, key,
|
||||
my_tree_node_get);
|
||||
|
||||
Which would emit a function defined like:
|
||||
|
||||
static struct my_tree_node *my_tree_node_get(struct btree *tree, int key);
|
||||
|
||||
@param node_type your custom tree node type. usually a structure that contains a struct btree_node member.
|
||||
@param key_type the type name of the key embedded in your custom tree node type. this type must be
|
||||
compatible with the builtin comparison operators.
|
||||
@param container_node_member the name of the struct btree_node member variable within your custom type.
|
||||
@param container_key_member the name of the key member variable within your custom type.
|
||||
@param node_type your custom tree node type. usually a structure that
|
||||
contains a struct btree_node member.
|
||||
@param key_type the type name of the key embedded in your custom tree node
|
||||
type. this type must be compatible with the builtin comparison operators.
|
||||
@param container_node_member the name of the struct btree_node member
|
||||
variable within your custom type.
|
||||
@param container_key_member the name of the key member variable within your
|
||||
custom type.
|
||||
@param function_name the name of the function to generate.
|
||||
*/
|
||||
#define BTREE_DEFINE_SIMPLE_GET(node_type, key_type, container_node_member, container_key_member, function_name) \
|
||||
node_type *function_name(struct btree *tree, key_type key) \
|
||||
{ \
|
||||
struct btree_node *cur = tree->b_root; \
|
||||
while (cur) { \
|
||||
node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
|
||||
if (key > cur_node->container_key_member) { \
|
||||
cur = btree_right(cur); \
|
||||
} else if (key < cur_node->container_key_member) { \
|
||||
cur = btree_left(cur); \
|
||||
} else { \
|
||||
return cur_node; \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
return NULL; \
|
||||
#define BTREE_DEFINE_SIMPLE_GET( \
|
||||
node_type, \
|
||||
key_type, \
|
||||
container_node_member, \
|
||||
container_key_member, \
|
||||
function_name) \
|
||||
node_type *function_name(struct btree *tree, key_type key) \
|
||||
{ \
|
||||
struct btree_node *cur = tree->b_root; \
|
||||
while (cur) { \
|
||||
node_type *cur_node = BTREE_CONTAINER( \
|
||||
node_type, \
|
||||
container_node_member, \
|
||||
cur); \
|
||||
if (key > cur_node->container_key_member) { \
|
||||
cur = btree_right(cur); \
|
||||
} else if (key < cur_node->container_key_member) { \
|
||||
cur = btree_left(cur); \
|
||||
} else { \
|
||||
return cur_node; \
|
||||
} \
|
||||
} \
|
||||
\
|
||||
return NULL; \
|
||||
}
|
||||
|
||||
/* perform an in-order traversal of a binary tree
|
||||
@@ -230,7 +295,7 @@ extern "C" {
|
||||
|
||||
struct my_tree_node {
|
||||
int key;
|
||||
struct btree_node base;
|
||||
struct btree_node base;
|
||||
}
|
||||
|
||||
and you want to do something like:
|
||||
@@ -241,15 +306,23 @@ extern "C" {
|
||||
|
||||
btree_foreach (struct my_tree_node, node, &my_tree, base) { ... }
|
||||
|
||||
@param iter_type the type name of the iterator variable. this should be the tree's node type, and shouldn't be a pointer.
|
||||
@param iter_type the type name of the iterator variable. this should be the
|
||||
tree's node type, and shouldn't be a pointer.
|
||||
@param iter_name the name of the iterator variable.
|
||||
@param tree_name a pointer to the tree to traverse.
|
||||
@param node_member the name of the struct btree_node member variable within the tree node type.
|
||||
@param node_member the name of the struct btree_node member variable within
|
||||
the tree node type.
|
||||
*/
|
||||
#define btree_foreach(iter_type, iter_name, tree_name, node_member) \
|
||||
for (iter_type *iter_name = BTREE_CONTAINER(iter_type, node_member, btree_first(tree_name)); \
|
||||
iter_name; \
|
||||
iter_name = BTREE_CONTAINER(iter_type, node_member, btree_next(&((iter_name)->node_member))))
|
||||
#define btree_foreach(iter_type, iter_name, tree_name, node_member) \
|
||||
for (iter_type *iter_name = BTREE_CONTAINER( \
|
||||
iter_type, \
|
||||
node_member, \
|
||||
btree_first(tree_name)); \
|
||||
iter_name; \
|
||||
iter_name = BTREE_CONTAINER( \
|
||||
iter_type, \
|
||||
node_member, \
|
||||
btree_next(&((iter_name)->node_member))))
|
||||
|
||||
/* perform an reverse in-order traversal of a binary tree
|
||||
|
||||
@@ -261,7 +334,7 @@ extern "C" {
|
||||
|
||||
struct my_tree_node {
|
||||
int key;
|
||||
struct btree_node base;
|
||||
struct btree_node base;
|
||||
}
|
||||
|
||||
and you want to do something like:
|
||||
@@ -272,35 +345,43 @@ extern "C" {
|
||||
|
||||
btree_foreach_r (struct my_tree_node, node, &my_tree, base) { ... }
|
||||
|
||||
@param iter_type the type name of the iterator variable. this should be the tree's node type, and shouldn't be a pointer.
|
||||
@param iter_type the type name of the iterator variable. this should be the
|
||||
tree's node type, and shouldn't be a pointer.
|
||||
@param iter_name the name of the iterator variable.
|
||||
@param tree_name a pointer to the tree to traverse.
|
||||
@param node_member the name of the struct btree_node member variable within the tree node type.
|
||||
@param node_member the name of the struct btree_node member variable within
|
||||
the tree node type.
|
||||
*/
|
||||
#define btree_foreach_r(iter_type, iter_name, tree_name, node_member) \
|
||||
for (iter_type *iter_name = BTREE_CONTAINER(iter_type, node_member, btree_last(tree_name)); \
|
||||
iter_name; \
|
||||
iter_name = BTREE_CONTAINER(iter_type, node_member, btree_prev(&((iter_name)->node_member))))
|
||||
#define btree_foreach_r(iter_type, iter_name, tree_name, node_member) \
|
||||
for (iter_type *iter_name \
|
||||
= BTREE_CONTAINER(iter_type, node_member, btree_last(tree_name)); \
|
||||
iter_name; \
|
||||
iter_name = BTREE_CONTAINER( \
|
||||
iter_type, \
|
||||
node_member, \
|
||||
btree_prev(&((iter_name)->node_member))))
|
||||
|
||||
/* binary tree nodes. this *cannot* be used directly. you need to define a custom node type
|
||||
that contains a member variable of type struct btree_node.
|
||||
/* binary tree nodes. this *cannot* be used directly. you need to define a
|
||||
custom node type that contains a member variable of type struct btree_node.
|
||||
|
||||
you would then use the supplied macros to define functions to manipulate your custom binary tree.
|
||||
you would then use the supplied macros to define functions to manipulate your
|
||||
custom binary tree.
|
||||
*/
|
||||
struct btree_node {
|
||||
struct btree_node *b_parent, *b_left, *b_right;
|
||||
unsigned short b_height;
|
||||
};
|
||||
|
||||
/* binary tree. unlike struct btree_node, you can define variables of type struct btree. */
|
||||
/* binary tree. unlike struct btree_node, you can define variables of type
|
||||
* struct btree. */
|
||||
struct btree {
|
||||
struct btree_node *b_root;
|
||||
};
|
||||
|
||||
/* re-balance a binary tree after an insertion operation.
|
||||
|
||||
NOTE that, if you define an insertion function using BTREE_DEFINE_INSERT or similar,
|
||||
this function will automatically called for you.
|
||||
NOTE that, if you define an insertion function using BTREE_DEFINE_INSERT or
|
||||
similar, this function will automatically called for you.
|
||||
|
||||
@param tree the tree to re-balance.
|
||||
@param node the node that was just inserted into the tree.
|
||||
@@ -316,29 +397,42 @@ extern void btree_delete(struct btree *tree, struct btree_node *node);
|
||||
|
||||
/* get the first node in a binary tree.
|
||||
|
||||
this will be the node with the smallest key (i.e. the node that is furthest-left from the root)
|
||||
this will be the node with the smallest key (i.e. the node that is
|
||||
furthest-left from the root)
|
||||
*/
|
||||
extern struct btree_node *btree_first(struct btree *tree);
|
||||
|
||||
/* get the last node in a binary tree.
|
||||
|
||||
this will be the node with the largest key (i.e. the node that is furthest-right from the root)
|
||||
this will be the node with the largest key (i.e. the node that is
|
||||
furthest-right from the root)
|
||||
*/
|
||||
extern struct btree_node *btree_last(struct btree *tree);
|
||||
/* for any binary tree node, this function returns the node with the next-largest key value */
|
||||
/* for any binary tree node, this function returns the node with the
|
||||
* next-largest key value */
|
||||
extern struct btree_node *btree_next(struct btree_node *node);
|
||||
/* for any binary tree node, this function returns the node with the next-smallest key value */
|
||||
/* for any binary tree node, this function returns the node with the
|
||||
* next-smallest key value */
|
||||
extern struct btree_node *btree_prev(struct btree_node *node);
|
||||
|
||||
static inline bool btree_empty(const struct btree *tree)
|
||||
{
|
||||
return tree->b_root == NULL;
|
||||
}
|
||||
|
||||
/* sets `child` as the immediate left-child of `parent` */
|
||||
static inline void btree_put_left(struct btree_node *parent, struct btree_node *child)
|
||||
static inline void btree_put_left(
|
||||
struct btree_node *parent,
|
||||
struct btree_node *child)
|
||||
{
|
||||
parent->b_left = child;
|
||||
child->b_parent = parent;
|
||||
}
|
||||
|
||||
/* sets `child` as the immediate right-child of `parent` */
|
||||
static inline void btree_put_right(struct btree_node *parent, struct btree_node *child)
|
||||
static inline void btree_put_right(
|
||||
struct btree_node *parent,
|
||||
struct btree_node *child)
|
||||
{
|
||||
parent->b_right = child;
|
||||
child->b_parent = parent;
|
||||
55
include/kernel/channel.h
Normal file
55
include/kernel/channel.h
Normal file
@@ -0,0 +1,55 @@
|
||||
#ifndef KERNEL_CHANNEL_H_
|
||||
#define KERNEL_CHANNEL_H_
|
||||
|
||||
#include <kernel/object.h>
|
||||
#include <kernel/sched.h>
|
||||
|
||||
struct msg;
|
||||
|
||||
struct channel {
|
||||
struct object c_base;
|
||||
unsigned int c_id;
|
||||
struct waitqueue c_wq;
|
||||
struct btree c_msg;
|
||||
struct btree_node c_node;
|
||||
};
|
||||
|
||||
extern kern_status_t channel_type_init(void);
|
||||
extern struct channel *channel_cast(struct object *obj);
|
||||
|
||||
extern struct channel *channel_create(void);
|
||||
|
||||
extern kern_status_t channel_enqueue_msg(
|
||||
struct channel *channel,
|
||||
struct msg *msg);
|
||||
|
||||
extern kern_status_t channel_recv_msg(
|
||||
struct channel *channel,
|
||||
kern_msg_t *out_msg,
|
||||
unsigned long *irq_flags);
|
||||
extern kern_status_t channel_reply_msg(
|
||||
struct channel *channel,
|
||||
msgid_t id,
|
||||
const kern_msg_t *reply,
|
||||
unsigned long *irq_flags);
|
||||
|
||||
extern kern_status_t channel_read_msg(
|
||||
struct channel *channel,
|
||||
msgid_t msg,
|
||||
size_t offset,
|
||||
struct vm_region *dest_region,
|
||||
const kern_iovec_t *dest_iov,
|
||||
size_t dest_iov_count,
|
||||
size_t *nr_read);
|
||||
extern kern_status_t channel_write_msg(
|
||||
struct channel *channel,
|
||||
msgid_t msg,
|
||||
size_t offset,
|
||||
struct vm_region *src_region,
|
||||
const kern_iovec_t *src_iov,
|
||||
size_t src_iov_count,
|
||||
size_t *nr_written);
|
||||
|
||||
DEFINE_OBJECT_LOCK_FUNCTION(channel, c_base)
|
||||
|
||||
#endif
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_CLOCK_H_
|
||||
#define MANGO_CLOCK_H_
|
||||
#ifndef KERNEL_CLOCK_H_
|
||||
#define KERNEL_CLOCK_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_COMPILER_H_
|
||||
#define MANGO_COMPILER_H_
|
||||
#ifndef KERNEL_COMPILER_H_
|
||||
#define KERNEL_COMPILER_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
template <typename T>
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_CONSOLE_H_
|
||||
#define MANGO_CONSOLE_H_
|
||||
#ifndef KERNEL_CONSOLE_H_
|
||||
#define KERNEL_CONSOLE_H_
|
||||
|
||||
/* The console system
|
||||
|
||||
@@ -14,9 +14,10 @@
|
||||
representing a serial port may allow both sending AND receiving over the
|
||||
port.
|
||||
*/
|
||||
#include <mango/queue.h>
|
||||
#include <mango/locks.h>
|
||||
#include <kernel/locks.h>
|
||||
#include <kernel/queue.h>
|
||||
#include <mango/status.h>
|
||||
#include <mango/types.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@@ -1,10 +1,10 @@
|
||||
#ifndef MANGO_CPU_H_
|
||||
#define MANGO_CPU_H_
|
||||
#ifndef KERNEL_CPU_H_
|
||||
#define KERNEL_CPU_H_
|
||||
|
||||
#include <mango/types.h>
|
||||
#include <mango/machine/cpu.h>
|
||||
#include <kernel/types.h>
|
||||
#include <kernel/machine/cpu.h>
|
||||
#include <stdint.h>
|
||||
#include <mango/sched.h>
|
||||
#include <kernel/sched.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_FB_H_
|
||||
#define MANGO_FB_H_
|
||||
#ifndef KERNEL_FB_H_
|
||||
#define KERNEL_FB_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_FLAGS_H_
|
||||
#define MANGO_FLAGS_H_
|
||||
#ifndef KERNEL_FLAGS_H_
|
||||
#define KERNEL_FLAGS_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
#ifndef MANGO_HANDLE_H_
|
||||
#define MANGO_HANDLE_H_
|
||||
#ifndef KERNEL_HANDLE_H_
|
||||
#define KERNEL_HANDLE_H_
|
||||
|
||||
#include <mango/bitmap.h>
|
||||
#include <kernel/bitmap.h>
|
||||
#include <mango/status.h>
|
||||
#include <mango/types.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
/* subtract 32 bytes to account for the handle bitmap */
|
||||
@@ -11,15 +13,16 @@
|
||||
|
||||
typedef uint32_t kern_handle_t;
|
||||
|
||||
typedef uintptr_t handle_flags_t;
|
||||
|
||||
struct task;
|
||||
struct object;
|
||||
struct vm_region;
|
||||
struct handle_list;
|
||||
|
||||
struct handle {
|
||||
union {
|
||||
struct object *h_object;
|
||||
uint64_t __x;
|
||||
};
|
||||
|
||||
uint64_t h_flags;
|
||||
struct object *h_object;
|
||||
handle_flags_t h_flags;
|
||||
};
|
||||
|
||||
struct handle_table {
|
||||
@@ -46,11 +49,21 @@ extern kern_status_t handle_table_alloc_handle(
|
||||
struct handle_table *tab,
|
||||
struct handle **out_slot,
|
||||
kern_handle_t *out_handle);
|
||||
extern void handle_table_free_handle(
|
||||
extern kern_status_t handle_table_free_handle(
|
||||
struct handle_table *tab,
|
||||
kern_handle_t handle);
|
||||
extern struct handle *handle_table_get_handle(
|
||||
struct handle_table *tab,
|
||||
kern_handle_t handle);
|
||||
|
||||
extern kern_status_t handle_table_transfer(
|
||||
struct vm_region *dst_region,
|
||||
struct handle_table *dst,
|
||||
kern_msg_handle_t *dst_handles,
|
||||
size_t dst_handles_max,
|
||||
struct vm_region *src_region,
|
||||
struct handle_table *src,
|
||||
kern_msg_handle_t *src_handles,
|
||||
size_t src_handles_count);
|
||||
|
||||
#endif
|
||||
@@ -1,8 +1,8 @@
|
||||
#ifndef MANGO_INIT_H_
|
||||
#define MANGO_INIT_H_
|
||||
#ifndef KERNEL_INIT_H_
|
||||
#define KERNEL_INIT_H_
|
||||
|
||||
#include <mango/compiler.h>
|
||||
#include <mango/machine/init.h>
|
||||
#include <kernel/compiler.h>
|
||||
#include <kernel/machine/init.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@@ -1,8 +1,8 @@
|
||||
#ifndef MANGO_INPUT_H_
|
||||
#define MANGO_INPUT_H_
|
||||
#ifndef KERNEL_INPUT_H_
|
||||
#define KERNEL_INPUT_H_
|
||||
|
||||
#include <stdint.h>
|
||||
#include <mango/queue.h>
|
||||
#include <kernel/queue.h>
|
||||
#include <mango/status.h>
|
||||
|
||||
enum input_event_hook_flags {
|
||||
31
include/kernel/iovec.h
Normal file
31
include/kernel/iovec.h
Normal file
@@ -0,0 +1,31 @@
|
||||
#ifndef KERNEL_IOVEC_H_
|
||||
#define KERNEL_IOVEC_H_
|
||||
|
||||
#include <mango/types.h>
|
||||
#include <stddef.h>
|
||||
|
||||
struct iovec_iterator {
|
||||
/* if this is set, we are iterating over a list of iovecs stored in
|
||||
* userspace, and must go through this region to retrieve the data. */
|
||||
struct vm_region *it_region;
|
||||
const kern_iovec_t *it_vecs;
|
||||
size_t it_nr_vecs;
|
||||
size_t it_vec_ptr;
|
||||
|
||||
virt_addr_t it_base;
|
||||
size_t it_len;
|
||||
};
|
||||
|
||||
extern void iovec_iterator_begin(
|
||||
struct iovec_iterator *it,
|
||||
const kern_iovec_t *vecs,
|
||||
size_t nr_vecs);
|
||||
extern void iovec_iterator_begin_user(
|
||||
struct iovec_iterator *it,
|
||||
struct vm_region *address_space,
|
||||
const kern_iovec_t *vecs,
|
||||
size_t nr_vecs);
|
||||
|
||||
extern void iovec_iterator_seek(struct iovec_iterator *it, size_t nr_bytes);
|
||||
|
||||
#endif
|
||||
61
include/kernel/locks.h
Normal file
61
include/kernel/locks.h
Normal file
@@ -0,0 +1,61 @@
|
||||
#ifndef KERNEL_LOCKS_H_
|
||||
#define KERNEL_LOCKS_H_
|
||||
|
||||
#include <kernel/compiler.h>
|
||||
#include <kernel/machine/hwlock.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef __aligned(8) ml_hwlock_t spin_lock_t;
|
||||
|
||||
#define SPIN_LOCK_INIT ML_HWLOCK_INIT
|
||||
|
||||
#define spin_lock(lck) ml_hwlock_lock(lck);
|
||||
#define spin_unlock(lck) ml_hwlock_unlock(lck);
|
||||
|
||||
#define spin_lock_irq(lck) ml_hwlock_lock_irq(lck);
|
||||
#define spin_unlock_irq(lck) ml_hwlock_unlock_irq(lck);
|
||||
|
||||
#define spin_lock_irqsave(lck, flags) ml_hwlock_lock_irqsave(lck, flags);
|
||||
#define spin_unlock_irqrestore(lck, flags) \
|
||||
ml_hwlock_unlock_irqrestore(lck, flags);
|
||||
|
||||
static inline void spin_lock_pair_irqsave(
|
||||
spin_lock_t *a,
|
||||
spin_lock_t *b,
|
||||
unsigned long *flags)
|
||||
{
|
||||
if (a == b) {
|
||||
spin_lock_irqsave(a, flags);
|
||||
} else if (a < b) {
|
||||
spin_lock_irqsave(a, flags);
|
||||
spin_lock(b);
|
||||
} else {
|
||||
spin_lock_irqsave(b, flags);
|
||||
spin_lock(a);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void spin_unlock_pair_irqrestore(
|
||||
spin_lock_t *a,
|
||||
spin_lock_t *b,
|
||||
unsigned long flags)
|
||||
{
|
||||
if (a == b) {
|
||||
spin_unlock_irqrestore(a, flags);
|
||||
} else if (a < b) {
|
||||
spin_unlock(b);
|
||||
spin_unlock_irqrestore(a, flags);
|
||||
} else {
|
||||
spin_unlock(a);
|
||||
spin_unlock_irqrestore(b, flags);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
@@ -19,11 +19,11 @@
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
*/
|
||||
#ifndef MANGO_MEMBLOCK_H_
|
||||
#define MANGO_MEMBLOCK_H_
|
||||
#ifndef KERNEL_MEMBLOCK_H_
|
||||
#define KERNEL_MEMBLOCK_H_
|
||||
|
||||
#include <kernel/types.h>
|
||||
#include <limits.h>
|
||||
#include <mango/types.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
@@ -338,6 +338,8 @@ extern void __next_memory_region(
|
||||
phys_addr_t start,
|
||||
phys_addr_t end);
|
||||
|
||||
extern void memblock_dump(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
29
include/kernel/msg.h
Normal file
29
include/kernel/msg.h
Normal file
@@ -0,0 +1,29 @@
|
||||
#ifndef KERNEL_MSG_H_
|
||||
#define KERNEL_MSG_H_
|
||||
|
||||
#include <kernel/btree.h>
|
||||
#include <kernel/locks.h>
|
||||
#include <mango/status.h>
|
||||
#include <mango/types.h>
|
||||
|
||||
struct port;
|
||||
struct thread;
|
||||
|
||||
enum kmsg_status {
|
||||
KMSG_WAIT_RECEIVE,
|
||||
KMSG_WAIT_REPLY,
|
||||
KMSG_REPLY_SENT,
|
||||
};
|
||||
|
||||
struct msg {
|
||||
spin_lock_t msg_lock;
|
||||
enum kmsg_status msg_status;
|
||||
struct btree_node msg_node;
|
||||
msgid_t msg_id;
|
||||
kern_status_t msg_result;
|
||||
struct port *msg_sender_port;
|
||||
struct thread *msg_sender_thread;
|
||||
kern_msg_t msg_req, msg_resp;
|
||||
};
|
||||
|
||||
#endif
|
||||
@@ -1,10 +1,10 @@
|
||||
#ifndef MANGO_OBJECT_H_
|
||||
#define MANGO_OBJECT_H_
|
||||
#ifndef KERNEL_OBJECT_H_
|
||||
#define KERNEL_OBJECT_H_
|
||||
|
||||
#include <mango/flags.h>
|
||||
#include <mango/locks.h>
|
||||
#include <kernel/flags.h>
|
||||
#include <kernel/locks.h>
|
||||
#include <kernel/vm.h>
|
||||
#include <mango/status.h>
|
||||
#include <mango/vm.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
@@ -31,6 +31,20 @@ extern "C" {
|
||||
unsigned long flags) \
|
||||
{ \
|
||||
object_unlock_irqrestore(&p->base, flags); \
|
||||
} \
|
||||
static inline void object_name##_lock_pair_irqsave( \
|
||||
struct object_name *a, \
|
||||
struct object_name *b, \
|
||||
unsigned long *flags) \
|
||||
{ \
|
||||
object_lock_pair_irqsave(&a->base, &b->base, flags); \
|
||||
} \
|
||||
static inline void object_name##_unlock_pair_irqrestore( \
|
||||
struct object_name *a, \
|
||||
struct object_name *b, \
|
||||
unsigned long flags) \
|
||||
{ \
|
||||
object_unlock_pair_irqrestore(&a->base, &b->base, flags); \
|
||||
}
|
||||
|
||||
#define OBJECT_MAGIC 0xBADDCAFE
|
||||
@@ -52,7 +66,10 @@ enum object_type_flags {
|
||||
};
|
||||
|
||||
struct object_ops {
|
||||
kern_status_t (*destroy)(struct object *obj);
|
||||
kern_status_t (*destroy)(struct object *obj, struct queue *q);
|
||||
kern_status_t (*destroy_recurse)(
|
||||
struct queue_entry *entry,
|
||||
struct object **out);
|
||||
};
|
||||
|
||||
struct object_type {
|
||||
@@ -67,6 +84,7 @@ struct object_type {
|
||||
|
||||
struct object {
|
||||
uint32_t ob_magic;
|
||||
koid_t ob_id;
|
||||
struct object_type *ob_type;
|
||||
spin_lock_t ob_lock;
|
||||
unsigned int ob_refcount;
|
||||
@@ -88,6 +106,15 @@ extern void object_unlock(struct object *obj);
|
||||
extern void object_lock_irqsave(struct object *obj, unsigned long *flags);
|
||||
extern void object_unlock_irqrestore(struct object *obj, unsigned long flags);
|
||||
|
||||
extern void object_lock_pair_irqsave(
|
||||
struct object *a,
|
||||
struct object *b,
|
||||
unsigned long *flags);
|
||||
extern void object_unlock_pair_irqrestore(
|
||||
struct object *a,
|
||||
struct object *b,
|
||||
unsigned long flags);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -1,7 +1,7 @@
|
||||
#ifndef MANGO_PANIC_H_
|
||||
#define MANGO_PANIC_H_
|
||||
#ifndef KERNEL_PANIC_H_
|
||||
#define KERNEL_PANIC_H_
|
||||
|
||||
#include <mango/compiler.h>
|
||||
#include <kernel/compiler.h>
|
||||
|
||||
struct ml_cpu_context;
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
#ifndef MANGO_PERCPU_H_
|
||||
#define MANGO_PERCPU_H_
|
||||
#ifndef KERNEL_PERCPU_H_
|
||||
#define KERNEL_PERCPU_H_
|
||||
|
||||
#include <mango/status.h>
|
||||
#include <mango/compiler.h>
|
||||
#include <mango/sched.h>
|
||||
#include <kernel/compiler.h>
|
||||
#include <kernel/sched.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@@ -1,11 +1,11 @@
|
||||
#ifndef MANGO_PMAP_H_
|
||||
#define MANGO_PMAP_H_
|
||||
#ifndef KERNEL_PMAP_H_
|
||||
#define KERNEL_PMAP_H_
|
||||
|
||||
/* all the functions declared in this file are defined in arch/xyz/pmap.c */
|
||||
|
||||
#include <mango/machine/pmap.h>
|
||||
#include <kernel/machine/pmap.h>
|
||||
#include <kernel/vm.h>
|
||||
#include <mango/status.h>
|
||||
#include <mango/vm.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#define PMAP_INVALID ML_PMAP_INVALID
|
||||
@@ -61,18 +61,18 @@ extern kern_status_t pmap_add(
|
||||
pmap_t pmap,
|
||||
virt_addr_t p,
|
||||
pfn_t pfn,
|
||||
enum vm_prot prot,
|
||||
vm_prot_t prot,
|
||||
enum pmap_flags flags);
|
||||
extern kern_status_t pmap_add_block(
|
||||
pmap_t pmap,
|
||||
virt_addr_t p,
|
||||
pfn_t pfn,
|
||||
size_t len,
|
||||
enum vm_prot prot,
|
||||
vm_prot_t prot,
|
||||
enum pmap_flags flags);
|
||||
|
||||
extern kern_status_t pmap_remove(pmap_t pmap, void *p);
|
||||
extern kern_status_t pmap_remove_range(pmap_t pmap, void *p, size_t len);
|
||||
extern kern_status_t pmap_remove(pmap_t pmap, virt_addr_t p);
|
||||
extern kern_status_t pmap_remove_range(pmap_t pmap, virt_addr_t p, size_t len);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
41
include/kernel/port.h
Normal file
41
include/kernel/port.h
Normal file
@@ -0,0 +1,41 @@
|
||||
#ifndef KERNEL_PORT_H_
|
||||
#define KERNEL_PORT_H_
|
||||
|
||||
#include <kernel/object.h>
|
||||
#include <kernel/sched.h>
|
||||
|
||||
enum port_status {
|
||||
/* port is not connected */
|
||||
PORT_OFFLINE = 0,
|
||||
/* port is connected and ready to send messages */
|
||||
PORT_READY,
|
||||
/* port has sent a message, and is waiting for the remote to receive it
|
||||
*/
|
||||
PORT_SEND_BLOCKED,
|
||||
/* port has sent a message, and the remote has received it. waiting for
|
||||
* the remote to reply. */
|
||||
PORT_REPLY_BLOCKED,
|
||||
};
|
||||
|
||||
struct port {
|
||||
struct object p_base;
|
||||
enum port_status p_status;
|
||||
struct channel *p_remote;
|
||||
};
|
||||
|
||||
extern kern_status_t port_type_init(void);
|
||||
extern struct port *port_cast(struct object *obj);
|
||||
|
||||
extern struct port *port_create(void);
|
||||
|
||||
extern kern_status_t port_connect(struct port *port, struct channel *remote);
|
||||
extern kern_status_t port_disconnect(struct port *port);
|
||||
extern kern_status_t port_send_msg(
|
||||
struct port *port,
|
||||
const kern_msg_t *msg,
|
||||
kern_msg_t *out_response,
|
||||
unsigned long *lock_flags);
|
||||
|
||||
DEFINE_OBJECT_LOCK_FUNCTION(port, p_base)
|
||||
|
||||
#endif
|
||||
@@ -1,7 +1,9 @@
|
||||
#ifndef MANGO_PRINTK_H_
|
||||
#define MANGO_PRINTK_H_
|
||||
#ifndef KERNEL_PRINTK_H_
|
||||
#define KERNEL_PRINTK_H_
|
||||
|
||||
#include <mango/console.h>
|
||||
#include <kernel/console.h>
|
||||
|
||||
#undef TRACE
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@@ -1,7 +1,7 @@
|
||||
#ifndef MANGO_QUEUE_H_
|
||||
#define MANGO_QUEUE_H_
|
||||
#ifndef KERNEL_QUEUE_H_
|
||||
#define KERNEL_QUEUE_H_
|
||||
|
||||
#include <mango/libc/string.h>
|
||||
#include <kernel/libc/string.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
@@ -1,8 +1,8 @@
|
||||
#ifndef MANGO_RINGBUFFER_H_
|
||||
#define MANGO_RINGBUFFER_H_
|
||||
#ifndef KERNEL_RINGBUFFER_H_
|
||||
#define KERNEL_RINGBUFFER_H_
|
||||
|
||||
#include <mango/locks.h>
|
||||
#include <mango/sched.h>
|
||||
#include <kernel/locks.h>
|
||||
#include <kernel/sched.h>
|
||||
|
||||
struct ringbuffer {
|
||||
unsigned char *r_buffer;
|
||||
@@ -1,11 +1,13 @@
|
||||
#ifndef MANGO_SCHED_H_
|
||||
#define MANGO_SCHED_H_
|
||||
#ifndef KERNEL_SCHED_H_
|
||||
#define KERNEL_SCHED_H_
|
||||
|
||||
#include <mango/btree.h>
|
||||
#include <mango/locks.h>
|
||||
#include <mango/object.h>
|
||||
#include <mango/pmap.h>
|
||||
#include <mango/queue.h>
|
||||
#include <kernel/btree.h>
|
||||
#include <kernel/handle.h>
|
||||
#include <kernel/locks.h>
|
||||
#include <kernel/msg.h>
|
||||
#include <kernel/object.h>
|
||||
#include <kernel/pmap.h>
|
||||
#include <kernel/queue.h>
|
||||
#include <mango/status.h>
|
||||
|
||||
#define TASK_NAME_MAX 64
|
||||
@@ -33,6 +35,7 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct channel;
|
||||
struct runqueue;
|
||||
struct work_item;
|
||||
|
||||
@@ -82,7 +85,9 @@ struct task {
|
||||
|
||||
pmap_t t_pmap;
|
||||
struct vm_region *t_address_space;
|
||||
spin_lock_t t_handles_lock;
|
||||
struct handle_table *t_handles;
|
||||
struct btree b_channels;
|
||||
|
||||
struct btree_node t_tasklist;
|
||||
struct queue_entry t_child_entry;
|
||||
@@ -110,6 +115,7 @@ struct thread {
|
||||
virt_addr_t tr_cpu_user_sp, tr_cpu_kernel_sp;
|
||||
|
||||
struct runqueue *tr_rq;
|
||||
struct msg tr_msg;
|
||||
|
||||
struct queue_entry tr_parent_entry;
|
||||
struct queue_entry tr_rqentry;
|
||||
@@ -183,7 +189,8 @@ extern void rq_remove_thread(struct runqueue *rq, struct thread *thr);
|
||||
extern struct runqueue *cpu_rq(unsigned int cpu);
|
||||
|
||||
extern struct task *task_alloc(void);
|
||||
extern struct task *task_create(struct task *parent, const char *name);
|
||||
extern struct task *task_cast(struct object *obj);
|
||||
extern struct task *task_create(const char *name, size_t name_len);
|
||||
static inline struct task *task_ref(struct task *task)
|
||||
{
|
||||
return OBJECT_CAST(struct task, t_base, object_ref(&task->t_base));
|
||||
@@ -192,7 +199,24 @@ static inline void task_unref(struct task *task)
|
||||
{
|
||||
object_unref(&task->t_base);
|
||||
}
|
||||
extern struct task *task_from_pid(unsigned int pid);
|
||||
extern kern_status_t task_add_child(struct task *parent, struct task *child);
|
||||
extern kern_status_t task_add_channel(
|
||||
struct task *task,
|
||||
struct channel *channel,
|
||||
unsigned int id);
|
||||
extern struct channel *task_get_channel(struct task *task, unsigned int id);
|
||||
extern struct task *task_from_tid(tid_t id);
|
||||
extern kern_status_t task_open_handle(
|
||||
struct task *task,
|
||||
struct object *obj,
|
||||
handle_flags_t flags,
|
||||
kern_handle_t *out);
|
||||
extern kern_status_t task_resolve_handle(
|
||||
struct task *task,
|
||||
kern_handle_t handle,
|
||||
struct object **out_obj,
|
||||
handle_flags_t *out_flags);
|
||||
extern kern_status_t task_close_handle(struct task *task, kern_handle_t handle);
|
||||
extern struct thread *task_create_thread(struct task *parent);
|
||||
extern struct task *kernel_task(void);
|
||||
extern struct task *idle_task(void);
|
||||
@@ -211,12 +235,16 @@ extern void end_charge_period(void);
|
||||
DEFINE_OBJECT_LOCK_FUNCTION(task, t_base)
|
||||
|
||||
extern struct thread *thread_alloc(void);
|
||||
extern struct thread *thread_cast(struct object *obj);
|
||||
extern kern_status_t thread_init_kernel(struct thread *thr, virt_addr_t ip);
|
||||
extern kern_status_t thread_init_user(
|
||||
struct thread *thr,
|
||||
virt_addr_t ip,
|
||||
virt_addr_t sp);
|
||||
virt_addr_t sp,
|
||||
const uintptr_t *args,
|
||||
size_t nr_args);
|
||||
extern int thread_priority(struct thread *thr);
|
||||
extern void thread_awaken(struct thread *thr);
|
||||
extern void idle(void);
|
||||
extern struct thread *create_kernel_thread(void (*fn)(void));
|
||||
extern struct thread *create_idle_thread(void);
|
||||
181
include/kernel/syscall.h
Normal file
181
include/kernel/syscall.h
Normal file
@@ -0,0 +1,181 @@
|
||||
#ifndef KERNEL_SYSCALL_H_
|
||||
#define KERNEL_SYSCALL_H_
|
||||
|
||||
#include <kernel/handle.h>
|
||||
#include <kernel/sched.h>
|
||||
#include <kernel/vm-region.h>
|
||||
#include <kernel/vm.h>
|
||||
#include <mango/status.h>
|
||||
#include <mango/syscall.h>
|
||||
|
||||
#define validate_access(task, ptr, len, flags) \
|
||||
__validate_access(task, (const void *)ptr, len, flags)
|
||||
#define validate_access_r(task, ptr, len) \
|
||||
validate_access(task, ptr, len, VM_PROT_READ | VM_PROT_USER)
|
||||
#define validate_access_w(task, ptr, len) \
|
||||
validate_access(task, ptr, len, VM_PROT_WRITE | VM_PROT_USER)
|
||||
#define validate_access_rw(task, ptr, len) \
|
||||
validate_access( \
|
||||
task, \
|
||||
ptr, \
|
||||
len, \
|
||||
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_USER)
|
||||
|
||||
static inline bool __validate_access(
|
||||
struct task *task,
|
||||
const void *ptr,
|
||||
size_t len,
|
||||
vm_prot_t flags)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
vm_region_lock_irqsave(task->t_address_space, &irq_flags);
|
||||
bool result = vm_region_validate_access(
|
||||
task->t_address_space,
|
||||
(virt_addr_t)ptr,
|
||||
len,
|
||||
flags | VM_PROT_USER);
|
||||
vm_region_unlock_irqrestore(task->t_address_space, irq_flags);
|
||||
return result;
|
||||
}
|
||||
|
||||
extern kern_status_t sys_task_exit(int status);
|
||||
extern kern_status_t sys_task_self(kern_handle_t *out);
|
||||
extern kern_status_t sys_task_create(
|
||||
kern_handle_t parent_handle,
|
||||
const char *name,
|
||||
size_t name_len,
|
||||
kern_handle_t *out_task,
|
||||
kern_handle_t *out_address_space);
|
||||
extern kern_status_t sys_task_create_thread(
|
||||
kern_handle_t task,
|
||||
virt_addr_t ip,
|
||||
virt_addr_t sp,
|
||||
uintptr_t *args,
|
||||
size_t nr_args,
|
||||
kern_handle_t *out_thread);
|
||||
extern kern_status_t sys_task_get_address_space(
|
||||
kern_handle_t task,
|
||||
kern_handle_t *out);
|
||||
|
||||
extern kern_status_t sys_thread_start(kern_handle_t thread);
|
||||
|
||||
extern kern_status_t sys_vm_object_create(
|
||||
const char *name,
|
||||
size_t name_len,
|
||||
size_t data_len,
|
||||
vm_prot_t prot,
|
||||
kern_handle_t *out);
|
||||
extern kern_status_t sys_vm_object_read(
|
||||
kern_handle_t object,
|
||||
void *dst,
|
||||
off_t offset,
|
||||
size_t count,
|
||||
size_t *nr_read);
|
||||
extern kern_status_t sys_vm_object_write(
|
||||
kern_handle_t object,
|
||||
const void *src,
|
||||
off_t offset,
|
||||
size_t count,
|
||||
size_t *nr_written);
|
||||
extern kern_status_t sys_vm_object_copy(
|
||||
kern_handle_t dst,
|
||||
off_t dst_offset,
|
||||
kern_handle_t src,
|
||||
off_t src_offset,
|
||||
size_t count,
|
||||
size_t *nr_copied);
|
||||
|
||||
extern kern_status_t sys_vm_region_create(
|
||||
kern_handle_t parent,
|
||||
const char *name,
|
||||
size_t name_len,
|
||||
off_t offset,
|
||||
size_t region_len,
|
||||
vm_prot_t prot,
|
||||
kern_handle_t *out,
|
||||
virt_addr_t *out_base_address);
|
||||
extern kern_status_t sys_vm_region_kill(kern_handle_t region);
|
||||
extern kern_status_t sys_vm_region_read(
|
||||
kern_handle_t region,
|
||||
void *dst,
|
||||
off_t offset,
|
||||
size_t count,
|
||||
size_t *nr_read);
|
||||
extern kern_status_t sys_vm_region_write(
|
||||
kern_handle_t region,
|
||||
const void *src,
|
||||
off_t offset,
|
||||
size_t count,
|
||||
size_t *nr_read);
|
||||
extern kern_status_t sys_vm_region_map_absolute(
|
||||
kern_handle_t region,
|
||||
virt_addr_t map_address,
|
||||
kern_handle_t object,
|
||||
off_t object_offset,
|
||||
size_t length,
|
||||
vm_prot_t prot,
|
||||
virt_addr_t *out_base_address);
|
||||
extern kern_status_t sys_vm_region_map_relative(
|
||||
kern_handle_t region,
|
||||
off_t region_offset,
|
||||
kern_handle_t object,
|
||||
off_t object_offset,
|
||||
size_t length,
|
||||
vm_prot_t prot,
|
||||
virt_addr_t *out_base_address);
|
||||
extern kern_status_t sys_vm_region_unmap_absolute(
|
||||
kern_handle_t region,
|
||||
virt_addr_t address,
|
||||
size_t length);
|
||||
extern kern_status_t sys_vm_region_unmap_relative(
|
||||
kern_handle_t region,
|
||||
off_t offset,
|
||||
size_t length);
|
||||
|
||||
extern kern_status_t sys_kern_log(const char *s);
|
||||
extern kern_status_t sys_kern_handle_close(kern_handle_t handle);
|
||||
extern kern_status_t sys_kern_config_get(
|
||||
kern_config_key_t key,
|
||||
void *ptr,
|
||||
size_t len);
|
||||
extern kern_status_t sys_kern_config_set(
|
||||
kern_config_key_t key,
|
||||
const void *ptr,
|
||||
size_t len);
|
||||
|
||||
extern kern_status_t sys_channel_create(unsigned int id, kern_handle_t *out);
|
||||
extern kern_status_t sys_port_create(kern_handle_t *out);
|
||||
extern kern_status_t sys_port_connect(
|
||||
kern_handle_t port,
|
||||
tid_t task_id,
|
||||
unsigned int channel_id);
|
||||
extern kern_status_t sys_port_disconnect(kern_handle_t port);
|
||||
|
||||
extern kern_status_t sys_msg_send(
|
||||
kern_handle_t port,
|
||||
const kern_msg_t *msg,
|
||||
kern_msg_t *out_reply);
|
||||
extern kern_status_t sys_msg_recv(kern_handle_t channel, kern_msg_t *out_msg);
|
||||
|
||||
extern kern_status_t sys_msg_reply(
|
||||
kern_handle_t channel,
|
||||
msgid_t id,
|
||||
const kern_msg_t *msg);
|
||||
extern kern_status_t sys_msg_read(
|
||||
kern_handle_t channel_handle,
|
||||
msgid_t id,
|
||||
size_t offset,
|
||||
const kern_iovec_t *iov,
|
||||
size_t iov_count,
|
||||
size_t *nr_read);
|
||||
extern kern_status_t sys_msg_write(
|
||||
kern_handle_t channel,
|
||||
msgid_t id,
|
||||
size_t offset,
|
||||
const kern_iovec_t *in,
|
||||
size_t nr_in,
|
||||
size_t *nr_written);
|
||||
|
||||
extern virt_addr_t syscall_get_function(unsigned int sysid);
|
||||
|
||||
#endif
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_TEST_H_
|
||||
#define MANGO_TEST_H_
|
||||
#ifndef KERNEL_TEST_H_
|
||||
#define KERNEL_TEST_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@@ -1,18 +1,14 @@
|
||||
#ifndef MANGO_TYPES_H_
|
||||
#define MANGO_TYPES_H_
|
||||
#ifndef KERNEL_TYPES_H_
|
||||
#define KERNEL_TYPES_H_
|
||||
|
||||
#include <mango/types.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#define CYCLES_MAX UINT64_MAX
|
||||
|
||||
typedef uintptr_t phys_addr_t;
|
||||
typedef uintptr_t virt_addr_t;
|
||||
typedef uint64_t cycles_t;
|
||||
typedef uint64_t sectors_t;
|
||||
typedef uint64_t off_t;
|
||||
|
||||
typedef unsigned int umode_t;
|
||||
|
||||
struct boot_module {
|
||||
phys_addr_t mod_base;
|
||||
@@ -1,6 +1,7 @@
|
||||
#ifndef MANGO_UTIL_H_
|
||||
#define MANGO_UTIL_H_
|
||||
#ifndef KERNEL_UTIL_H_
|
||||
#define KERNEL_UTIL_H_
|
||||
|
||||
#include <mango/types.h>
|
||||
#include <stdbool.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
@@ -1,8 +1,8 @@
|
||||
#ifndef MANGO_VM_OBJECT_H_
|
||||
#define MANGO_VM_OBJECT_H_
|
||||
#ifndef KERNEL_VM_OBJECT_H_
|
||||
#define KERNEL_VM_OBJECT_H_
|
||||
|
||||
#include <mango/locks.h>
|
||||
#include <mango/object.h>
|
||||
#include <kernel/locks.h>
|
||||
#include <kernel/object.h>
|
||||
|
||||
#define VM_OBJECT_NAME_MAX 64
|
||||
|
||||
@@ -23,7 +23,7 @@ struct vm_object {
|
||||
|
||||
/* memory protection flags. mappings of this vm_object can only use
|
||||
* a subset of the flags set in this mask. */
|
||||
enum vm_prot vo_prot;
|
||||
vm_prot_t vo_prot;
|
||||
|
||||
/* btree of vm_pages that have been allocated to this vm_object.
|
||||
* vm_page->p_vmo_offset and the size of each page is the bst key. */
|
||||
@@ -33,6 +33,7 @@ struct vm_object {
|
||||
};
|
||||
|
||||
extern kern_status_t vm_object_type_init(void);
|
||||
extern struct vm_object *vm_object_cast(struct object *obj);
|
||||
|
||||
/* create a vm_object with the specified length in bytes and protection flags.
|
||||
* the length will be automatically rounded up to the nearest vm_object page
|
||||
@@ -40,8 +41,9 @@ extern kern_status_t vm_object_type_init(void);
|
||||
* they are mapped and accessed. */
|
||||
extern struct vm_object *vm_object_create(
|
||||
const char *name,
|
||||
size_t len,
|
||||
enum vm_prot prot);
|
||||
size_t name_len,
|
||||
size_t data_len,
|
||||
vm_prot_t prot);
|
||||
|
||||
/* create a vm_object that represents the specified range of physical memory.
|
||||
* the length will be automatically rounded up to the nearest vm_object page
|
||||
@@ -50,9 +52,10 @@ extern struct vm_object *vm_object_create(
|
||||
* reserved, and is not in use by any other kernel component. */
|
||||
extern struct vm_object *vm_object_create_in_place(
|
||||
const char *name,
|
||||
size_t name_len,
|
||||
phys_addr_t base,
|
||||
size_t len,
|
||||
enum vm_prot prot);
|
||||
size_t data_len,
|
||||
vm_prot_t prot);
|
||||
|
||||
extern struct vm_page *vm_object_get_page(
|
||||
const struct vm_object *vo,
|
||||
@@ -63,6 +66,26 @@ extern struct vm_page *vm_object_alloc_page(
|
||||
off_t offset,
|
||||
enum vm_page_order size);
|
||||
|
||||
extern kern_status_t vm_object_read(
|
||||
struct vm_object *vo,
|
||||
void *dst,
|
||||
off_t offset,
|
||||
size_t count,
|
||||
size_t *nr_read);
|
||||
extern kern_status_t vm_object_write(
|
||||
struct vm_object *vo,
|
||||
const void *dst,
|
||||
off_t offset,
|
||||
size_t count,
|
||||
size_t *nr_written);
|
||||
extern kern_status_t vm_object_copy(
|
||||
struct vm_object *dst,
|
||||
off_t dst_offset,
|
||||
struct vm_object *src,
|
||||
off_t src_offset,
|
||||
size_t count,
|
||||
size_t *nr_copied);
|
||||
|
||||
DEFINE_OBJECT_LOCK_FUNCTION(vm_object, vo_base)
|
||||
|
||||
#endif
|
||||
191
include/kernel/vm-region.h
Normal file
191
include/kernel/vm-region.h
Normal file
@@ -0,0 +1,191 @@
|
||||
#ifndef KERNEL_VM_REGION_H_
|
||||
#define KERNEL_VM_REGION_H_
|
||||
|
||||
#include <kernel/object.h>
|
||||
#include <kernel/pmap.h>
|
||||
#include <kernel/vm.h>
|
||||
|
||||
#define VM_REGION_NAME_MAX 64
|
||||
#define VM_REGION_COPY_ALL ((size_t)-1)
|
||||
|
||||
struct vm_region;
|
||||
struct vm_object;
|
||||
|
||||
enum vm_region_status {
|
||||
VM_REGION_DEAD = 0,
|
||||
VM_REGION_ONLINE,
|
||||
};
|
||||
|
||||
enum vm_region_entry_type {
|
||||
VM_REGION_ENTRY_NONE = 0,
|
||||
VM_REGION_ENTRY_REGION,
|
||||
VM_REGION_ENTRY_MAPPING,
|
||||
};
|
||||
|
||||
struct vm_region_entry {
|
||||
union {
|
||||
struct btree_node e_node;
|
||||
/* this entry is only used to queue vm-region objects for
|
||||
* recursive cleanup */
|
||||
struct queue_entry e_entry;
|
||||
};
|
||||
struct vm_region_entry *e_parent;
|
||||
enum vm_region_entry_type e_type;
|
||||
/* absolute address of this entry */
|
||||
virt_addr_t e_address;
|
||||
/* offset in bytes of this entry within its immediate parent. */
|
||||
off_t e_offset;
|
||||
/* size of the entry in bytes */
|
||||
size_t e_size;
|
||||
};
|
||||
|
||||
struct vm_region_mapping {
|
||||
struct vm_region_entry m_entry;
|
||||
struct vm_object *m_object;
|
||||
|
||||
/* used to link to vm_object->vo_mappings */
|
||||
struct queue_entry m_object_entry;
|
||||
|
||||
vm_prot_t m_prot;
|
||||
/* offset in bytes to the start of the object data that was mapped */
|
||||
off_t m_object_offset;
|
||||
};
|
||||
|
||||
struct vm_region {
|
||||
struct object vr_base;
|
||||
enum vm_region_status vr_status;
|
||||
struct vm_region_entry vr_entry;
|
||||
|
||||
char vr_name[VM_REGION_NAME_MAX];
|
||||
|
||||
/* btree of struct vm_region_entry.
|
||||
* sibling entries cannot overlap each other, and child entries must
|
||||
* be entirely contained within their immediate parent entry. */
|
||||
struct btree vr_entries;
|
||||
|
||||
/* memory protection restriction mask.
|
||||
* any mapping in this region, or any of its children, cannot use
|
||||
* protection flags that are not set in this mask.
|
||||
* for example, if VM_PROT_EXEC is /not/ set here, no mapping
|
||||
* can be created in this region or any child region with VM_PROT_EXEC
|
||||
* set. */
|
||||
vm_prot_t vr_prot;
|
||||
|
||||
/* the physical address space in which mappings in this region (and
|
||||
* its children) are created */
|
||||
pmap_t vr_pmap;
|
||||
};
|
||||
|
||||
extern kern_status_t vm_region_type_init(void);
|
||||
extern struct vm_region *vm_region_cast(struct object *obj);
|
||||
|
||||
/* create a new vm-region, optionally within a parent region.
|
||||
* `offset` is the byte offset within the parent region where the new region
|
||||
* should start.
|
||||
* if no parent is specified, `offset` is the absolute virtual address of the
|
||||
* start of the region.
|
||||
* in both cases, `len` is the length of the new region in bytes. */
|
||||
extern kern_status_t vm_region_create(
|
||||
struct vm_region *parent,
|
||||
const char *name,
|
||||
size_t name_len,
|
||||
off_t offset,
|
||||
size_t region_len,
|
||||
vm_prot_t prot,
|
||||
struct vm_region **out);
|
||||
|
||||
/* recursively kills a given region and all of its sub-regions.
|
||||
* when a region is killed, all of its mappings are unmapped, and any further
|
||||
* operations on the region are denied. however, all handles and references to
|
||||
* the region (any any sub-region) remain valid, and no kernel memory is
|
||||
* de-allocated.
|
||||
* the memory used by the vm-region object itself is de-allocated when the last
|
||||
* handle/reference to the object is released.
|
||||
* this function should be called with `region` locked.
|
||||
*/
|
||||
extern kern_status_t vm_region_kill(
|
||||
struct vm_region *region,
|
||||
unsigned long *lock_flags);
|
||||
|
||||
/* map a vm-object into a vm-region.
|
||||
* [region_offset,length] must fall within exactly one region, and cannot span
|
||||
* multiple sibling regions.
|
||||
* if [region_offset,length] falls within a child region, the map operation
|
||||
* will be transparently redirected to the relevant region.
|
||||
* `prot` must be allowed both by the region into which the mapping is being
|
||||
* created AND the vm-object being mapped. */
|
||||
extern kern_status_t vm_region_map_object(
|
||||
struct vm_region *region,
|
||||
off_t region_offset,
|
||||
struct vm_object *object,
|
||||
off_t object_offset,
|
||||
size_t length,
|
||||
vm_prot_t prot,
|
||||
virt_addr_t *out);
|
||||
|
||||
extern kern_status_t vm_region_unmap(
|
||||
struct vm_region *region,
|
||||
off_t region_offset,
|
||||
size_t length);
|
||||
|
||||
extern bool vm_region_validate_access(
|
||||
struct vm_region *region,
|
||||
off_t offset,
|
||||
size_t len,
|
||||
vm_prot_t prot);
|
||||
|
||||
/* find the mapping corresponding to the given virtual address, and page-in the
|
||||
* necessary vm_page to allow the memory access to succeed. if the relevant
|
||||
* vm-object page hasn't been allocated yet, it will be allocated here. */
|
||||
extern kern_status_t vm_region_demand_map(
|
||||
struct vm_region *region,
|
||||
virt_addr_t addr,
|
||||
enum pmap_fault_flags flags);
|
||||
|
||||
/* get the absolute base virtual address of a region within its
|
||||
* parent/ancestors. */
|
||||
extern virt_addr_t vm_region_get_base_address(const struct vm_region *region);
|
||||
|
||||
extern void vm_region_dump(struct vm_region *region);
|
||||
|
||||
/* read data from the user-space area of a vm-region into a kernel-mode buffer
|
||||
*/
|
||||
extern kern_status_t vm_region_read_kernel(
|
||||
struct vm_region *src_region,
|
||||
virt_addr_t src_ptr,
|
||||
size_t count,
|
||||
void *dest,
|
||||
size_t *nr_read);
|
||||
|
||||
/* write data to the user-space area of a vm-region from a kernel-mode buffer
|
||||
*/
|
||||
extern kern_status_t vm_region_write_kernel(
|
||||
struct vm_region *dst_region,
|
||||
virt_addr_t dst_ptr,
|
||||
size_t count,
|
||||
const void *src,
|
||||
size_t *nr_written);
|
||||
|
||||
extern kern_status_t vm_region_memmove(
|
||||
struct vm_region *dest_region,
|
||||
virt_addr_t dest_ptr,
|
||||
struct vm_region *src_region,
|
||||
virt_addr_t src_ptr,
|
||||
size_t count,
|
||||
size_t *nr_moved);
|
||||
|
||||
extern kern_status_t vm_region_memmove_v(
|
||||
struct vm_region *dest_region,
|
||||
size_t dest_offset,
|
||||
const kern_iovec_t *dest,
|
||||
size_t nr_dest,
|
||||
struct vm_region *src_region,
|
||||
size_t src_offset,
|
||||
const kern_iovec_t *src,
|
||||
size_t nr_src,
|
||||
size_t bytes_to_move,
|
||||
size_t *nr_bytes_moved);
|
||||
|
||||
DEFINE_OBJECT_LOCK_FUNCTION(vm_region, vr_base)
|
||||
|
||||
#endif
|
||||
@@ -1,13 +1,13 @@
|
||||
#ifndef MANGO_VM_H_
|
||||
#define MANGO_VM_H_
|
||||
#ifndef KERNEL_VM_H_
|
||||
#define KERNEL_VM_H_
|
||||
|
||||
#include <mango/bitmap.h>
|
||||
#include <mango/btree.h>
|
||||
#include <mango/locks.h>
|
||||
#include <mango/machine/vm.h>
|
||||
#include <mango/queue.h>
|
||||
#include <kernel/bitmap.h>
|
||||
#include <kernel/btree.h>
|
||||
#include <kernel/locks.h>
|
||||
#include <kernel/machine/vm.h>
|
||||
#include <kernel/queue.h>
|
||||
#include <kernel/types.h>
|
||||
#include <mango/status.h>
|
||||
#include <mango/types.h>
|
||||
#include <stddef.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
@@ -47,15 +47,6 @@ enum vm_model {
|
||||
VM_MODEL_SPARSE,
|
||||
};
|
||||
|
||||
enum vm_prot {
|
||||
VM_PROT_READ = 0x01u,
|
||||
VM_PROT_WRITE = 0x02u,
|
||||
VM_PROT_EXEC = 0x04u,
|
||||
VM_PROT_USER = 0x08u,
|
||||
VM_PROT_SVR = 0x10u,
|
||||
VM_PROT_NOCACHE = 0x20u,
|
||||
};
|
||||
|
||||
enum vm_flags {
|
||||
VM_NORMAL = 0x00u,
|
||||
VM_GET_DMA = 0x01u,
|
||||
@@ -1,25 +0,0 @@
|
||||
#ifndef MANGO_LOCKS_H_
|
||||
#define MANGO_LOCKS_H_
|
||||
|
||||
#include <mango/compiler.h>
|
||||
#include <mango/machine/hwlock.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef __aligned(8) ml_hwlock_t spin_lock_t;
|
||||
|
||||
#define SPIN_LOCK_INIT ML_HWLOCK_INIT
|
||||
|
||||
#define spin_lock(lck) ml_hwlock_lock(lck);
|
||||
#define spin_unlock(lck) ml_hwlock_unlock(lck);
|
||||
|
||||
#define spin_lock_irqsave(lck, flags) ml_hwlock_lock_irqsave(lck, flags);
|
||||
#define spin_unlock_irqrestore(lck, flags) ml_hwlock_unlock_irqrestore(lck, flags);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
@@ -1,20 +0,0 @@
|
||||
#ifndef MANGO_SYSCALL_H_
|
||||
#define MANGO_SYSCALL_H_
|
||||
|
||||
#include <mango/handle.h>
|
||||
#include <mango/status.h>
|
||||
#include <mango/vm.h>
|
||||
|
||||
#define SYS_EXIT 1
|
||||
#define SYS_VM_OBJECT_CREATE 2
|
||||
|
||||
extern kern_status_t sys_exit(int status);
|
||||
extern kern_status_t sys_vm_object_create(
|
||||
const char *name,
|
||||
size_t len,
|
||||
enum vm_prot prot,
|
||||
kern_handle_t *out_handle);
|
||||
|
||||
extern virt_addr_t syscall_get_func(unsigned int sysid);
|
||||
|
||||
#endif
|
||||
@@ -1,126 +0,0 @@
|
||||
#ifndef MANGO_VM_REGION_H_
|
||||
#define MANGO_VM_REGION_H_
|
||||
|
||||
#include <mango/object.h>
|
||||
#include <mango/pmap.h>
|
||||
#include <mango/vm.h>
|
||||
|
||||
#define VM_REGION_NAME_MAX 64
|
||||
|
||||
#define VM_REGION_ANY_MAP_ADDRESS ((virt_addr_t) - 1)
|
||||
|
||||
struct vm_region;
|
||||
struct vm_object;
|
||||
|
||||
enum vm_region_entry_type {
|
||||
VM_REGION_ENTRY_NONE = 0,
|
||||
VM_REGION_ENTRY_REGION,
|
||||
VM_REGION_ENTRY_MAPPING,
|
||||
};
|
||||
|
||||
struct vm_region_entry {
|
||||
struct btree_node e_node;
|
||||
struct vm_region_entry *e_parent;
|
||||
enum vm_region_entry_type e_type;
|
||||
/* absolute virtual address of the entry */
|
||||
virt_addr_t e_base_address;
|
||||
/* size of the entry in bytes */
|
||||
size_t e_size;
|
||||
};
|
||||
|
||||
struct vm_region_mapping {
|
||||
struct vm_region_entry m_entry;
|
||||
struct vm_object *m_object;
|
||||
|
||||
/* used to link to vm_object->vo_mappings */
|
||||
struct queue_entry m_object_entry;
|
||||
|
||||
enum vm_prot m_prot;
|
||||
/* offset in bytes to the start of the object data that was mapped */
|
||||
off_t m_object_offset;
|
||||
};
|
||||
|
||||
struct vm_region {
|
||||
struct object vr_base;
|
||||
struct vm_region_entry vr_entry;
|
||||
|
||||
char vr_name[VM_REGION_NAME_MAX];
|
||||
|
||||
/* btree of struct vm_region_entry.
|
||||
* sibling entries cannot overlap each other, and child entries must
|
||||
* be entirely contained within their immediate parent entry. */
|
||||
struct btree vr_entries;
|
||||
|
||||
/* memory protection restriction mask.
|
||||
* any mapping in this region, or any of its children, cannot use
|
||||
* protection flags that are not set in this mask.
|
||||
* for example, if VM_PROT_EXEC is /not/ set here, no mapping
|
||||
* can be created in this region or any child region with VM_PROT_EXEC
|
||||
* set. */
|
||||
enum vm_prot vr_prot;
|
||||
|
||||
/* the physical address space in which mappings in this region (and
|
||||
* its children) are created */
|
||||
pmap_t vr_pmap;
|
||||
};
|
||||
|
||||
extern kern_status_t vm_region_type_init(void);
|
||||
|
||||
extern kern_status_t vm_region_create(
|
||||
struct vm_region *parent,
|
||||
const char *name,
|
||||
virt_addr_t base,
|
||||
size_t len,
|
||||
enum vm_prot prot,
|
||||
struct vm_region **out);
|
||||
|
||||
/* find the child region that has jurisdiction over the specified virtual
|
||||
* address. returns the lowest-nested region that covers the specified virtual
|
||||
* address. */
|
||||
extern struct vm_region *vm_region_find_child(
|
||||
struct vm_region *region,
|
||||
virt_addr_t addr);
|
||||
|
||||
/* find the child region that has jurisdiction over the specified virtual
|
||||
* address area. returns the lowest-nested region that covers the specified
|
||||
* virtual address area. the area must be fully contained within a region, with
|
||||
* no partial overlaps. if an area is covered by multiple regions, or is only
|
||||
* partially within a region, returns NULL. */
|
||||
extern struct vm_region *vm_region_find_child_for_area(
|
||||
struct vm_region *region,
|
||||
virt_addr_t addr,
|
||||
size_t len);
|
||||
extern struct vm_region_mapping *vm_region_find_mapping(
|
||||
struct vm_region *region,
|
||||
virt_addr_t addr);
|
||||
|
||||
extern kern_status_t vm_region_map_object(
|
||||
struct vm_region *region,
|
||||
virt_addr_t map_address,
|
||||
struct vm_object *object,
|
||||
off_t object_offset,
|
||||
size_t length,
|
||||
enum vm_prot prot,
|
||||
virt_addr_t *out);
|
||||
|
||||
/* returns true if the memory area defined by [base, base+len] contains:
|
||||
* - no child regions
|
||||
* - no vm_object mappings
|
||||
* if any child regions or mappings exist in the memory area, returns false.
|
||||
* if the memory area exceeds the bounds of the region, returns false.
|
||||
*/
|
||||
extern bool vm_region_is_area_free(
|
||||
const struct vm_region *region,
|
||||
virt_addr_t base,
|
||||
size_t len);
|
||||
|
||||
extern kern_status_t vm_region_demand_map(
|
||||
struct vm_region *region,
|
||||
virt_addr_t addr,
|
||||
enum pmap_fault_flags flags);
|
||||
|
||||
extern void vm_region_dump(struct vm_region *region, int depth);
|
||||
|
||||
DEFINE_OBJECT_LOCK_FUNCTION(vm_region, vr_base)
|
||||
|
||||
#endif
|
||||
@@ -1,5 +1,4 @@
|
||||
#include <mango/init.h>
|
||||
|
||||
#include <kernel/init.h>
|
||||
|
||||
int do_initcalls(void)
|
||||
{
|
||||
|
||||
39
init/main.c
39
init/main.c
@@ -1,18 +1,20 @@
|
||||
#include <mango/arg.h>
|
||||
#include <mango/bsp.h>
|
||||
#include <mango/clock.h>
|
||||
#include <mango/cpu.h>
|
||||
#include <mango/handle.h>
|
||||
#include <mango/init.h>
|
||||
#include <mango/input.h>
|
||||
#include <mango/libc/stdio.h>
|
||||
#include <mango/machine/init.h>
|
||||
#include <mango/object.h>
|
||||
#include <mango/panic.h>
|
||||
#include <mango/printk.h>
|
||||
#include <mango/sched.h>
|
||||
#include <mango/test.h>
|
||||
#include <mango/vm-object.h>
|
||||
#include <kernel/arg.h>
|
||||
#include <kernel/bsp.h>
|
||||
#include <kernel/channel.h>
|
||||
#include <kernel/clock.h>
|
||||
#include <kernel/cpu.h>
|
||||
#include <kernel/handle.h>
|
||||
#include <kernel/init.h>
|
||||
#include <kernel/input.h>
|
||||
#include <kernel/libc/stdio.h>
|
||||
#include <kernel/machine/init.h>
|
||||
#include <kernel/object.h>
|
||||
#include <kernel/panic.h>
|
||||
#include <kernel/port.h>
|
||||
#include <kernel/printk.h>
|
||||
#include <kernel/sched.h>
|
||||
#include <kernel/test.h>
|
||||
#include <kernel/vm-object.h>
|
||||
#include <stdint.h>
|
||||
|
||||
extern unsigned long get_rflags(void);
|
||||
@@ -31,7 +33,7 @@ static void hang(void)
|
||||
|
||||
while (1) {
|
||||
#if 0
|
||||
printk("[cpu %u, task %u, thread %u]: tick",
|
||||
printk("[cpu %u, task %ld, thread %u]: tick",
|
||||
this_cpu(),
|
||||
self->t_id,
|
||||
thread->tr_id);
|
||||
@@ -72,6 +74,9 @@ void kernel_init(uintptr_t arg)
|
||||
{
|
||||
ml_init(arg);
|
||||
|
||||
port_type_init();
|
||||
channel_type_init();
|
||||
|
||||
struct boot_module bsp_image = {0};
|
||||
bsp_get_location(&bsp_image);
|
||||
|
||||
@@ -102,7 +107,7 @@ void kernel_init(uintptr_t arg)
|
||||
bsp.bsp_trailer.bsp_exec_entry,
|
||||
bsp.bsp_vmo);
|
||||
|
||||
struct task *bootstrap_task = task_create(kernel_task(), "bootstrap");
|
||||
struct task *bootstrap_task = task_create("bootstrap", 9);
|
||||
tracek("created bootstrap task (pid=%u)", bootstrap_task->t_id);
|
||||
|
||||
bsp_launch_async(&bsp, bootstrap_task);
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#include <mango/arg.h>
|
||||
#include <mango/libc/string.h>
|
||||
#include <mango/libc/ctype.h>
|
||||
#include <kernel/arg.h>
|
||||
#include <kernel/libc/ctype.h>
|
||||
#include <kernel/libc/string.h>
|
||||
#include <mango/status.h>
|
||||
|
||||
static char g_cmdline[CMDLINE_MAX + 1] = {0};
|
||||
|
||||
@@ -81,7 +82,6 @@ static char *advance_to_next_arg(char *s, char *max)
|
||||
return s;
|
||||
}
|
||||
|
||||
|
||||
const char *arg_value(const char *arg_name)
|
||||
{
|
||||
char *s = g_cmdline;
|
||||
|
||||
196
kernel/bsp.c
196
kernel/bsp.c
@@ -1,9 +1,10 @@
|
||||
#include <mango/bsp.h>
|
||||
#include <mango/printk.h>
|
||||
#include <mango/sched.h>
|
||||
#include <mango/util.h>
|
||||
#include <mango/vm-object.h>
|
||||
#include <mango/vm-region.h>
|
||||
#include <kernel/bsp.h>
|
||||
#include <kernel/handle.h>
|
||||
#include <kernel/printk.h>
|
||||
#include <kernel/sched.h>
|
||||
#include <kernel/util.h>
|
||||
#include <kernel/vm-object.h>
|
||||
#include <kernel/vm-region.h>
|
||||
|
||||
#define BOOTSTRAP_STACK_SIZE 0x10000
|
||||
|
||||
@@ -57,6 +58,7 @@ kern_status_t bsp_load(struct bsp *bsp, const struct boot_module *mod)
|
||||
|
||||
bsp->bsp_vmo = vm_object_create_in_place(
|
||||
"bsp",
|
||||
3,
|
||||
mod->mod_base,
|
||||
mod->mod_size,
|
||||
VM_PROT_READ | VM_PROT_EXEC | VM_PROT_USER);
|
||||
@@ -67,7 +69,7 @@ kern_status_t bsp_load(struct bsp *bsp, const struct boot_module *mod)
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
static kern_status_t map_executable(
|
||||
static kern_status_t map_executable_dyn(
|
||||
struct bsp *bsp,
|
||||
struct task *task,
|
||||
virt_addr_t *entry)
|
||||
@@ -86,7 +88,8 @@ static kern_status_t map_executable(
|
||||
status = vm_region_create(
|
||||
task->t_address_space,
|
||||
"exec",
|
||||
VM_REGION_ANY_MAP_ADDRESS,
|
||||
4,
|
||||
VM_REGION_ANY_OFFSET,
|
||||
exec_size,
|
||||
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_USER,
|
||||
®ion);
|
||||
@@ -96,32 +99,46 @@ static kern_status_t map_executable(
|
||||
|
||||
struct vm_object *data = vm_object_create(
|
||||
".data",
|
||||
5,
|
||||
bsp->bsp_trailer.bsp_data_size,
|
||||
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_USER);
|
||||
/* TODO copy .data from executable to memory */
|
||||
if (!data) {
|
||||
return KERN_NO_MEMORY;
|
||||
}
|
||||
|
||||
off_t text_offset = bsp->bsp_trailer.bsp_exec_offset
|
||||
+ bsp->bsp_trailer.bsp_text_faddr;
|
||||
off_t data_offset = 0;
|
||||
virt_addr_t text_base = region->vr_entry.e_base_address
|
||||
+ bsp->bsp_trailer.bsp_text_vaddr;
|
||||
virt_addr_t data_base = region->vr_entry.e_base_address
|
||||
+ bsp->bsp_trailer.bsp_data_vaddr;
|
||||
virt_addr_t text_base = 0, data_base = 0;
|
||||
|
||||
tracek("exec_offset=%llx, text_faddr=%llx",
|
||||
bsp->bsp_trailer.bsp_exec_offset,
|
||||
bsp->bsp_trailer.bsp_text_faddr);
|
||||
tracek("text_offset=%llx, data_offset=%llx", text_offset, data_offset);
|
||||
tracek("text_base=%llx, data_base=%llx", text_base, data_base);
|
||||
off_t text_foffset = bsp->bsp_trailer.bsp_exec_offset
|
||||
+ bsp->bsp_trailer.bsp_text_faddr;
|
||||
off_t data_foffset = 0;
|
||||
off_t text_voffset = bsp->bsp_trailer.bsp_text_vaddr;
|
||||
off_t data_voffset = bsp->bsp_trailer.bsp_data_vaddr;
|
||||
|
||||
#if 0
|
||||
size_t tmp = 0;
|
||||
status = vm_object_copy(
|
||||
data,
|
||||
0,
|
||||
bsp->bsp_vmo,
|
||||
bsp->bsp_trailer.bsp_data_faddr,
|
||||
bsp->bsp_trailer.bsp_data_size,
|
||||
&tmp);
|
||||
|
||||
tracek("read %zuB of data from executable", tmp);
|
||||
#endif
|
||||
|
||||
tracek("text_foffset=%06llx, data_foffset=%06llx",
|
||||
text_foffset,
|
||||
data_foffset);
|
||||
tracek("text_voffset=%08llx, data_voffset=%08llx",
|
||||
text_voffset,
|
||||
data_voffset);
|
||||
|
||||
status = vm_region_map_object(
|
||||
region,
|
||||
text_base,
|
||||
text_voffset,
|
||||
bsp->bsp_vmo,
|
||||
text_offset,
|
||||
text_foffset,
|
||||
bsp->bsp_trailer.bsp_text_size,
|
||||
VM_PROT_READ | VM_PROT_EXEC | VM_PROT_USER,
|
||||
&text_base);
|
||||
@@ -131,9 +148,9 @@ static kern_status_t map_executable(
|
||||
|
||||
status = vm_region_map_object(
|
||||
region,
|
||||
data_base,
|
||||
data_voffset,
|
||||
data,
|
||||
data_offset,
|
||||
data_foffset,
|
||||
bsp->bsp_trailer.bsp_data_size,
|
||||
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_USER,
|
||||
&data_base);
|
||||
@@ -141,21 +158,97 @@ static kern_status_t map_executable(
|
||||
return status;
|
||||
}
|
||||
|
||||
tracek("text_base=%08llx, data_base=%08llx", text_base, data_base);
|
||||
|
||||
*entry = text_base + bsp->bsp_trailer.bsp_exec_entry;
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
static kern_status_t map_executable_exec(
|
||||
struct bsp *bsp,
|
||||
struct task *task,
|
||||
virt_addr_t *entry)
|
||||
{
|
||||
kern_status_t status = KERN_OK;
|
||||
struct vm_object *data = vm_object_create(
|
||||
".data",
|
||||
5,
|
||||
bsp->bsp_trailer.bsp_data_size,
|
||||
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_USER);
|
||||
if (!data) {
|
||||
return KERN_NO_MEMORY;
|
||||
}
|
||||
|
||||
virt_addr_t text_base = 0, data_base = 0;
|
||||
|
||||
off_t text_foffset = bsp->bsp_trailer.bsp_exec_offset
|
||||
+ bsp->bsp_trailer.bsp_text_faddr;
|
||||
off_t data_foffset = 0;
|
||||
off_t text_voffset = bsp->bsp_trailer.bsp_text_vaddr;
|
||||
off_t data_voffset = bsp->bsp_trailer.bsp_data_vaddr;
|
||||
|
||||
text_voffset -= vm_region_get_base_address(task->t_address_space);
|
||||
data_voffset -= vm_region_get_base_address(task->t_address_space);
|
||||
|
||||
#if 0
|
||||
size_t tmp = 0;
|
||||
status = vm_object_copy(
|
||||
data,
|
||||
0,
|
||||
bsp->bsp_vmo,
|
||||
bsp->bsp_trailer.bsp_data_faddr,
|
||||
bsp->bsp_trailer.bsp_data_size,
|
||||
&tmp);
|
||||
|
||||
tracek("read %zuB of data from executable", tmp);
|
||||
#endif
|
||||
|
||||
tracek("text_foffset=%06llx, data_foffset=%06llx",
|
||||
text_foffset,
|
||||
data_foffset);
|
||||
tracek("text_voffset=%08llx, data_voffset=%08llx",
|
||||
text_voffset,
|
||||
data_voffset);
|
||||
|
||||
status = vm_region_map_object(
|
||||
task->t_address_space,
|
||||
text_voffset,
|
||||
bsp->bsp_vmo,
|
||||
text_foffset,
|
||||
bsp->bsp_trailer.bsp_text_size,
|
||||
VM_PROT_READ | VM_PROT_EXEC | VM_PROT_USER,
|
||||
&text_base);
|
||||
if (status != KERN_OK) {
|
||||
return status;
|
||||
}
|
||||
|
||||
status = vm_region_map_object(
|
||||
task->t_address_space,
|
||||
data_voffset,
|
||||
data,
|
||||
data_foffset,
|
||||
bsp->bsp_trailer.bsp_data_size,
|
||||
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_USER,
|
||||
&data_base);
|
||||
if (status != KERN_OK) {
|
||||
return status;
|
||||
}
|
||||
|
||||
tracek("text_base=%08llx, data_base=%08llx", text_base, data_base);
|
||||
|
||||
*entry = bsp->bsp_trailer.bsp_exec_entry;
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
kern_status_t bsp_launch_async(struct bsp *bsp, struct task *task)
|
||||
{
|
||||
virt_addr_t stack_buffer;
|
||||
virt_addr_t stack_buffer, bsp_data_base;
|
||||
virt_addr_t entry, sp;
|
||||
kern_status_t status = map_executable(bsp, task, &entry);
|
||||
if (status != KERN_OK) {
|
||||
return status;
|
||||
}
|
||||
kern_status_t status;
|
||||
|
||||
struct vm_object *user_stack = vm_object_create(
|
||||
"stack",
|
||||
5,
|
||||
BOOTSTRAP_STACK_SIZE,
|
||||
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_USER);
|
||||
if (!user_stack) {
|
||||
@@ -164,7 +257,7 @@ kern_status_t bsp_launch_async(struct bsp *bsp, struct task *task)
|
||||
|
||||
status = vm_region_map_object(
|
||||
task->t_address_space,
|
||||
VM_REGION_ANY_MAP_ADDRESS,
|
||||
VM_REGION_ANY_OFFSET,
|
||||
user_stack,
|
||||
0,
|
||||
BOOTSTRAP_STACK_SIZE,
|
||||
@@ -175,15 +268,52 @@ kern_status_t bsp_launch_async(struct bsp *bsp, struct task *task)
|
||||
return status;
|
||||
}
|
||||
|
||||
status = vm_region_map_object(
|
||||
task->t_address_space,
|
||||
VM_REGION_ANY_OFFSET,
|
||||
bsp->bsp_vmo,
|
||||
0,
|
||||
bsp->bsp_trailer.bsp_exec_offset,
|
||||
VM_PROT_READ | VM_PROT_USER,
|
||||
&bsp_data_base);
|
||||
|
||||
if (status != KERN_OK) {
|
||||
return status;
|
||||
}
|
||||
|
||||
status = map_executable_exec(bsp, task, &entry);
|
||||
if (status != KERN_OK) {
|
||||
return status;
|
||||
}
|
||||
#ifdef TRACE
|
||||
vm_region_dump(task->t_address_space, 0);
|
||||
vm_region_dump(task->t_address_space);
|
||||
#endif
|
||||
|
||||
sp = stack_buffer + BOOTSTRAP_STACK_SIZE;
|
||||
tracek("bootstrap: entry=%llx, sp=%llx", entry, sp);
|
||||
|
||||
kern_handle_t self, self_address_space;
|
||||
task_open_handle(task, &task->t_base, 0, &self);
|
||||
task_open_handle(
|
||||
task,
|
||||
&task->t_address_space->vr_base,
|
||||
0,
|
||||
&self_address_space);
|
||||
|
||||
const uintptr_t args[] = {
|
||||
0, // int argc
|
||||
0, // const char ** argv
|
||||
self, // kern_handle_t task
|
||||
self_address_space, // kern_handle_t address_space
|
||||
|
||||
/* this parameter is specific to the bsp bootstrap program, so
|
||||
* that it can access the rest of the bsp image. */
|
||||
bsp_data_base,
|
||||
};
|
||||
const size_t nr_args = sizeof args / sizeof args[0];
|
||||
|
||||
struct thread *init_thread = task_create_thread(task);
|
||||
thread_init_user(init_thread, entry, sp);
|
||||
thread_init_user(init_thread, entry, sp, args, nr_args);
|
||||
schedule_thread_on_cpu(init_thread);
|
||||
|
||||
return KERN_OK;
|
||||
|
||||
379
kernel/channel.c
Normal file
379
kernel/channel.c
Normal file
@@ -0,0 +1,379 @@
|
||||
#include <kernel/channel.h>
|
||||
#include <kernel/msg.h>
|
||||
#include <kernel/port.h>
|
||||
#include <kernel/util.h>
|
||||
#include <kernel/vm-region.h>
|
||||
|
||||
#define CHANNEL_CAST(p) OBJECT_C_CAST(struct channel, c_base, &channel_type, p)
|
||||
|
||||
static struct object_type channel_type = {
|
||||
.ob_name = "channel",
|
||||
.ob_size = sizeof(struct channel),
|
||||
.ob_header_offset = offsetof(struct channel, c_base),
|
||||
};
|
||||
|
||||
BTREE_DEFINE_SIMPLE_GET(struct msg, msgid_t, msg_node, msg_id, get_msg_with_id)
|
||||
|
||||
kern_status_t channel_type_init(void)
|
||||
{
|
||||
return object_type_register(&channel_type);
|
||||
}
|
||||
|
||||
struct channel *channel_cast(struct object *obj)
|
||||
{
|
||||
return CHANNEL_CAST(obj);
|
||||
}
|
||||
|
||||
extern struct channel *channel_create(void)
|
||||
{
|
||||
struct object *channel_object = object_create(&channel_type);
|
||||
if (!channel_object) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct channel *channel = CHANNEL_CAST(channel_object);
|
||||
|
||||
return channel;
|
||||
}
|
||||
|
||||
static bool try_enqueue(struct btree *tree, struct msg *msg)
|
||||
{
|
||||
if (!tree->b_root) {
|
||||
tree->b_root = &msg->msg_node;
|
||||
btree_insert_fixup(tree, &msg->msg_node);
|
||||
return true;
|
||||
}
|
||||
|
||||
struct btree_node *cur = tree->b_root;
|
||||
while (1) {
|
||||
struct msg *cur_node
|
||||
= BTREE_CONTAINER(struct msg, msg_node, cur);
|
||||
struct btree_node *next = NULL;
|
||||
|
||||
if (msg->msg_id > cur_node->msg_id) {
|
||||
next = btree_right(cur);
|
||||
|
||||
if (!next) {
|
||||
btree_put_right(cur, &msg->msg_node);
|
||||
break;
|
||||
}
|
||||
} else if (msg->msg_id < cur_node->msg_id) {
|
||||
next = btree_left(cur);
|
||||
|
||||
if (!next) {
|
||||
btree_put_left(cur, &msg->msg_node);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
cur = next;
|
||||
}
|
||||
|
||||
btree_insert_fixup(tree, &msg->msg_node);
|
||||
return true;
|
||||
}
|
||||
|
||||
static void kmsg_reply_error(
|
||||
struct msg *msg,
|
||||
kern_status_t status,
|
||||
unsigned long *lock_flags)
|
||||
{
|
||||
msg->msg_status = KMSG_REPLY_SENT;
|
||||
msg->msg_sender_port->p_status = PORT_READY;
|
||||
msg->msg_result = status;
|
||||
thread_awaken(msg->msg_sender_thread);
|
||||
spin_unlock_irqrestore(&msg->msg_lock, *lock_flags);
|
||||
}
|
||||
|
||||
static struct msg *get_next_msg(
|
||||
struct channel *channel,
|
||||
unsigned long *lock_flags)
|
||||
{
|
||||
struct btree_node *cur = btree_first(&channel->c_msg);
|
||||
while (cur) {
|
||||
struct msg *msg = BTREE_CONTAINER(struct msg, msg_node, cur);
|
||||
spin_lock_irqsave(&msg->msg_lock, lock_flags);
|
||||
if (msg->msg_status == KMSG_WAIT_RECEIVE) {
|
||||
msg->msg_status = KMSG_WAIT_REPLY;
|
||||
msg->msg_sender_port->p_status = PORT_REPLY_BLOCKED;
|
||||
return msg;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&msg->msg_lock, *lock_flags);
|
||||
cur = btree_next(cur);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
extern kern_status_t channel_enqueue_msg(
|
||||
struct channel *channel,
|
||||
struct msg *msg)
|
||||
{
|
||||
fill_random(&msg->msg_id, sizeof msg->msg_id);
|
||||
while (!try_enqueue(&channel->c_msg, msg)) {
|
||||
msg->msg_id++;
|
||||
}
|
||||
|
||||
wakeup_one(&channel->c_wq);
|
||||
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
extern kern_status_t channel_recv_msg(
|
||||
struct channel *channel,
|
||||
kern_msg_t *out_msg,
|
||||
unsigned long *irq_flags)
|
||||
{
|
||||
struct wait_item waiter;
|
||||
struct thread *self = current_thread();
|
||||
struct msg *msg = NULL;
|
||||
unsigned long msg_lock_flags;
|
||||
|
||||
wait_item_init(&waiter, self);
|
||||
for (;;) {
|
||||
thread_wait_begin(&waiter, &channel->c_wq);
|
||||
msg = get_next_msg(channel, &msg_lock_flags);
|
||||
if (msg) {
|
||||
break;
|
||||
}
|
||||
|
||||
object_unlock_irqrestore(&channel->c_base, *irq_flags);
|
||||
schedule(SCHED_NORMAL);
|
||||
object_lock_irqsave(&channel->c_base, irq_flags);
|
||||
}
|
||||
thread_wait_end(&waiter, &channel->c_wq);
|
||||
|
||||
/* msg is now set to the next message to process */
|
||||
|
||||
struct task *sender = msg->msg_sender_thread->tr_parent;
|
||||
struct task *receiver = self->tr_parent;
|
||||
|
||||
struct vm_region *src = sender->t_address_space,
|
||||
*dst = receiver->t_address_space;
|
||||
|
||||
unsigned long f;
|
||||
vm_region_lock_pair_irqsave(src, dst, &f);
|
||||
|
||||
kern_status_t status = vm_region_memmove_v(
|
||||
dst,
|
||||
0,
|
||||
out_msg->msg_data,
|
||||
out_msg->msg_data_count,
|
||||
src,
|
||||
0,
|
||||
msg->msg_req.msg_data,
|
||||
msg->msg_req.msg_data_count,
|
||||
VM_REGION_COPY_ALL,
|
||||
NULL);
|
||||
|
||||
if (status != KERN_OK) {
|
||||
kmsg_reply_error(msg, status, &msg_lock_flags);
|
||||
return status;
|
||||
}
|
||||
|
||||
struct handle_table *src_table = sender->t_handles,
|
||||
*dst_table = receiver->t_handles;
|
||||
|
||||
spin_lock_pair_irqsave(
|
||||
&sender->t_handles_lock,
|
||||
&receiver->t_handles_lock,
|
||||
&f);
|
||||
status = handle_table_transfer(
|
||||
dst,
|
||||
dst_table,
|
||||
out_msg->msg_handles,
|
||||
out_msg->msg_handles_count,
|
||||
src,
|
||||
src_table,
|
||||
msg->msg_req.msg_handles,
|
||||
msg->msg_req.msg_handles_count);
|
||||
spin_unlock_pair_irqrestore(
|
||||
&sender->t_handles_lock,
|
||||
&receiver->t_handles_lock,
|
||||
f);
|
||||
vm_region_unlock_pair_irqrestore(src, dst, f);
|
||||
|
||||
if (status != KERN_OK) {
|
||||
kmsg_reply_error(msg, status, &msg_lock_flags);
|
||||
return status;
|
||||
}
|
||||
|
||||
out_msg->msg_id = msg->msg_id;
|
||||
out_msg->msg_sender = msg->msg_sender_thread->tr_parent->t_id;
|
||||
out_msg->msg_endpoint = msg->msg_sender_port->p_base.ob_id;
|
||||
|
||||
spin_unlock_irqrestore(&msg->msg_lock, msg_lock_flags);
|
||||
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
extern kern_status_t channel_reply_msg(
|
||||
struct channel *channel,
|
||||
msgid_t id,
|
||||
const kern_msg_t *reply,
|
||||
unsigned long *irq_flags)
|
||||
{
|
||||
unsigned long msg_lock_flags;
|
||||
struct msg *msg = get_msg_with_id(&channel->c_msg, id);
|
||||
if (!msg) {
|
||||
return KERN_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&msg->msg_lock, &msg_lock_flags);
|
||||
if (msg->msg_status != KMSG_WAIT_REPLY) {
|
||||
spin_unlock_irqrestore(&msg->msg_lock, msg_lock_flags);
|
||||
return KERN_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
struct thread *self = current_thread();
|
||||
/* the task that is about to receive the response */
|
||||
struct task *receiver = msg->msg_sender_thread->tr_parent;
|
||||
/* the task that is about to send the response */
|
||||
struct task *sender = self->tr_parent;
|
||||
|
||||
struct vm_region *src = sender->t_address_space,
|
||||
*dst = receiver->t_address_space;
|
||||
unsigned long f;
|
||||
vm_region_lock_pair_irqsave(src, dst, &f);
|
||||
|
||||
kern_status_t status = vm_region_memmove_v(
|
||||
dst,
|
||||
0,
|
||||
msg->msg_resp.msg_data,
|
||||
msg->msg_resp.msg_data_count,
|
||||
src,
|
||||
0,
|
||||
reply->msg_data,
|
||||
reply->msg_data_count,
|
||||
VM_REGION_COPY_ALL,
|
||||
NULL);
|
||||
|
||||
if (status != KERN_OK) {
|
||||
kmsg_reply_error(msg, status, &msg_lock_flags);
|
||||
return status;
|
||||
}
|
||||
|
||||
struct handle_table *src_table = sender->t_handles,
|
||||
*dst_table = receiver->t_handles;
|
||||
|
||||
spin_lock_pair_irqsave(
|
||||
&sender->t_handles_lock,
|
||||
&receiver->t_handles_lock,
|
||||
&f);
|
||||
status = handle_table_transfer(
|
||||
dst,
|
||||
dst_table,
|
||||
msg->msg_resp.msg_handles,
|
||||
msg->msg_resp.msg_handles_count,
|
||||
src,
|
||||
src_table,
|
||||
reply->msg_handles,
|
||||
reply->msg_handles_count);
|
||||
spin_unlock_pair_irqrestore(
|
||||
&sender->t_handles_lock,
|
||||
&receiver->t_handles_lock,
|
||||
f);
|
||||
vm_region_unlock_pair_irqrestore(src, dst, f);
|
||||
|
||||
if (status != KERN_OK) {
|
||||
kmsg_reply_error(msg, status, &msg_lock_flags);
|
||||
return status;
|
||||
}
|
||||
|
||||
kmsg_reply_error(msg, KERN_OK, &msg_lock_flags);
|
||||
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
extern kern_status_t channel_read_msg(
|
||||
struct channel *channel,
|
||||
msgid_t id,
|
||||
size_t offset,
|
||||
struct vm_region *dest_region,
|
||||
const kern_iovec_t *dest_iov,
|
||||
size_t dest_iov_count,
|
||||
size_t *nr_read)
|
||||
{
|
||||
unsigned long msg_lock_flags;
|
||||
struct msg *msg = get_msg_with_id(&channel->c_msg, id);
|
||||
if (!msg) {
|
||||
return KERN_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&msg->msg_lock, &msg_lock_flags);
|
||||
if (msg->msg_status != KMSG_WAIT_REPLY) {
|
||||
spin_unlock_irqrestore(&msg->msg_lock, msg_lock_flags);
|
||||
return KERN_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
struct vm_region *src_region
|
||||
= msg->msg_sender_thread->tr_parent->t_address_space;
|
||||
|
||||
unsigned long f;
|
||||
vm_region_lock_pair_irqsave(src_region, dest_region, &f);
|
||||
|
||||
kern_status_t status = vm_region_memmove_v(
|
||||
dest_region,
|
||||
0,
|
||||
dest_iov,
|
||||
dest_iov_count,
|
||||
src_region,
|
||||
offset,
|
||||
msg->msg_req.msg_data,
|
||||
msg->msg_req.msg_data_count,
|
||||
VM_REGION_COPY_ALL,
|
||||
nr_read);
|
||||
vm_region_unlock_pair_irqrestore(src_region, dest_region, f);
|
||||
|
||||
spin_unlock_irqrestore(&msg->msg_lock, msg_lock_flags);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
extern kern_status_t channel_write_msg(
|
||||
struct channel *channel,
|
||||
msgid_t id,
|
||||
size_t offset,
|
||||
struct vm_region *src_region,
|
||||
const kern_iovec_t *src_iov,
|
||||
size_t src_iov_count,
|
||||
size_t *nr_written)
|
||||
{
|
||||
unsigned long msg_lock_flags;
|
||||
struct msg *msg = get_msg_with_id(&channel->c_msg, id);
|
||||
if (!msg) {
|
||||
return KERN_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&msg->msg_lock, &msg_lock_flags);
|
||||
if (msg->msg_status != KMSG_WAIT_REPLY) {
|
||||
spin_unlock_irqrestore(&msg->msg_lock, msg_lock_flags);
|
||||
return KERN_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
struct vm_region *dest_region
|
||||
= msg->msg_sender_thread->tr_parent->t_address_space;
|
||||
|
||||
unsigned long f;
|
||||
vm_region_lock_pair_irqsave(src_region, dest_region, &f);
|
||||
|
||||
kern_status_t status = vm_region_memmove_v(
|
||||
dest_region,
|
||||
offset,
|
||||
msg->msg_resp.msg_data,
|
||||
msg->msg_resp.msg_data_count,
|
||||
src_region,
|
||||
0,
|
||||
src_iov,
|
||||
src_iov_count,
|
||||
VM_REGION_COPY_ALL,
|
||||
nr_written);
|
||||
vm_region_unlock_pair_irqrestore(src_region, dest_region, f);
|
||||
|
||||
spin_unlock_irqrestore(&msg->msg_lock, msg_lock_flags);
|
||||
|
||||
return status;
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
#include <mango/clock.h>
|
||||
#include <mango/printk.h>
|
||||
#include <mango/compiler.h>
|
||||
#include <kernel/clock.h>
|
||||
#include <kernel/printk.h>
|
||||
#include <kernel/compiler.h>
|
||||
|
||||
static clock_ticks_t ticks_per_sec = 0;
|
||||
volatile clock_ticks_t clock_ticks = 0;
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#include <mango/console.h>
|
||||
#include <mango/queue.h>
|
||||
#include <mango/locks.h>
|
||||
#include <mango/libc/string.h>
|
||||
#include <kernel/console.h>
|
||||
#include <kernel/queue.h>
|
||||
#include <kernel/locks.h>
|
||||
#include <kernel/libc/string.h>
|
||||
|
||||
static struct queue consoles;
|
||||
static spin_lock_t consoles_lock = SPIN_LOCK_INIT;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#include <mango/cpu.h>
|
||||
#include <mango/percpu.h>
|
||||
#include <mango/bitmap.h>
|
||||
#include <kernel/cpu.h>
|
||||
#include <kernel/percpu.h>
|
||||
#include <kernel/bitmap.h>
|
||||
|
||||
DECLARE_BITMAP(cpu_available, CPU_MAX);
|
||||
DECLARE_BITMAP(cpu_online, CPU_MAX);
|
||||
|
||||
181
kernel/handle.c
181
kernel/handle.c
@@ -1,10 +1,15 @@
|
||||
#include <mango/handle.h>
|
||||
#include <mango/libc/string.h>
|
||||
#include <mango/object.h>
|
||||
#include <mango/vm.h>
|
||||
#include <kernel/handle.h>
|
||||
#include <kernel/libc/string.h>
|
||||
#include <kernel/object.h>
|
||||
#include <kernel/sched.h>
|
||||
#include <kernel/util.h>
|
||||
#include <kernel/vm-region.h>
|
||||
#include <kernel/vm.h>
|
||||
#include <mango/types.h>
|
||||
|
||||
/* depth=3 gives a maximum of ~66.6 million handles */
|
||||
#define MAX_TABLE_DEPTH 3
|
||||
#define MAX_TABLE_DEPTH 3
|
||||
#define RESERVED_HANDLES 64
|
||||
|
||||
static struct vm_cache handle_table_cache = {
|
||||
.c_name = "handle_table",
|
||||
@@ -36,7 +41,6 @@ static kern_status_t decode_handle_indices(
|
||||
kern_handle_t handle,
|
||||
unsigned int indices[MAX_TABLE_DEPTH])
|
||||
{
|
||||
handle >>= 2;
|
||||
for (int i = 0; i < MAX_TABLE_DEPTH; i++) {
|
||||
unsigned int div = (i > 0 ? REFS_PER_TABLE : HANDLES_PER_TABLE);
|
||||
|
||||
@@ -61,8 +65,6 @@ static kern_status_t encode_handle_indices(
|
||||
mul *= REFS_PER_TABLE;
|
||||
}
|
||||
|
||||
handle <<= 2;
|
||||
|
||||
*out_handle = handle;
|
||||
return KERN_OK;
|
||||
}
|
||||
@@ -74,6 +76,7 @@ kern_status_t handle_table_alloc_handle(
|
||||
{
|
||||
int i;
|
||||
unsigned int indices[MAX_TABLE_DEPTH] = {0};
|
||||
static const unsigned int reserved_indices[MAX_TABLE_DEPTH] = {0};
|
||||
|
||||
for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
|
||||
unsigned int next_index = bitmap_lowest_clear(
|
||||
@@ -99,6 +102,10 @@ kern_status_t handle_table_alloc_handle(
|
||||
tab = next;
|
||||
}
|
||||
|
||||
if (memcmp(indices, reserved_indices, sizeof indices) == 0) {
|
||||
bitmap_fill(tab->t_handles.t_handle_map, RESERVED_HANDLES);
|
||||
}
|
||||
|
||||
unsigned int handle_index = bitmap_lowest_clear(
|
||||
tab->t_handles.t_handle_map,
|
||||
HANDLES_PER_TABLE);
|
||||
@@ -117,11 +124,13 @@ kern_status_t handle_table_alloc_handle(
|
||||
return encode_handle_indices(indices, out_handle);
|
||||
}
|
||||
|
||||
void handle_table_free_handle(struct handle_table *tab, kern_handle_t handle)
|
||||
kern_status_t handle_table_free_handle(
|
||||
struct handle_table *tab,
|
||||
kern_handle_t handle)
|
||||
{
|
||||
unsigned int indices[MAX_TABLE_DEPTH];
|
||||
if (decode_handle_indices(handle, indices) != KERN_OK) {
|
||||
return;
|
||||
return KERN_NO_ENTRY;
|
||||
}
|
||||
|
||||
int i;
|
||||
@@ -129,7 +138,7 @@ void handle_table_free_handle(struct handle_table *tab, kern_handle_t handle)
|
||||
struct handle_table *next
|
||||
= tab->t_subtables.t_subtable_list[indices[i]];
|
||||
if (!next) {
|
||||
return;
|
||||
return KERN_NO_ENTRY;
|
||||
}
|
||||
|
||||
bitmap_clear(tab->t_subtables.t_subtable_map, indices[i]);
|
||||
@@ -137,6 +146,10 @@ void handle_table_free_handle(struct handle_table *tab, kern_handle_t handle)
|
||||
}
|
||||
|
||||
unsigned int handle_index = indices[i];
|
||||
if (!bitmap_check(tab->t_handles.t_handle_map, handle_index)) {
|
||||
return KERN_NO_ENTRY;
|
||||
}
|
||||
|
||||
bitmap_clear(tab->t_handles.t_handle_map, handle_index);
|
||||
struct handle *handle_entry
|
||||
= &tab->t_handles.t_handle_list[handle_index];
|
||||
@@ -146,6 +159,7 @@ void handle_table_free_handle(struct handle_table *tab, kern_handle_t handle)
|
||||
}
|
||||
|
||||
memset(handle_entry, 0x0, sizeof *handle_entry);
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
struct handle *handle_table_get_handle(
|
||||
@@ -173,5 +187,150 @@ struct handle *handle_table_get_handle(
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!tab->t_handles.t_handle_list[handle_index].h_object) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return &tab->t_handles.t_handle_list[handle_index];
|
||||
}
|
||||
|
||||
kern_status_t handle_table_transfer(
|
||||
struct vm_region *dst_region,
|
||||
struct handle_table *dst,
|
||||
kern_msg_handle_t *dst_handles,
|
||||
size_t dst_handles_max,
|
||||
struct vm_region *src_region,
|
||||
struct handle_table *src,
|
||||
kern_msg_handle_t *src_handles,
|
||||
size_t src_handles_count)
|
||||
{
|
||||
kern_status_t status = KERN_OK;
|
||||
size_t to_transfer = MIN(dst_handles_max, src_handles_count);
|
||||
|
||||
size_t i = 0;
|
||||
for (size_t i = 0; i < to_transfer; i++) {
|
||||
kern_msg_handle_t src_handle = {0}, dst_handle = {0};
|
||||
virt_addr_t src_handle_addr
|
||||
= (virt_addr_t)src_handles + (i * sizeof src_handle);
|
||||
virt_addr_t dst_handle_addr
|
||||
= (virt_addr_t)dst_handles + (i * sizeof dst_handle);
|
||||
status = vm_region_read_kernel(
|
||||
src_region,
|
||||
src_handle_addr,
|
||||
sizeof src_handle,
|
||||
&src_handle,
|
||||
NULL);
|
||||
|
||||
if (status != KERN_OK) {
|
||||
src_handle.hnd_result = KERN_OK;
|
||||
vm_region_write_kernel(
|
||||
src_region,
|
||||
src_handle_addr,
|
||||
sizeof src_handle,
|
||||
&src_handle,
|
||||
NULL);
|
||||
break;
|
||||
}
|
||||
|
||||
struct handle *src_entry
|
||||
= handle_table_get_handle(src, src_handle.hnd_value);
|
||||
struct handle *dst_entry = NULL;
|
||||
kern_handle_t dst_value = KERN_HANDLE_INVALID;
|
||||
|
||||
if (!src_entry) {
|
||||
status = KERN_INVALID_ARGUMENT;
|
||||
src_handle.hnd_result = KERN_OK;
|
||||
vm_region_write_kernel(
|
||||
src_region,
|
||||
src_handle_addr,
|
||||
sizeof src_handle,
|
||||
&src_handle,
|
||||
NULL);
|
||||
break;
|
||||
}
|
||||
|
||||
switch (src_handle.hnd_mode) {
|
||||
case KERN_MSG_HANDLE_IGNORE:
|
||||
break;
|
||||
case KERN_MSG_HANDLE_MOVE:
|
||||
status = handle_table_alloc_handle(
|
||||
dst,
|
||||
&dst_entry,
|
||||
&dst_value);
|
||||
if (status != KERN_OK) {
|
||||
break;
|
||||
}
|
||||
|
||||
dst_entry->h_object = src_entry->h_object;
|
||||
dst_entry->h_flags = src_entry->h_flags;
|
||||
object_add_handle(dst_entry->h_object);
|
||||
|
||||
handle_table_free_handle(src, src_handles[i].hnd_value);
|
||||
|
||||
dst_handle.hnd_mode = src_handles[i].hnd_mode;
|
||||
dst_handle.hnd_value = dst_value;
|
||||
dst_handle.hnd_result = KERN_OK;
|
||||
break;
|
||||
case KERN_MSG_HANDLE_COPY:
|
||||
status = handle_table_alloc_handle(
|
||||
dst,
|
||||
&dst_entry,
|
||||
&dst_value);
|
||||
if (status != KERN_OK) {
|
||||
break;
|
||||
}
|
||||
|
||||
dst_entry->h_object = src_entry->h_object;
|
||||
dst_entry->h_flags = src_entry->h_flags;
|
||||
object_add_handle(dst_entry->h_object);
|
||||
|
||||
dst_handle.hnd_mode = src_handles[i].hnd_mode;
|
||||
dst_handle.hnd_value = dst_value;
|
||||
dst_handle.hnd_result = KERN_OK;
|
||||
break;
|
||||
default:
|
||||
status = KERN_INVALID_ARGUMENT;
|
||||
break;
|
||||
}
|
||||
|
||||
src_handle.hnd_result = status;
|
||||
|
||||
vm_region_write_kernel(
|
||||
src_region,
|
||||
src_handle_addr,
|
||||
sizeof src_handle,
|
||||
&src_handle,
|
||||
NULL);
|
||||
vm_region_write_kernel(
|
||||
dst_region,
|
||||
dst_handle_addr,
|
||||
sizeof dst_handle,
|
||||
&dst_handle,
|
||||
NULL);
|
||||
}
|
||||
|
||||
for (; i < src_handles_count; i++) {
|
||||
kern_msg_handle_t handle = {0};
|
||||
virt_addr_t handle_addr
|
||||
= (virt_addr_t)src_handles + (i * sizeof handle);
|
||||
vm_region_read_kernel(
|
||||
src_region,
|
||||
handle_addr,
|
||||
sizeof handle,
|
||||
&handle,
|
||||
NULL);
|
||||
|
||||
if (handle.hnd_mode != KERN_MSG_HANDLE_MOVE) {
|
||||
continue;
|
||||
}
|
||||
|
||||
struct handle *src_entry
|
||||
= handle_table_get_handle(src, handle.hnd_value);
|
||||
if (src_entry) {
|
||||
object_remove_handle(src_entry->h_object);
|
||||
handle_table_free_handle(src, handle.hnd_value);
|
||||
}
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
121
kernel/iovec.c
Normal file
121
kernel/iovec.c
Normal file
@@ -0,0 +1,121 @@
|
||||
#include <kernel/iovec.h>
|
||||
#include <kernel/libc/string.h>
|
||||
#include <kernel/util.h>
|
||||
#include <kernel/vm-region.h>
|
||||
|
||||
static bool read_iovec(
|
||||
struct iovec_iterator *it,
|
||||
size_t index,
|
||||
kern_iovec_t *out)
|
||||
{
|
||||
if (index >= it->it_nr_vecs) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!it->it_region) {
|
||||
memcpy(out, &it->it_vecs[index], sizeof *out);
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t nr_read = 0;
|
||||
kern_status_t status = vm_region_read_kernel(
|
||||
it->it_region,
|
||||
(virt_addr_t)it->it_vecs + (index * sizeof(kern_iovec_t)),
|
||||
sizeof(kern_iovec_t),
|
||||
out,
|
||||
&nr_read);
|
||||
|
||||
return (status == KERN_OK && nr_read != sizeof(kern_iovec_t));
|
||||
}
|
||||
|
||||
void iovec_iterator_begin_user(
|
||||
struct iovec_iterator *it,
|
||||
struct vm_region *region,
|
||||
const kern_iovec_t *vecs,
|
||||
size_t nr_vecs)
|
||||
{
|
||||
memset(it, 0x0, sizeof *it);
|
||||
it->it_region = region;
|
||||
it->it_vecs = vecs;
|
||||
it->it_nr_vecs = nr_vecs;
|
||||
|
||||
kern_iovec_t iov;
|
||||
|
||||
while (it->it_vec_ptr < nr_vecs) {
|
||||
read_iovec(it, it->it_vec_ptr, &iov);
|
||||
if (iov.io_len > 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
it->it_vec_ptr++;
|
||||
}
|
||||
|
||||
if (it->it_vec_ptr >= nr_vecs) {
|
||||
return;
|
||||
}
|
||||
|
||||
it->it_base = iov.io_base;
|
||||
it->it_len = iov.io_len;
|
||||
}
|
||||
|
||||
void iovec_iterator_begin(
|
||||
struct iovec_iterator *it,
|
||||
const kern_iovec_t *vecs,
|
||||
size_t nr_vecs)
|
||||
{
|
||||
memset(it, 0x0, sizeof *it);
|
||||
it->it_vecs = vecs;
|
||||
it->it_nr_vecs = nr_vecs;
|
||||
|
||||
while (it->it_vec_ptr < nr_vecs) {
|
||||
if (vecs[it->it_vec_ptr].io_len > 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
it->it_vec_ptr++;
|
||||
}
|
||||
|
||||
if (it->it_vec_ptr >= nr_vecs) {
|
||||
it->it_len = 0;
|
||||
it->it_base = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
it->it_base = vecs[it->it_vec_ptr].io_base;
|
||||
it->it_len = vecs[it->it_vec_ptr].io_len;
|
||||
}
|
||||
|
||||
void iovec_iterator_seek(struct iovec_iterator *it, size_t nr_bytes)
|
||||
{
|
||||
while (nr_bytes > 0) {
|
||||
size_t to_seek = MIN(nr_bytes, it->it_len);
|
||||
|
||||
if (to_seek < it->it_len) {
|
||||
it->it_len -= to_seek;
|
||||
it->it_base += to_seek;
|
||||
break;
|
||||
}
|
||||
|
||||
nr_bytes -= to_seek;
|
||||
kern_iovec_t iov;
|
||||
|
||||
it->it_vec_ptr++;
|
||||
while (it->it_vec_ptr < it->it_nr_vecs) {
|
||||
read_iovec(it, it->it_vec_ptr, &iov);
|
||||
if (iov.io_len > 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
it->it_vec_ptr++;
|
||||
}
|
||||
|
||||
if (it->it_vec_ptr >= it->it_nr_vecs) {
|
||||
it->it_len = 0;
|
||||
it->it_base = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
it->it_base = iov.io_base;
|
||||
it->it_len = iov.io_len;
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,26 @@
|
||||
#include <mango/locks.h>
|
||||
#include <mango/object.h>
|
||||
#include <mango/queue.h>
|
||||
#include <kernel/locks.h>
|
||||
#include <kernel/object.h>
|
||||
#include <kernel/queue.h>
|
||||
|
||||
#define HAS_OP(obj, opname) ((obj)->ob_type->ob_ops.opname)
|
||||
|
||||
static struct queue object_types;
|
||||
static spin_lock_t object_types_lock = SPIN_LOCK_INIT;
|
||||
|
||||
static koid_t koid_alloc(void)
|
||||
{
|
||||
static koid_t counter = 0;
|
||||
static spin_lock_t lock = SPIN_LOCK_INIT;
|
||||
|
||||
unsigned long flags;
|
||||
spin_lock_irqsave(&lock, &flags);
|
||||
koid_t result = counter;
|
||||
counter++;
|
||||
spin_unlock_irqrestore(&lock, flags);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
kern_status_t object_bootstrap(void)
|
||||
{
|
||||
return KERN_OK;
|
||||
@@ -50,11 +64,10 @@ struct object *object_create(struct object_type *type)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
memset(obj_buf, 0x00, type->ob_size);
|
||||
|
||||
struct object *obj = (struct object *)((unsigned char *)obj_buf
|
||||
+ type->ob_header_offset);
|
||||
|
||||
obj->ob_id = koid_alloc();
|
||||
obj->ob_type = type;
|
||||
obj->ob_lock = SPIN_LOCK_INIT;
|
||||
obj->ob_magic = OBJECT_MAGIC;
|
||||
@@ -70,6 +83,15 @@ struct object *object_ref(struct object *obj)
|
||||
return obj;
|
||||
}
|
||||
|
||||
static void __cleanup(struct object *obj, struct queue *queue)
|
||||
{
|
||||
if (HAS_OP(obj, destroy)) {
|
||||
obj->ob_type->ob_ops.destroy(obj, queue);
|
||||
}
|
||||
|
||||
vm_cache_free(&obj->ob_type->ob_cache, obj);
|
||||
}
|
||||
|
||||
static void object_cleanup(struct object *obj, unsigned long flags)
|
||||
{
|
||||
if (obj->ob_refcount > 0 || obj->ob_handles > 0) {
|
||||
@@ -77,11 +99,30 @@ static void object_cleanup(struct object *obj, unsigned long flags)
|
||||
return;
|
||||
}
|
||||
|
||||
if (HAS_OP(obj, destroy)) {
|
||||
obj->ob_type->ob_ops.destroy(obj);
|
||||
struct queue queue = QUEUE_INIT;
|
||||
__cleanup(obj, &queue);
|
||||
|
||||
if (!HAS_OP(obj, destroy_recurse)) {
|
||||
return;
|
||||
}
|
||||
|
||||
vm_cache_free(&obj->ob_type->ob_cache, obj);
|
||||
while (!queue_empty(&queue)) {
|
||||
struct queue_entry *entry = queue_pop_front(&queue);
|
||||
struct object *child = NULL;
|
||||
obj->ob_type->ob_ops.destroy_recurse(entry, &child);
|
||||
if (!child) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (child->ob_refcount > 1) {
|
||||
child->ob_refcount--;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (child->ob_refcount == 0 && child->ob_handles == 0) {
|
||||
__cleanup(child, &queue);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void object_unref(struct object *obj)
|
||||
@@ -137,6 +178,38 @@ void object_unlock_irqrestore(struct object *obj, unsigned long flags)
|
||||
spin_unlock_irqrestore(&obj->ob_lock, flags);
|
||||
}
|
||||
|
||||
/*
 * Lock two objects, always in ascending address order so that concurrent
 * callers locking the same pair cannot deadlock. `flags` receives the
 * saved interrupt state from the first (irqsave) acquisition. A == b is
 * handled with a single lock.
 */
void object_lock_pair_irqsave(
	struct object *a,
	struct object *b,
	unsigned long *flags)
{
	if (a == b) {
		object_lock_irqsave(a, flags);
		return;
	}

	struct object *first = (a < b) ? a : b;
	struct object *second = (a < b) ? b : a;

	object_lock_irqsave(first, flags);
	object_lock(second);
}
|
||||
|
||||
/*
 * Release a pair of objects taken with object_lock_pair_irqsave(),
 * unlocking in reverse acquisition order (higher address first, then the
 * irqrestore on the lower-address lock).
 */
void object_unlock_pair_irqrestore(
	struct object *a,
	struct object *b,
	unsigned long flags)
{
	if (a == b) {
		object_unlock_irqrestore(a, flags);
		return;
	}

	struct object *first = (a < b) ? a : b;
	struct object *second = (a < b) ? b : a;

	object_unlock(second);
	object_unlock_irqrestore(first, flags);
}
|
||||
|
||||
void *object_data(struct object *obj)
|
||||
{
|
||||
return (char *)obj + sizeof *obj;
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#include <mango/cpu.h>
|
||||
#include <mango/libc/stdio.h>
|
||||
#include <mango/machine/panic.h>
|
||||
#include <mango/printk.h>
|
||||
#include <mango/sched.h>
|
||||
#include <kernel/cpu.h>
|
||||
#include <kernel/libc/stdio.h>
|
||||
#include <kernel/machine/panic.h>
|
||||
#include <kernel/printk.h>
|
||||
#include <kernel/sched.h>
|
||||
#include <stdarg.h>
|
||||
|
||||
static int has_panicked = 0;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#include <mango/percpu.h>
|
||||
#include <mango/cpu.h>
|
||||
#include <mango/vm.h>
|
||||
#include <kernel/percpu.h>
|
||||
#include <kernel/cpu.h>
|
||||
#include <kernel/vm.h>
|
||||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
|
||||
|
||||
111
kernel/port.c
Normal file
111
kernel/port.c
Normal file
@@ -0,0 +1,111 @@
|
||||
#include <kernel/channel.h>
|
||||
#include <kernel/port.h>
|
||||
#include <kernel/util.h>
|
||||
|
||||
/* Downcast a struct object* to the containing struct port*. */
#define PORT_CAST(p) OBJECT_C_CAST(struct port, p_base, &port_type, p)

/* Object-type descriptor for ports; registered by port_type_init(). */
static struct object_type port_type = {
	.ob_name = "port",
	.ob_size = sizeof(struct port),
	.ob_header_offset = offsetof(struct port, p_base),
};
|
||||
|
||||
/* Register the port object type with the object subsystem at boot. */
kern_status_t port_type_init(void)
{
	return object_type_register(&port_type);
}
|
||||
|
||||
/* Public wrapper around PORT_CAST: obtain the port containing `obj`. */
struct port *port_cast(struct object *obj)
{
	return PORT_CAST(obj);
}
|
||||
|
||||
/*
 * Block the calling thread until the message has been replied to
 * (msg_status == KMSG_REPLY_SENT).
 *
 * The sender port lock is held on entry and re-held on return; it is
 * dropped around schedule() so the receiver can make progress. The
 * thread state is set to SLEEPING *before* re-checking msg_status so a
 * wakeup arriving between the check and schedule() is not lost.
 */
static void wait_for_reply(struct msg *msg, unsigned long *lock_flags)
{
	struct wait_item waiter;
	struct thread *self = current_thread();

	/* NOTE(review): `waiter` is initialized but never explicitly
	 * enqueued here — presumably wait_item_init() registers it, or it
	 * is vestigial; confirm against the wait-queue implementation. */
	wait_item_init(&waiter, self);
	for (;;) {
		self->tr_state = THREAD_SLEEPING;
		if (msg->msg_status == KMSG_REPLY_SENT) {
			break;
		}

		port_unlock_irqrestore(msg->msg_sender_port, *lock_flags);
		schedule(SCHED_NORMAL);
		port_lock_irqsave(msg->msg_sender_port, lock_flags);
	}

	self->tr_state = THREAD_READY;
}
|
||||
|
||||
struct port *port_create(void)
|
||||
{
|
||||
struct object *port_object = object_create(&port_type);
|
||||
if (!port_object) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct port *port = PORT_CAST(port_object);
|
||||
|
||||
port->p_status = PORT_OFFLINE;
|
||||
|
||||
return port;
|
||||
}
|
||||
|
||||
kern_status_t port_connect(struct port *port, struct channel *remote)
|
||||
{
|
||||
if (port->p_status != PORT_OFFLINE) {
|
||||
return KERN_BAD_STATE;
|
||||
}
|
||||
|
||||
port->p_remote = remote;
|
||||
port->p_status = PORT_READY;
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
kern_status_t port_disconnect(struct port *port)
|
||||
{
|
||||
if (port->p_status != PORT_READY) {
|
||||
return KERN_BAD_STATE;
|
||||
}
|
||||
|
||||
port->p_remote = NULL;
|
||||
port->p_status = PORT_OFFLINE;
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
kern_status_t port_send_msg(
|
||||
struct port *port,
|
||||
const kern_msg_t *in_msg,
|
||||
kern_msg_t *out_reply,
|
||||
unsigned long *lock_flags)
|
||||
{
|
||||
if (port->p_status != PORT_READY) {
|
||||
return KERN_BAD_STATE;
|
||||
}
|
||||
|
||||
struct thread *self = current_thread();
|
||||
struct msg *msg = &self->tr_msg;
|
||||
memset(msg, 0x0, sizeof *msg);
|
||||
msg->msg_status = KMSG_WAIT_RECEIVE;
|
||||
msg->msg_sender_thread = self;
|
||||
msg->msg_sender_port = port;
|
||||
memcpy(&msg->msg_req, in_msg, sizeof msg->msg_req);
|
||||
memcpy(&msg->msg_resp, out_reply, sizeof msg->msg_req);
|
||||
|
||||
unsigned long flags;
|
||||
channel_lock_irqsave(port->p_remote, &flags);
|
||||
port->p_status = PORT_SEND_BLOCKED;
|
||||
channel_enqueue_msg(port->p_remote, msg);
|
||||
channel_unlock_irqrestore(port->p_remote, flags);
|
||||
|
||||
wait_for_reply(msg, lock_flags);
|
||||
|
||||
channel_lock_irqsave(port->p_remote, &flags);
|
||||
btree_delete(&port->p_remote->c_msg, &msg->msg_node);
|
||||
channel_unlock_irqrestore(port->p_remote, flags);
|
||||
|
||||
return msg->msg_result;
|
||||
}
|
||||
@@ -1,7 +1,7 @@
|
||||
#include <mango/printk.h>
|
||||
#include <mango/locks.h>
|
||||
#include <mango/console.h>
|
||||
#include <mango/libc/stdio.h>
|
||||
#include <kernel/printk.h>
|
||||
#include <kernel/locks.h>
|
||||
#include <kernel/console.h>
|
||||
#include <kernel/libc/stdio.h>
|
||||
#include <stdarg.h>
|
||||
|
||||
#define LOG_BUFFER_SIZE 0x40000
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
#include <mango/status.h>
|
||||
#include <mango/types.h>
|
||||
|
||||
#define ERROR_STRING_CASE(code) \
|
||||
case code: \
|
||||
return #code
|
||||
#define ERROR_STRING_CASE(code) \
|
||||
case code: \
|
||||
return #code
|
||||
|
||||
const char *kern_status_string(kern_status_t status)
|
||||
{
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
#include <mango/machine/cpu.h>
|
||||
#include <mango/printk.h>
|
||||
#include <mango/syscall.h>
|
||||
|
||||
kern_status_t sys_exit(int status)
|
||||
{
|
||||
printk("sys_exit(%d)", status);
|
||||
while (1) {
|
||||
ml_cpu_pause();
|
||||
}
|
||||
return KERN_UNIMPLEMENTED;
|
||||
}
|
||||
|
||||
/*
 * Stub: VM-object creation syscall, not implemented yet. Parameters are
 * accepted but ignored; always returns KERN_UNIMPLEMENTED.
 */
kern_status_t sys_vm_object_create(
	const char *name,
	size_t len,
	enum vm_prot prot,
	kern_handle_t *out_handle)
{
	printk("sys_vm_object_create()");
	return KERN_UNIMPLEMENTED;
}
|
||||
|
||||
/* Build one table slot: maps SYS_<id> to the address of sys_<p>. */
#define SYSCALL_TABLE_ENTRY(id, p) [SYS_##id] = (virt_addr_t)(sys_##p)

/* Syscall dispatch table, indexed by syscall number. Designated
 * initializers mean unlisted slots are 0 (invalid). */
static const virt_addr_t syscall_table[] = {
	SYSCALL_TABLE_ENTRY(EXIT, exit),
	SYSCALL_TABLE_ENTRY(VM_OBJECT_CREATE, vm_object_create),
};
static const size_t syscall_table_count
	= sizeof syscall_table / sizeof syscall_table[0];

/*
 * Look up the handler address for a syscall number.
 * Returns 0 for out-of-range (or unregistered) ids.
 */
virt_addr_t syscall_get_func(unsigned int sysid)
{
	if (sysid >= syscall_table_count) {
		return 0;
	}

	return syscall_table[sysid];
}
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_LIBC_TYPES_H_
|
||||
#define MANGO_LIBC_TYPES_H_
|
||||
#ifndef KERNEL_LIBC_TYPES_H_
|
||||
#define KERNEL_LIBC_TYPES_H_
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_STDIO_H_
|
||||
#define MANGO_STDIO_H_
|
||||
#ifndef KERNEL_STDIO_H_
|
||||
#define KERNEL_STDIO_H_
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stddef.h>
|
||||
@@ -1,5 +1,5 @@
|
||||
#ifndef MANGO_LIBC_STRING_H_
|
||||
#define MANGO_LIBC_STRING_H_
|
||||
#ifndef KERNEL_LIBC_STRING_H_
|
||||
#define KERNEL_LIBC_STRING_H_
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
1616
libc/stdio/printf.c
1616
libc/stdio/printf.c
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user