Compare commits

..

115 Commits

Author SHA1 Message Date
de520cdd2d libmango: types: add macro to define a kern_msg_handle_t 2026-03-10 19:08:49 +00:00
e84ed6057d channel: fix incorrect offset used in channel_write_msg 2026-03-10 19:08:20 +00:00
1d4cb882a8 libmango: types: add ssize_t definition 2026-03-06 20:12:32 +00:00
18b281debf kernel: bsp: add support for static bootstrap executables 2026-03-06 20:12:12 +00:00
09d292fd09 kernel: msg: include details about who sent a message 2026-03-05 21:04:02 +00:00
36c5ac7837 kernel: re-implement sending handles via port messages 2026-03-01 19:10:01 +00:00
b1bdb89ca4 vm: region: add a function to write data from a kernel buffer to a vm-region 2026-03-01 19:09:30 +00:00
f8a7a4285f syscall: msg: validate iovec array itself as well as the buffers it points to 2026-02-26 20:55:17 +00:00
f9bf4c618a syscall: log: add task id to log output 2026-02-26 20:54:14 +00:00
e4de3af00d kernel: remove support for sending kernel handles via port/channel 2026-02-26 20:53:47 +00:00
b59d0d8948 syscall: msg: locking of vm-region is now handled by channel_read_msg 2026-02-26 19:43:07 +00:00
8cc877c251 kernel: port: dequeue kmsg struct once reply is received 2026-02-26 19:42:29 +00:00
2073cad97b kernel: fix channel locking and status update issues 2026-02-26 19:42:12 +00:00
eb8758bc5e vm: region: fix some cases where regions weren't being unlocked after use. 2026-02-26 19:41:40 +00:00
1cdde0d32e kernel: add functions for safely (un)locking pairs of objects
when locking a pair of objects, the object with the lesser memory address
is always locked first. the pair is unlocked in the opposite order.
2026-02-26 19:38:49 +00:00
1c7c90ef39 kernel: channel: implement channel_read_msg and msg_read 2026-02-23 21:52:03 +00:00
11c741bd68 libmango: add nr_read output param to msg_read 2026-02-23 21:51:26 +00:00
34bd6e479c vm: region: add nr_bytes_moved output param to memmove_v 2026-02-23 21:50:35 +00:00
5f0654430d syscall: add task_self, task_get_address_space, and vm_region_kill 2026-02-23 18:43:49 +00:00
fd1bc0ad5f kernel: check object refcount before performing a recursive deletion 2026-02-23 18:43:11 +00:00
b1ffdcf2bc vm: region: improve locking rules and semantics; implement region killing
the rules around acquiring locks have been strictly defined and
implemented, and general lock usage has been improved, to fix and
prevent several different issues.

a vm-region is now destroyed in two separate steps:
 1. it is "killed": all mappings are unmapped and deleted, the
    region is removed from its parent, and the region and all of
    its sub-regions are marked as "dead", preventing any
    further actions from being performed with the region.
 2. it is "destroyed": the vm-region object is de-allocated when
    the last reference/handle is closed. the references that this
    region holds to any sub-regions are also released, meaning
    these regions may also be de-allocated too.
2026-02-23 18:42:47 +00:00
5690dd5b9c kernel: add support for recursive object destruction (without recursion)
this system makes it possible for an object that forms part of a tree
to be safely recursively destroyed without using recursion.
2026-02-23 18:34:12 +00:00
37ae7aeef7 kernel: implement globally-unique object ids 2026-02-23 18:32:11 +00:00
dbe117135b x86_64: implement proper user/kernel %gs base switching
the %gs base address is now always set to the current cpu block while
in kernel-mode, and is switched back to the userspace %gs base
when returning to user-mode.
2026-02-23 18:26:21 +00:00
273557fa9f x86_64: lock task address space while performing a demand page-map 2026-02-23 18:25:49 +00:00
fe107fbad3 kernel: locks: add spin lock/unlock functions that don't change interrupt state 2026-02-23 18:24:49 +00:00
b2d04c5983 vm: object: zero-initialise pages allocated for vm-object 2026-02-21 23:19:49 +00:00
6c2ca888ee x86_64: remove kernel image post-build ELF32 patch
this patch must now be done by the wider OS build system, to avoid
interference with any bootloaders that don't support this kind of
patching (e.g GRUB i386-pc)
2026-02-21 23:18:22 +00:00
044b3688aa vm: cache: all allocations are now zero-initialised 2026-02-21 23:18:09 +00:00
77936e3511 kernel: implement sending, receiving, and replying to messages via port/channel 2026-02-21 11:32:57 +00:00
08c78bd6e7 vm: object: add vm_object_copy syscall trace output 2026-02-21 11:30:44 +00:00
2537ca46de libmango: add macros for easily defining msg and iovec variables 2026-02-21 11:29:25 +00:00
3190035086 libmango: add temporary formatted log function 2026-02-21 11:28:58 +00:00
7f049293f4 vm: memblock: add memblock_dump to header 2026-02-21 11:27:28 +00:00
9b2c2f6b29 x86_64: start the kernel bootstrap heap above 16MiB
this will keep the memory area below 16MiB free for DMA memory allocations.
2026-02-21 11:24:36 +00:00
6e39dd45a4 sched: only disable/enable interrupts if schedule() is called from non-IRQ context 2026-02-21 11:23:43 +00:00
855440f584 vm: add trace output 2026-02-21 11:22:51 +00:00
e1e025ab6a vm: region: memmove_v() now supports iovec arrays stored in userspace 2026-02-21 11:20:09 +00:00
0680b73461 kernel: iovec: implement iterating through an iovec list stored in userspace 2026-02-21 11:17:16 +00:00
aa0933be10 vm: region: implement reading from a user-space vm-region into a kernel buffer 2026-02-21 11:16:11 +00:00
8b188a0ac4 vm: region: fix iterator using wrong buffer offset when seek exceeds current buffer size 2026-02-21 11:07:53 +00:00
ed25ee6761 vm: object: fix iterator using wrong buffer offset when seek exceeds current buffer size 2026-02-21 11:07:12 +00:00
0bae39e550 vm: zone: ensure memblock region bounds are page-aligned while creating zone blocks 2026-02-21 11:01:58 +00:00
9a90662eaa libmango: add userspace syscall call-gates 2026-02-19 19:22:06 +00:00
1d4fd4f586 syscall: add lots of syscalls 2026-02-19 19:21:50 +00:00
dbc7b8fc59 kernel: libc: add headers 2026-02-19 19:21:15 +00:00
aa9439c392 kernel: add channel/port ipc mechanism 2026-02-19 19:21:04 +00:00
8e072945d8 kernel: add functions for moving sets of handles between tasks 2026-02-19 19:20:39 +00:00
821246bc16 kernel: add functions for iterating through an array of iovecs 2026-02-19 19:19:52 +00:00
fc8cdf62d3 bsp: adjust bsp executable mapping 2026-02-19 19:18:31 +00:00
b2dbb88778 thread: move thread awaken functionality to a dedicated function 2026-02-19 19:17:38 +00:00
9424e7bcd6 thread: fix thread object data corruption 2026-02-19 19:17:18 +00:00
4c35723959 sched: add helper functions for opening and resolving handles for a task 2026-02-19 19:16:59 +00:00
2b7e5368c9 vm: implement copying data between different vm-regions 2026-02-19 19:15:15 +00:00
85006411bd kernel: add header files 2026-02-19 19:13:44 +00:00
f2e128c57e handle: re-arrange handle space layout
the lowest 2 bits of handle values are no longer unused, and 0 is
now a valid handle value.

the first 64 handles are now reserved, and will not be automatically
allocated by the kernel. however, they are still valid handles, and
other handles can be moved to this area using an as-yet-unwritten
function. this is to allow support for standard POSIX file descriptors,
which require the values 0, 1, and 2.
2026-02-19 19:11:11 +00:00
c6e1ba21dd vm: implement direct read/write/copy access to vm-object memory 2026-02-19 19:09:38 +00:00
2f413c603d kernel: all string parameters now take a corresponding length parameter 2026-02-19 19:08:17 +00:00
291a5f677e sched: implement passing arguments to user-mode threads 2026-02-19 19:05:53 +00:00
b188573eea x86_64: pmap: change pmap_remove* pointer args to virt_addr_t 2026-02-19 19:02:28 +00:00
c69aed254f x86_64: enable interrupts during syscall execution 2026-02-19 19:00:04 +00:00
44c2904c11 x86_64: re-arrange user and kernel GDT entries for compatibility with syscall instruction 2026-02-19 18:59:37 +00:00
f89e3cb12c kernel: adjust formatting 2026-02-19 18:57:53 +00:00
6019c9307d kernel: separate headers into kernel and user headers
all kernel headers have been moved from include/mango to include/kernel
and include definitions that are only relevant to kernel-space.

any definitions that are relevant to both kernel- and user-space
(i.e. type definitions, syscall IDs) have been moved to
include/mango within libmango.
2026-02-19 18:54:48 +00:00
e3dd48a0fa build: remove per-subdirectory log message 2026-02-08 16:17:47 +00:00
9f7b7bdd2d kernel: refactor syscall dispatch system 2026-02-08 16:17:11 +00:00
c424e8127e kernel: bsp: update vm-region api usage 2026-02-08 15:52:04 +00:00
fb7d7635c2 vm: region: refactor to use offsets rather than absolute addresses 2026-02-08 15:51:51 +00:00
409725f9d4 kernel: implement mapping and execution of bsp executable 2026-02-08 13:13:03 +00:00
1c74291b99 kernel: add a temporary syscall dispatch system 2026-02-08 13:12:24 +00:00
5d28955dc6 vm: update vm-page documentation 2026-02-08 13:11:41 +00:00
ee82097017 sched: implement user-mode task and thread creation 2026-02-08 13:11:17 +00:00
d2f303680d sched: add root vm-region and handle table to struct task 2026-02-08 13:10:54 +00:00
27bed1a3d3 sched: all kernel-mode tasks now have negative task ids 2026-02-08 13:09:29 +00:00
18a5325fa7 sched: add PID_MAX definition 2026-02-08 13:07:14 +00:00
7eaad64969 pmap: declare fault handler function and flags 2026-02-08 13:06:19 +00:00
343689764f x86_64: irq: route user-mode page faults to pmap_handle_fault 2026-02-08 13:05:29 +00:00
5f2ad06fb0 x86_64: all intermediate page table entries now have PTE_USR set
this allows user-accessible page mappings to be created. for kernel memory
mappings, PTE_USR will only be cleared on the lowest-level table entry.
2026-02-08 13:03:41 +00:00
67b3be9732 x86_64: add pmap_handle_fault to route user-mode page faults to vm-region to resolve 2026-02-08 13:03:28 +00:00
883b5ac9e2 vm: add vm-region to manage userspace virtual memory address spaces
vm-region supports creating nested regions of virtual memory, each with their
own memory protection restrictions.

vm-objects can be mapped into a vm-region, making the underlying memory
accessible. all mappings are lazy: page tables are not updated until the
mapped memory is accessed.
2026-02-08 12:59:08 +00:00
b8ccffd2d4 vm: add vm-object to represent non-contiguous physical memory allocations
vm-object can be used to demand-allocate non-contiguous physical memory, and
will provide an api for userspace programs to do the same. unless a vm-object
is created in-place (i.e. to represent a specific area of physical memory),
its memory pages are only allocated when the object is mapped AND someone
attempts to access the memory.
2026-02-08 12:58:31 +00:00
14ebcd4875 kernel: implement object handle tables 2026-02-08 12:55:47 +00:00
6950850f5b object: add a macro to define object lock/unlock functions 2026-02-08 12:55:13 +00:00
bcda479879 sched: implement task id allocation; remove thread id bitmap 2026-02-08 12:54:43 +00:00
7c4cff24f2 test: update object api usage 2026-02-08 12:52:14 +00:00
b31c3a40b4 vm: sparse: ensure that vm_pages for the reserved bsp region are created 2026-02-08 12:51:55 +00:00
2b1bed844a vm: change virt_to_phys param to const 2026-02-08 12:51:23 +00:00
26afc3c6c3 vm: sparse: fix region base/limit alignment calculation 2026-02-08 12:50:08 +00:00
d94a6ec7cb kernel: add generic FATAL_ERROR status code 2026-02-08 12:48:59 +00:00
0d73196b4b printk: add macro for conditional trace-level printk statements 2026-02-08 12:48:33 +00:00
687ba31d55 bitmap: fix bitmap_clear() clearing bits in the wrong direction 2026-02-08 12:47:58 +00:00
9e223ca5d0 x86_64: implement syscall instruction init and dispatch 2026-02-08 12:47:28 +00:00
4de1463e7c object: add functions to track handle allocation 2026-02-08 12:37:08 +00:00
5304e5be00 object: rename deref to unref 2026-02-08 12:36:32 +00:00
0853cff56b vm: remove vm_region; add vm_page_get_size_bytes 2026-02-08 12:33:36 +00:00
aaa76ff197 memblock: make virt_to_phys pointer param const 2026-02-08 12:33:03 +00:00
0490541dc9 kernel: adjust formatting 2026-02-08 12:32:48 +00:00
49a75a1bbe pmap: change pmap_add* virtual pointer parameter to virt_addr_t 2026-02-08 12:08:26 +00:00
34f614b881 libc: move fill_random to kernel/util 2026-02-08 12:06:50 +00:00
720ed75770 x86_64: add invalid pmap pointer constant 2026-02-08 11:59:18 +00:00
880930e917 x86_64: implement functions to jump to userspace 2026-02-08 11:58:27 +00:00
da611ab070 x86_64: find, record, and reserve the memory location of the bsp 2026-02-08 11:52:33 +00:00
129e782e99 kernel: add functions to get/set the bsp boot module location 2026-02-08 11:38:50 +00:00
00ea2b1b3b x86_64: adjust formatting 2026-02-08 11:36:16 +00:00
4051265876 x86_64: implement TSS initialisation and user/kernel stack pointer switching 2026-02-08 11:34:49 +00:00
564d4f9ba0 x86_64: rename struct cpu_context; move to machine/cpu.h 2026-02-08 11:32:09 +00:00
c04b33647c x86_64: add kernel and user virtual memory boundary definitions 2026-02-08 11:27:37 +00:00
a56d69e260 kernel: add a type to represent boot modules 2026-02-08 11:02:35 +00:00
af0d97d6f5 misc: changes from a long time ago 2026-02-03 21:28:15 +00:00
c7d4463f7e x86_64: remove redundant header files 2025-05-20 23:14:42 +01:00
8811016b7d kernel: remove redundant header files 2025-05-20 23:14:33 +01:00
e1aeac9562 obj: remove redundant object functions; move to kernel/ subfolder 2025-05-20 23:14:16 +01:00
0ba46e065c build: build script now exports compile commands 2025-05-20 23:12:21 +01:00
675a6de47e build: add build script to configure kernel and tools build systems 2025-05-19 22:01:12 +01:00
4d12cab7f7 doc: add kernel interface documentation 2024-11-02 15:12:05 +00:00
178 changed files with 11139 additions and 3686 deletions

View File

@@ -1,17 +1,22 @@
cmake_minimum_required(VERSION 3.13)
project(magenta C ASM)
project(mango C ASM)
if (NOT BUILD_TOOLS_DIR)
message(FATAL_ERROR "No build tools directory specified. Please run build.sh")
endif ()
set(CMAKE_C_STANDARD 17)
set(kernel_name "Magenta")
set(kernel_exe_name "magenta_kernel")
set(kernel_arch x86_64)
set(generic_src_dirs ds init kernel libc obj sched util vm)
set(kernel_name "Mango")
set(kernel_exe_name "mango_kernel")
set(generic_src_dirs ds init kernel libc sched util vm syscall)
set(kernel_sources "")
set(kernel_headers "")
foreach (dir ${generic_src_dirs})
message(STATUS ${dir})
file(GLOB_RECURSE dir_sources ${dir}/*.c)
file(GLOB_RECURSE dir_headers ${dir}/*.h)
@@ -19,7 +24,6 @@ foreach (dir ${generic_src_dirs})
set(kernel_headers ${kernel_headers} ${dir_headers})
endforeach (dir)
set(kernel_arch x86_64)
file(GLOB_RECURSE arch_sources_c arch/${kernel_arch}/*.c)
file(GLOB_RECURSE arch_sources_asm arch/${kernel_arch}/*.S)
file(GLOB_RECURSE arch_headers arch/${kernel_arch}/*.h)
@@ -33,14 +37,22 @@ add_executable(${kernel_exe_name}
${arch_sources_asm}
${arch_headers})
target_include_directories(${kernel_exe_name} PRIVATE
include
target_include_directories(${kernel_exe_name} PRIVATE
include
libc/include
libmango/include
arch/${kernel_arch}/include)
target_compile_options(${kernel_exe_name} PRIVATE
-nostdlib -ffreestanding
-Wall -Werror -pedantic -Wno-language-extension-token -Wno-unused-function -Wno-gnu-statement-expression
-O2 -g -fPIC -Iinclude -Iarch/${kernel_arch}/include -Ilibc/include)
-g -fPIC -Iinclude -Iarch/${kernel_arch}/include -Ilibc/include)
add_custom_command(
TARGET ${kernel_exe_name}
POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:${kernel_exe_name}> $<TARGET_FILE:${kernel_exe_name}>.debug
COMMAND ${CMAKE_STRIP} -g $<TARGET_FILE:${kernel_exe_name}>)
target_link_libraries(${kernel_exe_name} -nostdlib -ffreestanding -lgcc)
target_compile_definitions(${kernel_exe_name} PRIVATE BUILD_ID="0")

View File

@@ -1,5 +1,5 @@
#include <unistd.h>
#include <mango/machine/cpu.h>
#include <kernel/machine/cpu.h>
int ml_init_bootcpu(void)
{

View File

@@ -1,5 +1,5 @@
#include <mango/machine/hwlock.h>
#include <mango/compiler.h>
#include <kernel/compiler.h>
#include <kernel/machine/hwlock.h>
void ml_hwlock_lock(ml_hwlock_t *lck)
{

View File

@@ -1,6 +1,7 @@
#ifndef MANGO_X86_64_INIT_H_
#define MANGO_X86_64_INIT_H_
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
@@ -8,16 +9,18 @@ extern "C" {
#endif
#define __X2(x) #x
#define __X(x) __X2(x)
#define __X(x) __X2(x)
#ifdef __APPLE__
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used \
__section("__DATA,__initcall" __X(id) ".init") = (fn)
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used __section( \
"__DATA,__initcall" __X(id) ".init") \
= (fn)
#else
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used \
__section("initcall" __X(id) "_init") = (fn)
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used __section( \
"initcall" __X(id) "_init") \
= (fn)
#endif
extern int ml_init(uintptr_t arg);

View File

@@ -1,6 +1,8 @@
#ifndef MANGO_USER_PMAP_H_
#define MANGO_USER_PMAP_H_
#include <stdint.h>
typedef uintptr_t ml_pmap_t;
typedef uint64_t ml_pfn_t;

View File

@@ -1,11 +1,11 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <mango/init.h>
#include <mango/memblock.h>
#include <mango/vm.h>
#include <mango/object.h>
#include <mango/printk.h>
#include <kernel/init.h>
#include <kernel/memblock.h>
#include <kernel/vm.h>
#include <kernel/object.h>
#include <kernel/printk.h>
#include <arch/stdcon.h>
#include <sys/mman.h>

View File

@@ -1,4 +1,4 @@
#include <mango/init.h>
#include <kernel/init.h>
#ifdef __APPLE__
extern char __start_initcall0[] __asm("section$start$__DATA$__initcall0.init");

View File

@@ -1,10 +1,10 @@
#include <mango/libc/string.h>
#include <mango/libc/ctype.h>
#include <kernel/libc/string.h>
#include <kernel/libc/ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <mango/console.h>
#include <mango/vm.h>
#include <mango/printk.h>
#include <kernel/console.h>
#include <kernel/vm.h>
#include <kernel/printk.h>
static void stdcon_write(struct console *con, const char *s, unsigned int len)
{

View File

@@ -1,5 +1,5 @@
#include <mango/sched.h>
#include <mango/compiler.h>
#include <kernel/sched.h>
#include <kernel/compiler.h>
//size_t THREAD_sp = offsetof(struct thread, tr_sp);

View File

@@ -2,4 +2,3 @@ target_compile_options(${kernel_exe_name} PRIVATE
-z max-page-size=0x1000 -m64 -mcmodel=large -mno-red-zone -mno-mmx
-mno-sse -mno-sse2 -D_64BIT -DBYTE_ORDER=1234)
target_link_libraries(${kernel_exe_name} "-z max-page-size=0x1000" "-T ${CMAKE_CURRENT_SOURCE_DIR}/arch/x86_64/layout.ld")

View File

@@ -1,11 +1,14 @@
#include <mango/machine/cpu.h>
#include <arch/msr.h>
#include <kernel/machine/cpu.h>
int ml_cpu_block_init(ml_cpu_block *p)
{
p->c_this = p;
gdt_init(&p->c_gdt, &p->c_gdt_ptr);
idt_init(&p->c_idt_ptr);
tss_init(&p->c_tss, &p->c_tss_ptr);
gdt_write_tss(&p->c_gdt, &p->c_tss);
return 0;
}
@@ -13,6 +16,27 @@ int ml_cpu_block_use(ml_cpu_block *p)
{
gdt_load(&p->c_gdt_ptr);
idt_load(&p->c_idt_ptr);
tss_load(&p->c_tss);
wrmsr(MSR_GS_BASE, (uint64_t)p);
return 0;
}
virt_addr_t ml_cpu_block_get_kstack(ml_cpu_block *p)
{
return tss_get_kstack(&p->c_tss);
}
virt_addr_t ml_cpu_block_get_ustack(ml_cpu_block *p)
{
return tss_get_ustack(&p->c_tss);
}
void ml_cpu_block_set_kstack(ml_cpu_block *p, virt_addr_t sp)
{
tss_set_kstack(&p->c_tss, sp);
}
void ml_cpu_block_set_ustack(ml_cpu_block *p, virt_addr_t sp)
{
tss_set_ustack(&p->c_tss, sp);
}

View File

@@ -1,8 +1,8 @@
#include "mango/types.h"
#include <mango/memblock.h>
#include <mango/printk.h>
#include <mango/util.h>
#include <arch/e820.h>
#include <kernel/memblock.h>
#include <kernel/printk.h>
#include <kernel/types.h>
#include <kernel/util.h>
void e820_scan(multiboot_memory_map_t *mmap, size_t len)
{
@@ -36,7 +36,9 @@ void e820_scan(multiboot_memory_map_t *mmap, size_t len)
}
printk("e820: [mem 0x%016llx-0x%016llx] %s",
entry->addr, entry->addr + entry->len - 1, type);
entry->addr,
entry->addr + entry->len - 1,
type);
memblock_add(entry->addr, entry->len);
@@ -53,7 +55,12 @@ void e820_scan(multiboot_memory_map_t *mmap, size_t len)
char str_mem_total[64], str_mem_reserved[64];
data_size_to_string(mem_total, str_mem_total, sizeof str_mem_total);
data_size_to_string(mem_reserved, str_mem_reserved, sizeof str_mem_reserved);
data_size_to_string(
mem_reserved,
str_mem_reserved,
sizeof str_mem_reserved);
printk("e820: total memory: %s, hw reserved: %s", str_mem_total, str_mem_reserved);
printk("e820: total memory: %s, hw reserved: %s",
str_mem_total,
str_mem_reserved);
}

View File

@@ -1,5 +1,8 @@
#include <mango/libc/string.h>
#include <arch/gdt.h>
#include <arch/tss.h>
#include <kernel/libc/string.h>
#include <kernel/types.h>
#include <stddef.h>
static void init_entry(struct gdt_entry *entry, int access, int flags)
{
@@ -15,13 +18,42 @@ int gdt_init(struct gdt *gdt, struct gdt_ptr *gdtp)
{
memset(&gdt->g_entries[0], 0x0, sizeof gdt->g_entries[0]);
init_entry(&gdt->g_entries[1], GDT_A_PRESENT | GDT_A_CODEREAD | GDT_A_CODE, GDT_F_64BIT);
init_entry(&gdt->g_entries[2], GDT_A_PRESENT | GDT_A_DATAWRITE | GDT_A_DATA, GDT_F_64BIT);
init_entry(&gdt->g_entries[3], GDT_A_PRESENT | GDT_A_USER | GDT_A_CODEREAD | GDT_A_CODE, GDT_F_64BIT);
init_entry(&gdt->g_entries[4], GDT_A_PRESENT | GDT_A_USER | GDT_A_DATAWRITE | GDT_A_DATA, GDT_F_64BIT);
init_entry(
&gdt->g_entries[1],
GDT_A_PRESENT | GDT_A_CODEREAD | GDT_A_CODE,
GDT_F_64BIT);
init_entry(
&gdt->g_entries[2],
GDT_A_PRESENT | GDT_A_DATAWRITE | GDT_A_DATA,
GDT_F_64BIT);
init_entry(
&gdt->g_entries[3],
GDT_A_PRESENT | GDT_A_USER | GDT_A_DATAWRITE | GDT_A_DATA,
GDT_F_64BIT);
init_entry(
&gdt->g_entries[4],
GDT_A_PRESENT | GDT_A_USER | GDT_A_CODEREAD | GDT_A_CODE,
GDT_F_64BIT);
gdtp->g_ptr = (uint64_t)gdt;
gdtp->g_limit = sizeof(*gdt) - 1;
return 0;
}
void gdt_write_tss(struct gdt *gdt, struct tss *tss)
{
struct tss_gdt_entry *tss_entry = &gdt->g_tss;
virt_addr_t base = (virt_addr_t)tss;
size_t limit = sizeof *tss;
tss_entry->ge_base_low = (base & 0xFFFF);
tss_entry->ge_base_mid = (base >> 16) & 0xFF;
tss_entry->ge_base_hi = (base >> 24) & 0xFF;
tss_entry->ge_base_ext = (base >> 32) & 0xFFFFFFFF;
tss_entry->ge_limit_low = (limit & 0xFFFF);
tss_entry->ge_gran = (limit >> 16) & 0xF;
tss_entry->ge_access = 0xE9;
tss_entry->ge_reserved = 0;
}

View File

@@ -11,6 +11,41 @@ ml_hwlock_lock:
mov $1, %ecx
mfence
1: mov $0, %eax
lock cmpxchg %ecx, (%rdi)
jne 1b
pop %rbp
ret
.global ml_hwlock_unlock
.type ml_hwlock_unlock, @function
/* %rdi = pointer to ml_hwlock_t (int) */
ml_hwlock_unlock:
push %rbp
mov %rsp, %rbp
movl $0, (%rdi)
mfence
pop %rbp
ret
.global ml_hwlock_lock_irq
.type ml_hwlock_lock_irq, @function
/* %rdi = pointer to ml_hwlock_t (int) */
ml_hwlock_lock_irq:
push %rbp
mov %rsp, %rbp
mov $1, %ecx
cli
mfence
@@ -21,11 +56,12 @@ ml_hwlock_lock:
pop %rbp
ret
.global ml_hwlock_unlock
.type ml_hwlock_unlock, @function
.global ml_hwlock_unlock_irq
.type ml_hwlock_unlock_irq, @function
/* %rdi = pointer to ml_hwlock_t (int) */
ml_hwlock_unlock:
ml_hwlock_unlock_irq:
push %rbp
mov %rsp, %rbp
@@ -42,7 +78,7 @@ ml_hwlock_unlock:
/* %rdi = pointer to ml_hwlock_t (int)
%rsi = pointer to quadword to store rflags in */
ml_hwlock_lock_irqsave:
ml_hwlock_lock_irqsave:
push %rbp
mov %rsp, %rbp
@@ -62,6 +98,7 @@ ml_hwlock_lock_irqsave:
pop %rbp
ret
.global ml_hwlock_unlock_irqrestore
.type ml_hwlock_unlock_irqrestore, @function

View File

@@ -1,26 +1,27 @@
#ifndef ARCH_GDT_H_
#define ARCH_GDT_H_
#include <mango/compiler.h>
#include <arch/tss.h>
#include <kernel/compiler.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define NR_GDT_ENTRIES 5
#define NR_GDT_ENTRIES 5
#define GDT_A_PRESENT (1 << 7)
#define GDT_A_USER (3 << 5)
#define GDT_A_CODE (3 << 3)
#define GDT_A_DATA (8 << 1)
#define GDT_A_RPLK (1 << 5)
#define GDT_A_CODEREAD (1 << 1)
#define GDT_A_DATAWRITE (1 << 1)
#define GDT_A_GROWUP (1 << 5)
#define GDT_F_64BIT (0x2F)
#define GDT_F_32BIT (0x4F)
#define GDT_F_16BIT (0xF)
#define GDT_A_PRESENT (1 << 7)
#define GDT_A_USER (3 << 5)
#define GDT_A_CODE (3 << 3)
#define GDT_A_DATA (8 << 1)
#define GDT_A_RPLK (1 << 5)
#define GDT_A_CODEREAD (1 << 1)
#define GDT_A_DATAWRITE (1 << 1)
#define GDT_A_GROWUP (1 << 5)
#define GDT_F_64BIT (0x2F)
#define GDT_F_32BIT (0x4F)
#define GDT_F_16BIT (0xF)
struct gdt_entry {
uint16_t ge_limit_low;
@@ -33,6 +34,7 @@ struct gdt_entry {
struct gdt {
struct gdt_entry g_entries[NR_GDT_ENTRIES];
struct tss_gdt_entry g_tss;
} __packed;
struct gdt_ptr {
@@ -41,6 +43,7 @@ struct gdt_ptr {
} __packed;
extern int gdt_init(struct gdt *gdt, struct gdt_ptr *gdtp);
extern void gdt_write_tss(struct gdt *gdt, struct tss *tss);
extern int gdt_load(struct gdt_ptr *gdtp);
#ifdef __cplusplus

View File

@@ -1,8 +1,8 @@
#ifndef ARCH_IRQ_H_
#define ARCH_IRQ_H_
#include <mango/compiler.h>
#include <mango/queue.h>
#include <kernel/compiler.h>
#include <kernel/queue.h>
#include <stdint.h>
#ifdef __cplusplus
@@ -11,6 +11,8 @@ extern "C" {
#define NR_IDT_ENTRIES 256
struct ml_cpu_context;
enum irq_vector {
IRQ0 = 32,
IRQ1,
@@ -35,13 +37,6 @@ struct irq_hook {
int (*irq_callback)(void);
};
struct cpu_context {
uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
uint64_t rdi, rsi, rbp, unused_rsp, rbx, rdx, rcx, rax;
uint64_t int_no, err_no;
uint64_t rip, cs, rflags, rsp, ss;
} __packed;
struct idt_entry {
uint16_t base_low;
uint16_t selector;
@@ -64,7 +59,7 @@ struct idt_ptr {
uintptr_t i_base;
} __packed;
typedef void (*int_hook)(struct cpu_context *);
typedef void (*int_hook)(struct ml_cpu_context *);
extern int idt_init(struct idt_ptr *idtp);
extern int idt_load(struct idt_ptr *idtp);

View File

@@ -3,7 +3,8 @@
#include <stdint.h>
#define MSR_GS_BASE 0xC0000101
#define MSR_GS_BASE 0xC0000101
#define MSR_KERNEL_GS_BASE 0xC0000102
#ifdef __cplusplus
extern "C" {

View File

@@ -1,8 +1,8 @@
#ifndef ARCH_PAGING_H_
#define ARCH_PAGING_H_
#include <mango/types.h>
#include <mango/compiler.h>
#include <kernel/types.h>
#include <kernel/compiler.h>
#ifdef __cplusplus
extern "C" {

View File

@@ -0,0 +1,55 @@
#ifndef ARCH_TSS_H_
#define ARCH_TSS_H_
#include <kernel/compiler.h>
#include <kernel/types.h>
#include <stdint.h>
#define TSS_GDT_INDEX 5
struct tss {
uint32_t res0;
uint64_t rsp0;
uint64_t rsp1;
uint64_t rsp2;
uint64_t res1;
uint64_t ist1;
uint64_t ist2;
uint64_t ist3;
uint64_t ist4;
uint64_t ist5;
uint64_t ist6;
uint64_t ist7;
uint64_t res2;
uint16_t res3;
uint16_t iomap_offset;
} __packed;
struct tss_gdt_entry {
/* these fields are copied from struct gdt_entry */
uint16_t ge_limit_low;
uint16_t ge_base_low;
uint8_t ge_base_mid;
uint8_t ge_access;
uint8_t ge_gran;
uint8_t ge_base_hi;
/* these fields are specific to the TSS entry */
uint32_t ge_base_ext;
uint32_t ge_reserved;
} __packed;
struct tss_ptr {
uint16_t tss_limit;
uint64_t tss_base;
} __packed;
extern void tss_init(struct tss *tss, struct tss_ptr *ptr);
extern void tss_load(struct tss *tss);
extern virt_addr_t tss_get_kstack(struct tss *tss);
extern virt_addr_t tss_get_ustack(struct tss *tss);
extern void tss_set_kstack(struct tss *tss, virt_addr_t sp);
extern void tss_set_ustack(struct tss *tss, virt_addr_t sp);
#endif

View File

@@ -0,0 +1,77 @@
#ifndef KERNEL_X86_64_CPU_H_
#define KERNEL_X86_64_CPU_H_
#include <arch/gdt.h>
#include <arch/irq.h>
#include <arch/tss.h>
#include <kernel/types.h>
#ifdef __cplusplus
extern "C" {
#endif
#define ML_BIG_ENDIAN 0
#define ml_cpu_block_get_id(p) ((p)->c_cpu_id)
#define ml_cpu_block_get_data(p) ((p)->c_data)
#if 0
#define ml_read_sp(sp, bp) \
asm volatile("mov %%rsp, %0" : "=r"(sp)); \
asm volatile("mov %%rbp, %0" : "=r"(bp));
#endif
struct cpu_data;
typedef struct ml_cpu_block {
struct ml_cpu_block *c_this;
struct gdt c_gdt;
struct gdt_ptr c_gdt_ptr;
struct tss c_tss;
struct tss_ptr c_tss_ptr;
struct idt_ptr c_idt_ptr;
unsigned int c_cpu_id;
struct cpu_data *c_data;
} ml_cpu_block;
struct ml_int_context {
uint64_t rip, cs, rflags, rsp, ss;
};
struct ml_cpu_context {
uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
uint64_t rdi, rsi, rbp, unused_rsp, rbx, rdx, rcx, rax;
uint64_t int_no, err_no;
uint64_t rip, cs, rflags, rsp, ss;
} __packed;
#define ml_cpu_pause() __asm__ __volatile__("hlt")
#define ml_cpu_relax() __asm__ __volatile__("pause")
#define ml_int_disable() __asm__ __volatile__("cli")
#define ml_int_enable() __asm__ __volatile__("sti")
extern int ml_init_bootcpu(void);
extern int ml_cpu_block_init(ml_cpu_block *p);
extern int ml_cpu_block_use(ml_cpu_block *p);
extern virt_addr_t ml_cpu_block_get_ustack(ml_cpu_block *p);
extern virt_addr_t ml_cpu_block_get_kstack(ml_cpu_block *p);
extern void ml_cpu_block_set_ustack(ml_cpu_block *p, virt_addr_t sp);
extern void ml_cpu_block_set_kstack(ml_cpu_block *p, virt_addr_t sp);
/* defined in cpu_ctrl.S */
extern void ml_halt_cpu(void);
extern ml_cpu_block *ml_this_cpu(void);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,5 +1,5 @@
#ifndef MANGO_X86_64_HWLOCK_H_
#define MANGO_X86_64_HWLOCK_H_
#ifndef KERNEL_X86_64_HWLOCK_H_
#define KERNEL_X86_64_HWLOCK_H_
#define ML_HWLOCK_INIT (0)
@@ -12,6 +12,9 @@ typedef int ml_hwlock_t;
extern void ml_hwlock_lock(ml_hwlock_t *lck);
extern void ml_hwlock_unlock(ml_hwlock_t *lck);
extern void ml_hwlock_lock_irq(ml_hwlock_t *lck);
extern void ml_hwlock_unlock_irq(ml_hwlock_t *lck);
extern void ml_hwlock_lock_irqsave(ml_hwlock_t *lck, unsigned long *flags);
extern void ml_hwlock_unlock_irqrestore(ml_hwlock_t *lck, unsigned long flags);

View File

@@ -0,0 +1,28 @@
#ifndef KERNEL_X86_64_INIT_H_
#define KERNEL_X86_64_INIT_H_
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define __X2(x) #x
#define __X(x) __X2(x)
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used __section( \
".initcall" __X(id) ".init") \
= (fn)
extern int ml_init(uintptr_t arg);
extern const struct framebuffer_varinfo *bootfb_varinfo(void);
extern const struct framebuffer_fixedinfo *bootfb_fixedinfo(void);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,5 @@
#ifndef KERNEL_X86_64_IRQ_H_
#define KERNEL_X86_64_IRQ_H_
#endif

View File

@@ -0,0 +1,12 @@
#ifndef KERNEL_X86_64_PANIC_H_
#define KERNEL_X86_64_PANIC_H_
#include <stdint.h>
struct ml_cpu_context;
extern void ml_print_cpu_state(struct ml_cpu_context *ctx);
extern void ml_print_stack_trace(uintptr_t ip);
extern void ml_print_stack_trace_irq(struct ml_cpu_context *ctx);
#endif

View File

@@ -0,0 +1,11 @@
#ifndef KERNEL_X86_64_PMAP_H_
#define KERNEL_X86_64_PMAP_H_
#include <arch/paging.h>
#define ML_PMAP_INVALID ((uintptr_t)-1)
typedef pml4t_ptr_t ml_pmap_t;
typedef uint64_t ml_pfn_t;
#endif

View File

@@ -0,0 +1,31 @@
#ifndef KERNEL_X86_64_THREAD_H_
#define KERNEL_X86_64_THREAD_H_
#include <kernel/sched.h>
struct ml_cpu_context;
/* switch from one thread to another. the stack of the `to` thread must have
* been prepared in one of two ways:
* 1) a previous call to ml_thread_switch where it was the `from` thread.
* 2) a call to ml_thread_prepare_kernel_context
* the switch occurs entirely with kernel mode. a further return from an
* interrupt context is then used to return to usermode if necessary.
*/
extern void ml_thread_switch(struct thread *from, struct thread *to);
/* perform the initial transition to userspace. the stack must be prepared using
* ml_thread_prepare_user_context before this function can be used */
extern void ml_thread_switch_user(void);
/* prepare the stack so that ml_thread_switch can jump to the specified IP/SP */
extern void ml_thread_prepare_kernel_context(uintptr_t ip, uintptr_t *sp);
/* prepare the stack so that ml_thread_switch_user can jump to usermode
* with the specified IP/user SP */
extern kern_status_t ml_thread_prepare_user_context(
virt_addr_t ip,
virt_addr_t user_sp,
virt_addr_t *kernel_sp,
const uintptr_t *args,
size_t nr_args);
#endif

View File

@@ -1,5 +1,5 @@
#ifndef MANGO_X86_64_VM_H_
#define MANGO_X86_64_VM_H_
#ifndef KERNEL_X86_64_VM_H_
#define KERNEL_X86_64_VM_H_
/* kernel higher-half base virtual address. */
#define VM_KERNEL_VOFFSET 0xFFFFFFFF80000000
@@ -11,14 +11,20 @@
#define VM_PAGEMAP_BASE 0xFFFF888000000000
#define VM_PAGEMAP_LIMIT 0xFFFFC87FFFFFFFFF
#define VM_PAGE_SIZE 0x1000
#define VM_PAGE_MASK (VM_PAGE_SIZE-1)
#define VM_PAGE_SHIFT 12
#define VM_PAGE_SIZE 0x1000
#define VM_PAGE_MASK (VM_PAGE_SIZE - 1)
#define VM_PAGE_SHIFT 12
#define VM_PAGE_MIN_ORDER VM_PAGE_4K
#define VM_PAGE_MAX_ORDER VM_PAGE_8M
#define VM_ZONE_MIN VM_ZONE_DMA
#define VM_ZONE_MAX VM_ZONE_NORMAL
#define VM_ZONE_MIN VM_ZONE_DMA
#define VM_ZONE_MAX VM_ZONE_NORMAL
#define VM_USER_BASE 0x0000000000100000
#define VM_USER_LIMIT 0x00007fffffffffff
#define VM_KERNEL_BASE 0XFFFF800000000000
#define VM_KERNEL_LIMIT 0XFFFFFFFFFFFFFFFF
#endif

View File

@@ -1,49 +0,0 @@
#ifndef MANGO_X86_64_CPU_H_
#define MANGO_X86_64_CPU_H_
#include <arch/gdt.h>
#include <arch/irq.h>
#ifdef __cplusplus
extern "C" {
#endif
#define ML_BIG_ENDIAN 0
#define ml_cpu_block_get_id(p) ((p)->c_cpu_id)
#define ml_cpu_block_get_data(p) ((p)->c_data)
struct cpu_data;
typedef struct ml_cpu_block {
struct ml_cpu_block *c_this;
struct gdt c_gdt;
struct gdt_ptr c_gdt_ptr;
struct idt_ptr c_idt_ptr;
unsigned int c_cpu_id;
struct cpu_data *c_data;
} ml_cpu_block;
#define ml_cpu_pause() __asm__ __volatile__("hlt")
#define ml_cpu_relax() __asm__ __volatile__("pause")
#define ml_int_disable() __asm__ __volatile__("cli")
#define ml_int_enable() __asm__ __volatile__("sti")
extern int ml_init_bootcpu(void);
extern int ml_cpu_block_init(ml_cpu_block *p);
extern int ml_cpu_block_use(ml_cpu_block *p);
/* defined in cpu_ctrl.S */
extern void ml_halt_cpu(void);
extern ml_cpu_block *ml_this_cpu(void);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,26 +0,0 @@
#ifndef MANGO_X86_64_INIT_H_
#define MANGO_X86_64_INIT_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define __X2(x) #x
#define __X(x) __X2(x)
#define __define_initcall(fn, id) \
static initcall_t __initcall_##fn##id __used \
__section(".initcall" __X(id) ".init") = (fn)
extern int ml_init(uintptr_t arg);
extern const struct framebuffer_varinfo *bootfb_varinfo(void);
extern const struct framebuffer_fixedinfo *bootfb_fixedinfo(void);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,5 +0,0 @@
#ifndef MANGO_X86_64_IRQ_H_
#define MANGO_X86_64_IRQ_H_
#endif

View File

@@ -1,12 +0,0 @@
#ifndef MANGO_X86_64_PANIC_H_
#define MANGO_X86_64_PANIC_H_
#include <stdint.h>
struct cpu_context;
extern void ml_print_cpu_state(struct cpu_context *ctx);
extern void ml_print_stack_trace(uintptr_t ip);
extern void ml_print_stack_trace_irq(struct cpu_context *ctx);
#endif

View File

@@ -1,9 +0,0 @@
#ifndef MANGO_X86_64_PMAP_H_
#define MANGO_X86_64_PMAP_H_
#include <arch/paging.h>
typedef pml4t_ptr_t ml_pmap_t;
typedef uint64_t ml_pfn_t;
#endif

View File

@@ -1,10 +0,0 @@
#ifndef MANGO_X86_64_THREAD_H_
#define MANGO_X86_64_THREAD_H_
#include <mango/sched.h>
extern void switch_to(struct thread *from, struct thread *to);
extern void prepare_stack(uintptr_t ip, uintptr_t *sp);
extern void user_jump(uintptr_t ip, uintptr_t sp);
#endif

View File

@@ -1,23 +1,37 @@
#include "arch/serial.h"
#include <arch/e820.h>
#include <arch/pit.h>
#include <mango/arg.h>
#include <mango/clock.h>
#include <mango/console.h>
#include <mango/cpu.h>
#include <mango/init.h>
#include <mango/libc/stdio.h>
#include <mango/machine/cpu.h>
#include <mango/memblock.h>
#include <mango/object.h>
#include <mango/percpu.h>
#include <mango/pmap.h>
#include <mango/printk.h>
#include <mango/types.h>
#include <mango/vm.h>
#include <arch/serial.h>
#include <arch/vgacon.h>
#include <kernel/arg.h>
#include <kernel/bsp.h>
#include <kernel/clock.h>
#include <kernel/console.h>
#include <kernel/cpu.h>
#include <kernel/init.h>
#include <kernel/libc/stdio.h>
#include <kernel/machine/cpu.h>
#include <kernel/memblock.h>
#include <kernel/object.h>
#include <kernel/percpu.h>
#include <kernel/pmap.h>
#include <kernel/printk.h>
#include <kernel/types.h>
#include <kernel/vm.h>
#define PTR32(x) ((void *)((uintptr_t)(x)))
#define PTR32(x) ((void *)((uintptr_t)(x)))
/* the physical address of the start of the memblock heap.
* this is an arbirary value; the heap can start anywhere in memory.
* any reserved areas of memory (the kernel, bsp, bios data, etc) are
* automatically taken into account.
* HOWEVER, this value will dictate how much physical memory is required for
* the kernel to boot successfully.
* the value of 16MiB (0x1000000) means that all heap allocations will be
* above 16MiB, leaving the area below free for DMA operations.
* this value CAN be reduced all the way to zero to minimise the amount of
* memory required to boot, but this may leave you with no DMA memory available.
*/
#define MEMBLOCK_HEAP_START 0x1000000
static ml_cpu_block g_bootstrap_cpu = {0};
@@ -30,9 +44,9 @@ static void bootstrap_cpu_init(void)
ml_cpu_block_use(&g_bootstrap_cpu);
}
static void early_vm_init(void)
static void early_vm_init(uintptr_t reserve_end)
{
uintptr_t alloc_start = VM_KERNEL_VOFFSET;
uintptr_t alloc_start = VM_KERNEL_VOFFSET + MEMBLOCK_HEAP_START;
/* boot code mapped 2 GiB of memory from
VM_KERNEL_VOFFSET */
uintptr_t alloc_end = VM_KERNEL_VOFFSET + 0x7fffffff;
@@ -42,21 +56,22 @@ static void early_vm_init(void)
alloc_start,
alloc_end);
memblock_reserve(0x00, (uintptr_t)__pend);
memblock_reserve(0x00, reserve_end);
printk("memblock: reserved bios+kernel at [0x%016llx-0x%016llx]",
0,
(uintptr_t)__pend);
reserve_end);
}
void early_console_init(void)
{
const char *dest = arg_value("kernel.early-console");
if (!dest) {
dest = "ttyS0";
dest = "tty0";
}
if (!strcmp(dest, "tty0")) {
/* show log messages on VGA */
vgacon_init();
} else if (!strcmp(dest, "ttyS0")) {
/* write log messages to serial port */
early_serialcon_init(115200);
@@ -69,6 +84,23 @@ static void use_uniprocessor_topology(void)
cpu_set_online(0);
}
static void find_bsp(multiboot_info_t *mb, struct boot_module *out)
{
memset(out, 0x0, sizeof *out);
printk("modules=%u: %llx", mb->mods_count, mb->mods_addr);
multiboot_module_t *mods = PTR32(mb->mods_addr);
size_t nr_mods = mb->mods_count;
if (nr_mods < 1) {
return;
}
out->mod_base = mods[0].mod_start;
out->mod_size = mods[0].mod_end - mods[0].mod_start;
}
int ml_init(uintptr_t arg)
{
multiboot_info_t *mb = (multiboot_info_t *)arg;
@@ -82,7 +114,16 @@ int ml_init(uintptr_t arg)
print_kernel_banner();
early_vm_init();
struct boot_module bsp;
find_bsp(mb, &bsp);
bsp_set_location(&bsp);
uintptr_t reserve_end = (uintptr_t)__pend;
if (bsp.mod_base + bsp.mod_size > reserve_end) {
reserve_end = bsp.mod_base + bsp.mod_size;
}
early_vm_init(reserve_end);
e820_scan(PTR32(mb->mmap_addr), mb->mmap_length);
@@ -101,16 +142,20 @@ int ml_init(uintptr_t arg)
put_cpu(this_cpu);
struct vm_zone_descriptor vm_zones[] = {
{.zd_id = VM_ZONE_DMA,
.zd_node = 0,
.zd_name = "dma",
.zd_base = 0x00,
.zd_limit = 0xffffff},
{.zd_id = VM_ZONE_NORMAL,
.zd_node = 0,
.zd_name = "normal",
.zd_base = 0x1000000,
.zd_limit = UINTPTR_MAX},
{
.zd_id = VM_ZONE_DMA,
.zd_node = 0,
.zd_name = "dma",
.zd_base = 0x00,
.zd_limit = 0xffffff,
},
{
.zd_id = VM_ZONE_NORMAL,
.zd_node = 0,
.zd_name = "normal",
.zd_base = 0x1000000,
.zd_limit = UINTPTR_MAX,
},
};
vm_bootstrap(vm_zones, sizeof vm_zones / sizeof vm_zones[0]);

View File

@@ -1,4 +1,4 @@
#include <mango/init.h>
#include <kernel/init.h>
extern char __initcall0_start[];
extern char __initcall1_start[];

View File

@@ -1,14 +1,22 @@
#include <arch/irq.h>
#include <arch/msr.h>
#include <arch/ports.h>
#include <mango/cpu.h>
#include <mango/libc/string.h>
#include <mango/machine/cpu.h>
#include <mango/machine/irq.h>
#include <mango/panic.h>
#include <mango/sched.h>
#include <kernel/cpu.h>
#include <kernel/libc/string.h>
#include <kernel/machine/cpu.h>
#include <kernel/machine/irq.h>
#include <kernel/panic.h>
#include <kernel/sched.h>
#include <kernel/syscall.h>
#include <stddef.h>
#define MAX_ISR_HANDLERS 16
#define MAX_ISR_HANDLERS 16
#define PF_PRESENT 0x01u
#define PF_WRITE 0x02u
#define PF_USER 0x04u
#define PF_RESERVED_WRITE 0x08u
#define PF_IFETCH 0x10u
extern void syscall_gate(void);
extern uintptr_t pf_faultptr(void);
@@ -20,6 +28,27 @@ static struct idt idt;
static int idt_initialised = 0;
static uintptr_t int_entry_points[NR_IDT_ENTRIES];
static void set_syscall_gate(uintptr_t rip)
{
uint64_t user_cs = 0x13;
uint64_t kernel_cs = 0x08;
uintptr_t star_reg = 0xC0000081;
uintptr_t lstar_reg = 0xC0000082;
uintptr_t sfmask_reg = 0xC0000084;
uint64_t selectors = 0;
selectors |= (user_cs) << 48;
selectors |= (kernel_cs) << 32;
/* disable interrupts */
uint64_t flag_mask = 0x200;
wrmsr(star_reg, selectors);
wrmsr(lstar_reg, rip);
wrmsr(sfmask_reg, flag_mask);
}
static void set_idt_gate(
struct idt *idt,
uint8_t index,
@@ -39,7 +68,7 @@ static void set_idt_gate(
idt->i_entries[index].reserved = 0;
}
static void gpf_handler(struct cpu_context *regs)
static void gpf_handler(struct ml_cpu_context *regs)
{
int ext = regs->err_no & 1;
int table = (regs->err_no >> 1) & 0x03;
@@ -55,43 +84,33 @@ static void gpf_handler(struct cpu_context *regs)
regs->rip);
}
static void pf_handler(struct cpu_context *regs)
static void pf_handler(struct ml_cpu_context *regs)
{
enum pmap_fault_flags fault_flags = 0;
(regs->err_no & PF_PRESENT) && (fault_flags |= PMAP_FAULT_PRESENT);
(regs->err_no & PF_USER) && (fault_flags |= PMAP_FAULT_USER);
(regs->err_no & PF_WRITE) && (fault_flags |= PMAP_FAULT_WRITE);
(regs->err_no & PF_IFETCH) && (fault_flags |= PMAP_FAULT_IFETCH);
(regs->err_no & PF_RESERVED_WRITE)
&& (fault_flags |= PMAP_FAULT_BADCFG);
virt_addr_t fault_ptr = pf_faultptr();
kern_status_t status = pmap_handle_fault(fault_ptr, fault_flags);
if (status == KERN_OK) {
return;
}
panic_irq(
regs,
"page fault (%016llx %016llx %016llx)",
pf_faultptr(),
fault_ptr,
regs->rip,
regs->err_no);
}
#if 0
static void set_syscall_gate(uintptr_t rip)
{
/* sysret adds 0x10 to this to get cs, and 0x8 to get ss
* note that the CPU should force the RPL to 3 when loading
* the selector by using user_cs | 3. However, this doesn't happen
* in certain scenarios (specifically, QEMU + KVM on a Ryzen 5 1600X). */
uint64_t user_cs = 0x13;
uint64_t kernel_cs = 0x8;
uintptr_t star_reg = 0xC0000081;
uintptr_t lstar_reg = 0xC0000082;
uintptr_t sfmask_reg = 0xC0000084;
uint64_t selectors = 0;
selectors |= (user_cs) << 48;
selectors |= (kernel_cs) << 32;
/* disable interrupts */
uint64_t flag_mask = 0x200;
write_msr(star_reg, selectors);
write_msr(lstar_reg, rip);
write_msr(sfmask_reg, flag_mask);
}
#endif
static void init_pic(void)
{
// Remap the PIC
@@ -129,6 +148,8 @@ int idt_init(struct idt_ptr *ptr)
init_global_idt();
}
set_syscall_gate((uintptr_t)syscall_gate);
ptr->i_limit = sizeof(idt) - 1;
ptr->i_base = (uintptr_t)&idt;
@@ -141,7 +162,7 @@ int idt_load(struct idt_ptr *ptr)
return 0;
}
void isr_dispatch(struct cpu_context *regs)
void isr_dispatch(struct ml_cpu_context *regs)
{
int_hook h = isr_handlers[regs->int_no];
if (h) {
@@ -160,7 +181,7 @@ void irq_ack(unsigned int vec)
outportb(0x20, 0x20);
}
void irq_dispatch(struct cpu_context *regs)
void irq_dispatch(struct ml_cpu_context *regs)
{
end_charge_period();
@@ -178,8 +199,40 @@ void irq_dispatch(struct cpu_context *regs)
start_charge_period();
}
void syscall_dispatch(struct cpu_context *regs)
void syscall_dispatch(struct ml_cpu_context *regs)
{
unsigned int sysid = regs->rax;
virt_addr_t syscall_impl = syscall_get_function(sysid);
if (syscall_impl == 0) {
regs->rax = KERN_UNSUPPORTED;
return;
}
#define SYSCALL_SIGNATURE(...) \
intptr_t (*__VA_ARGS__)( \
uintptr_t, \
uintptr_t, \
uintptr_t, \
uintptr_t, \
uintptr_t, \
uintptr_t, \
uintptr_t, \
uintptr_t)
SYSCALL_SIGNATURE(fn) = (SYSCALL_SIGNATURE())syscall_impl;
ml_int_enable();
regs->rax
= fn(regs->rdi,
regs->rsi,
regs->rdx,
regs->r12,
regs->r8,
regs->r9,
regs->r13,
regs->r14);
ml_int_disable();
}
void hook_irq(enum irq_vector vec, struct irq_hook *hook)
@@ -194,263 +247,263 @@ void unhook_irq(enum irq_vector vec, struct irq_hook *hook)
queue_delete(hook_queue, &hook->irq_entry);
}
extern void _isr0();
extern void _isr1();
extern void _isr2();
extern void _isr3();
extern void _isr4();
extern void _isr5();
extern void _isr6();
extern void _isr7();
extern void _isr8();
extern void _isr9();
extern void _isr10();
extern void _isr11();
extern void _isr12();
extern void _isr13();
extern void _isr14();
extern void _isr15();
extern void _isr16();
extern void _isr17();
extern void _isr18();
extern void _isr19();
extern void _isr20();
extern void _isr21();
extern void _isr22();
extern void _isr23();
extern void _isr24();
extern void _isr25();
extern void _isr26();
extern void _isr27();
extern void _isr28();
extern void _isr29();
extern void _isr30();
extern void _isr31();
extern void _isr0(void);
extern void _isr1(void);
extern void _isr2(void);
extern void _isr3(void);
extern void _isr4(void);
extern void _isr5(void);
extern void _isr6(void);
extern void _isr7(void);
extern void _isr8(void);
extern void _isr9(void);
extern void _isr10(void);
extern void _isr11(void);
extern void _isr12(void);
extern void _isr13(void);
extern void _isr14(void);
extern void _isr15(void);
extern void _isr16(void);
extern void _isr17(void);
extern void _isr18(void);
extern void _isr19(void);
extern void _isr20(void);
extern void _isr21(void);
extern void _isr22(void);
extern void _isr23(void);
extern void _isr24(void);
extern void _isr25(void);
extern void _isr26(void);
extern void _isr27(void);
extern void _isr28(void);
extern void _isr29(void);
extern void _isr30(void);
extern void _isr31(void);
extern void _irq0();
extern void _irq1();
extern void _irq2();
extern void _irq3();
extern void _irq4();
extern void _irq5();
extern void _irq6();
extern void _irq7();
extern void _irq8();
extern void _irq9();
extern void _irq10();
extern void _irq11();
extern void _irq12();
extern void _irq13();
extern void _irq14();
extern void _irq15();
extern void _irq16();
extern void _irq17();
extern void _irq18();
extern void _irq19();
extern void _irq20();
extern void _irq21();
extern void _irq22();
extern void _irq23();
extern void _irq24();
extern void _irq25();
extern void _irq26();
extern void _irq27();
extern void _irq28();
extern void _irq29();
extern void _irq30();
extern void _irq31();
extern void _irq32();
extern void _irq33();
extern void _irq34();
extern void _irq35();
extern void _irq36();
extern void _irq37();
extern void _irq38();
extern void _irq39();
extern void _irq40();
extern void _irq41();
extern void _irq42();
extern void _irq43();
extern void _irq44();
extern void _irq45();
extern void _irq46();
extern void _irq47();
extern void _irq48();
extern void _irq49();
extern void _irq50();
extern void _irq51();
extern void _irq52();
extern void _irq53();
extern void _irq54();
extern void _irq55();
extern void _irq56();
extern void _irq57();
extern void _irq58();
extern void _irq59();
extern void _irq60();
extern void _irq61();
extern void _irq62();
extern void _irq63();
extern void _irq64();
extern void _irq65();
extern void _irq66();
extern void _irq67();
extern void _irq68();
extern void _irq69();
extern void _irq70();
extern void _irq71();
extern void _irq72();
extern void _irq73();
extern void _irq74();
extern void _irq75();
extern void _irq76();
extern void _irq77();
extern void _irq78();
extern void _irq79();
extern void _irq80();
extern void _irq81();
extern void _irq82();
extern void _irq83();
extern void _irq84();
extern void _irq85();
extern void _irq86();
extern void _irq87();
extern void _irq88();
extern void _irq89();
extern void _irq90();
extern void _irq91();
extern void _irq92();
extern void _irq93();
extern void _irq94();
extern void _irq95();
extern void _irq96();
extern void _irq97();
extern void _irq98();
extern void _irq99();
extern void _irq100();
extern void _irq101();
extern void _irq102();
extern void _irq103();
extern void _irq104();
extern void _irq105();
extern void _irq106();
extern void _irq107();
extern void _irq108();
extern void _irq109();
extern void _irq110();
extern void _irq111();
extern void _irq112();
extern void _irq113();
extern void _irq114();
extern void _irq115();
extern void _irq116();
extern void _irq117();
extern void _irq118();
extern void _irq119();
extern void _irq120();
extern void _irq121();
extern void _irq122();
extern void _irq123();
extern void _irq124();
extern void _irq125();
extern void _irq126();
extern void _irq127();
extern void _irq128();
extern void _irq129();
extern void _irq130();
extern void _irq131();
extern void _irq132();
extern void _irq133();
extern void _irq134();
extern void _irq135();
extern void _irq136();
extern void _irq137();
extern void _irq138();
extern void _irq139();
extern void _irq140();
extern void _irq141();
extern void _irq142();
extern void _irq143();
extern void _irq144();
extern void _irq145();
extern void _irq146();
extern void _irq147();
extern void _irq148();
extern void _irq149();
extern void _irq150();
extern void _irq151();
extern void _irq152();
extern void _irq153();
extern void _irq154();
extern void _irq155();
extern void _irq156();
extern void _irq157();
extern void _irq158();
extern void _irq159();
extern void _irq160();
extern void _irq161();
extern void _irq162();
extern void _irq163();
extern void _irq164();
extern void _irq165();
extern void _irq166();
extern void _irq167();
extern void _irq168();
extern void _irq169();
extern void _irq170();
extern void _irq171();
extern void _irq172();
extern void _irq173();
extern void _irq174();
extern void _irq175();
extern void _irq176();
extern void _irq177();
extern void _irq178();
extern void _irq179();
extern void _irq180();
extern void _irq181();
extern void _irq182();
extern void _irq183();
extern void _irq184();
extern void _irq185();
extern void _irq186();
extern void _irq187();
extern void _irq188();
extern void _irq189();
extern void _irq190();
extern void _irq191();
extern void _irq192();
extern void _irq193();
extern void _irq194();
extern void _irq195();
extern void _irq196();
extern void _irq197();
extern void _irq198();
extern void _irq199();
extern void _irq200();
extern void _irq201();
extern void _irq202();
extern void _irq203();
extern void _irq204();
extern void _irq205();
extern void _irq206();
extern void _irq207();
extern void _irq208();
extern void _irq209();
extern void _irq210();
extern void _irq211();
extern void _irq212();
extern void _irq213();
extern void _irq214();
extern void _irq215();
extern void _irq216();
extern void _irq217();
extern void _irq218();
extern void _irq219();
extern void _irq220();
extern void _irq221();
extern void _irq222();
extern void _irq223();
extern void _irq0(void);
extern void _irq1(void);
extern void _irq2(void);
extern void _irq3(void);
extern void _irq4(void);
extern void _irq5(void);
extern void _irq6(void);
extern void _irq7(void);
extern void _irq8(void);
extern void _irq9(void);
extern void _irq10(void);
extern void _irq11(void);
extern void _irq12(void);
extern void _irq13(void);
extern void _irq14(void);
extern void _irq15(void);
extern void _irq16(void);
extern void _irq17(void);
extern void _irq18(void);
extern void _irq19(void);
extern void _irq20(void);
extern void _irq21(void);
extern void _irq22(void);
extern void _irq23(void);
extern void _irq24(void);
extern void _irq25(void);
extern void _irq26(void);
extern void _irq27(void);
extern void _irq28(void);
extern void _irq29(void);
extern void _irq30(void);
extern void _irq31(void);
extern void _irq32(void);
extern void _irq33(void);
extern void _irq34(void);
extern void _irq35(void);
extern void _irq36(void);
extern void _irq37(void);
extern void _irq38(void);
extern void _irq39(void);
extern void _irq40(void);
extern void _irq41(void);
extern void _irq42(void);
extern void _irq43(void);
extern void _irq44(void);
extern void _irq45(void);
extern void _irq46(void);
extern void _irq47(void);
extern void _irq48(void);
extern void _irq49(void);
extern void _irq50(void);
extern void _irq51(void);
extern void _irq52(void);
extern void _irq53(void);
extern void _irq54(void);
extern void _irq55(void);
extern void _irq56(void);
extern void _irq57(void);
extern void _irq58(void);
extern void _irq59(void);
extern void _irq60(void);
extern void _irq61(void);
extern void _irq62(void);
extern void _irq63(void);
extern void _irq64(void);
extern void _irq65(void);
extern void _irq66(void);
extern void _irq67(void);
extern void _irq68(void);
extern void _irq69(void);
extern void _irq70(void);
extern void _irq71(void);
extern void _irq72(void);
extern void _irq73(void);
extern void _irq74(void);
extern void _irq75(void);
extern void _irq76(void);
extern void _irq77(void);
extern void _irq78(void);
extern void _irq79(void);
extern void _irq80(void);
extern void _irq81(void);
extern void _irq82(void);
extern void _irq83(void);
extern void _irq84(void);
extern void _irq85(void);
extern void _irq86(void);
extern void _irq87(void);
extern void _irq88(void);
extern void _irq89(void);
extern void _irq90(void);
extern void _irq91(void);
extern void _irq92(void);
extern void _irq93(void);
extern void _irq94(void);
extern void _irq95(void);
extern void _irq96(void);
extern void _irq97(void);
extern void _irq98(void);
extern void _irq99(void);
extern void _irq100(void);
extern void _irq101(void);
extern void _irq102(void);
extern void _irq103(void);
extern void _irq104(void);
extern void _irq105(void);
extern void _irq106(void);
extern void _irq107(void);
extern void _irq108(void);
extern void _irq109(void);
extern void _irq110(void);
extern void _irq111(void);
extern void _irq112(void);
extern void _irq113(void);
extern void _irq114(void);
extern void _irq115(void);
extern void _irq116(void);
extern void _irq117(void);
extern void _irq118(void);
extern void _irq119(void);
extern void _irq120(void);
extern void _irq121(void);
extern void _irq122(void);
extern void _irq123(void);
extern void _irq124(void);
extern void _irq125(void);
extern void _irq126(void);
extern void _irq127(void);
extern void _irq128(void);
extern void _irq129(void);
extern void _irq130(void);
extern void _irq131(void);
extern void _irq132(void);
extern void _irq133(void);
extern void _irq134(void);
extern void _irq135(void);
extern void _irq136(void);
extern void _irq137(void);
extern void _irq138(void);
extern void _irq139(void);
extern void _irq140(void);
extern void _irq141(void);
extern void _irq142(void);
extern void _irq143(void);
extern void _irq144(void);
extern void _irq145(void);
extern void _irq146(void);
extern void _irq147(void);
extern void _irq148(void);
extern void _irq149(void);
extern void _irq150(void);
extern void _irq151(void);
extern void _irq152(void);
extern void _irq153(void);
extern void _irq154(void);
extern void _irq155(void);
extern void _irq156(void);
extern void _irq157(void);
extern void _irq158(void);
extern void _irq159(void);
extern void _irq160(void);
extern void _irq161(void);
extern void _irq162(void);
extern void _irq163(void);
extern void _irq164(void);
extern void _irq165(void);
extern void _irq166(void);
extern void _irq167(void);
extern void _irq168(void);
extern void _irq169(void);
extern void _irq170(void);
extern void _irq171(void);
extern void _irq172(void);
extern void _irq173(void);
extern void _irq174(void);
extern void _irq175(void);
extern void _irq176(void);
extern void _irq177(void);
extern void _irq178(void);
extern void _irq179(void);
extern void _irq180(void);
extern void _irq181(void);
extern void _irq182(void);
extern void _irq183(void);
extern void _irq184(void);
extern void _irq185(void);
extern void _irq186(void);
extern void _irq187(void);
extern void _irq188(void);
extern void _irq189(void);
extern void _irq190(void);
extern void _irq191(void);
extern void _irq192(void);
extern void _irq193(void);
extern void _irq194(void);
extern void _irq195(void);
extern void _irq196(void);
extern void _irq197(void);
extern void _irq198(void);
extern void _irq199(void);
extern void _irq200(void);
extern void _irq201(void);
extern void _irq202(void);
extern void _irq203(void);
extern void _irq204(void);
extern void _irq205(void);
extern void _irq206(void);
extern void _irq207(void);
extern void _irq208(void);
extern void _irq209(void);
extern void _irq210(void);
extern void _irq211(void);
extern void _irq212(void);
extern void _irq213(void);
extern void _irq214(void);
extern void _irq215(void);
extern void _irq216(void);
extern void _irq217(void);
extern void _irq218(void);
extern void _irq219(void);
extern void _irq220(void);
extern void _irq221(void);
extern void _irq222(void);
extern void _irq223(void);
static uintptr_t int_entry_points[NR_IDT_ENTRIES] = {
[0] = (uintptr_t)_isr0, [1] = (uintptr_t)_isr1,

View File

@@ -332,80 +332,115 @@ IRQ 223, 255
isr_common_stub:
PUSH_REGS
# When ISR occurs in Ring 3, CPU sets %ss (and other non-code selectors)
# to 0.
mov %ss, %ax
cmp $0, %ax
jne isr_skipgs1
mov $0x10, %ax
mov %ax, %ss
swapgs
isr_skipgs1:
mov %rsp, %rdi
call isr_dispatch
POP_REGS
add $16, %rsp
cmpq $0x1b, 32(%rsp)
jne isr_skipgs2
swapgs
isr_skipgs2:
iretq
.global irq_common_stub
.type irq_common_stub, @function
irq_common_stub:
PUSH_REGS
# When IRQ occurs in Ring 3, CPU sets %ss (and other non-code selectors)
# to 0.
mov %ss, %ax
cmp $0, %ax
jne irq_skipgs1
mov $0x10, %ax
mov %ax, %ss
swapgs
irq_skipgs1:
mov %rsp, %rdi
call irq_dispatch
POP_REGS
add $16, %rsp
cmpq $0x1b, 32(%rsp)
jne isr_skipgs2
swapgs
irq_skipgs2:
iretq
.global syscall_gate
.type syscall_gate, @function
.extern syscall_dispatch
.type syscall_dispatch, @function
syscall_gate:
swapgs
movq %rsp, %gs:20 # GS+20 = rsp2 in the current TSS block (user stack storage)
movq %gs:4, %rsp # GS+4 = rsp0 in the current TSS block (per-thread kstack)
# start building a pf_cpu_context
movq %rsp, %gs:94 # GS+20 = rsp2 in the current TSS block (user stack storage)
movq %gs:78, %rsp # GS+4 = rsp0 in the current TSS block (per-thread kstack)
# start building a ml_cpu_context
pushq $0x1b
pushq %gs:20
pushq %gs:94
push %r11
push $0x23
push %rcx
pushq $0
pushq $0x80
PUSH_REGS
mov %rsp, %rdi
# switch back to user gs while in syscall_dispatch. Interrupts are enabled in syscall_dispatch,
# and if the task gets pre-empted, the incoming task will expect %gs to have its usermode value.
swapgs
call syscall_dispatch
POP_REGS
add $16, %rsp
pop %rcx
add $8, %rsp
pop %r11
add $16, %rsp
movq %gs:94, %rsp # GS+20 = rsp2 in the current TSS block
swapgs
movq %gs:20, %rsp # GS+20 = rsp2 in the current TSS block
swapgs
# back to usermode
sysretq
.global pf_faultptr
.type pf_faultptr, @function
pf_faultptr:
mov %cr2, %rax
ret

View File

@@ -1,24 +1,25 @@
#include "mango/machine/panic.h"
#include "mango/vm.h"
#include <mango/printk.h>
#include <mango/libc/stdio.h>
#include <arch/irq.h>
#include <kernel/libc/stdio.h>
#include <kernel/machine/cpu.h>
#include <kernel/machine/panic.h>
#include <kernel/printk.h>
#include <kernel/vm.h>
#define R_CF 0
#define R_PF 2
#define R_AF 4
#define R_ZF 6
#define R_SF 7
#define R_TF 8
#define R_IF 9
#define R_DF 10
#define R_OF 11
#define R_NT 14
#define R_VM 17
#define R_AC 18
#define R_CF 0
#define R_PF 2
#define R_AF 4
#define R_ZF 6
#define R_SF 7
#define R_TF 8
#define R_IF 9
#define R_DF 10
#define R_OF 11
#define R_NT 14
#define R_VM 17
#define R_AC 18
#define R_VIF 19
#define R_VIP 20
#define R_ID 21
#define R_ID 21
#define R_MAX 21
struct stack_frame {
@@ -82,7 +83,11 @@ static void print_rflags(uintptr_t rflags)
if (rflags & (1 << i)) {
const char *name = pf_rfl_name(i);
if (name) {
buf_i += snprintf(buf + buf_i, sizeof(buf) - buf_i, " %s", name);
buf_i += snprintf(
buf + buf_i,
sizeof(buf) - buf_i,
" %s",
name);
}
}
}
@@ -91,30 +96,43 @@ static void print_rflags(uintptr_t rflags)
printk(buf);
}
void ml_print_cpu_state(struct cpu_context *ctx)
void ml_print_cpu_state(struct ml_cpu_context *ctx)
{
printk("cpu state:");
if (ctx) {
printk(" rax %016llx rbx %016llx rcx %016llx",
ctx->rax, ctx->rbx, ctx->rcx);
ctx->rax,
ctx->rbx,
ctx->rcx);
printk(" rdx %016llx rsi %016llx rdi %016llx",
ctx->rdx, ctx->rsi, ctx->rdi);
ctx->rdx,
ctx->rsi,
ctx->rdi);
printk(" rsp %016llx rbp %016llx r8 %016llx",
ctx->rsp, ctx->rbp, ctx->r8);
ctx->rsp,
ctx->rbp,
ctx->r8);
printk(" r9 %016llx r10 %016llx r11 %016llx",
ctx->r9, ctx->r10, ctx->r11);
ctx->r9,
ctx->r10,
ctx->r11);
printk(" r12 %016llx r13 %016llx r14 %016llx",
ctx->r12, ctx->r13, ctx->r14);
ctx->r12,
ctx->r13,
ctx->r14);
printk(" r15 %016llx rip %016llx cs %04x ss %04x",
ctx->r15, ctx->rip, ctx->cs, ctx->ss);
ctx->r15,
ctx->rip,
ctx->cs,
ctx->ss);
print_rflags(ctx->rflags);
}
uintptr_t cr0 = 0, cr2 = 0, cr3 = 0, cr4 = 0;
asm volatile("mov %%cr0, %%rax" : "=a" (cr0));
asm volatile("mov %%cr2, %%rax" : "=a" (cr2));
asm volatile("mov %%cr3, %%rax" : "=a" (cr3));
asm volatile("mov %%cr4, %%rax" : "=a" (cr4));
asm volatile("mov %%cr0, %%rax" : "=a"(cr0));
asm volatile("mov %%cr2, %%rax" : "=a"(cr2));
asm volatile("mov %%cr3, %%rax" : "=a"(cr3));
asm volatile("mov %%cr4, %%rax" : "=a"(cr4));
printk(" cr0 %016llx cr2 %016llx", cr0, cr2);
printk(" cr3 %016llx cr4 %016llx", cr3, cr4);
}
@@ -124,22 +142,27 @@ static void print_stack_item(uintptr_t addr)
if (!addr) {
return;
}
char buf[64];
size_t i = 0;
i += snprintf(buf, sizeof(buf), " [<%p>] ", addr);
size_t offset = 0;
char name[128];
int found = -1;
if (found == 0 && name[0] != '\0') {
i += snprintf(buf + i, sizeof(buf) - i, "%s+0x%lx", name, offset);
i += snprintf(
buf + i,
sizeof(buf) - i,
"%s+0x%lx",
name,
offset);
} else {
i += snprintf(buf + i, sizeof(buf) - i, "?");
}
printk("%s", buf);
}
@@ -147,20 +170,19 @@ static void print_stack_trace(uintptr_t ip, uintptr_t *bp)
{
struct stack_frame *stk = (struct stack_frame *)bp;
printk("call trace:");
print_stack_item(ip);
int max_frames = 10, current_frame = 0;
while (1) {
if (!vm_virt_to_phys(stk) ||
bp == NULL ||
current_frame > max_frames) {
if (!vm_virt_to_phys(stk) || bp == NULL
|| current_frame > max_frames) {
break;
}
uintptr_t addr = stk->rip;
print_stack_item(addr);
stk = (struct stack_frame *)stk->rbp;
current_frame++;
}
@@ -169,11 +191,11 @@ static void print_stack_trace(uintptr_t ip, uintptr_t *bp)
void ml_print_stack_trace(uintptr_t ip)
{
uintptr_t *bp;
asm volatile("mov %%rbp, %0" : "=r" (bp));
asm volatile("mov %%rbp, %0" : "=r"(bp));
print_stack_trace(ip, bp);
}
void ml_print_stack_trace_irq(struct cpu_context *ctx)
void ml_print_stack_trace_irq(struct ml_cpu_context *ctx)
{
print_stack_trace(ctx->rip, (uintptr_t *)ctx->rbp);
}

View File

@@ -1,8 +1,8 @@
#include <arch/irq.h>
#include <arch/ports.h>
#include <mango/clock.h>
#include <mango/cpu.h>
#include <mango/printk.h>
#include <kernel/clock.h>
#include <kernel/cpu.h>
#include <kernel/printk.h>
#define PIT_COUNTER0 0x40
#define PIT_CMD 0x43

View File

@@ -1,20 +1,24 @@
#include <mango/types.h>
#include <mango/memblock.h>
#include <mango/vm.h>
#include <mango/printk.h>
#include <kernel/compiler.h>
#include <kernel/libc/stdio.h>
#include <kernel/memblock.h>
#include <kernel/pmap.h>
#include <kernel/printk.h>
#include <kernel/sched.h>
#include <kernel/types.h>
#include <kernel/vm-object.h>
#include <kernel/vm-region.h>
#include <kernel/vm.h>
#include <mango/status.h>
#include <mango/compiler.h>
#include <mango/pmap.h>
/* some helpful datasize constants */
#define C_1GiB 0x40000000ULL
#define C_2GiB (2 * C_1GiB)
#define C_1GiB 0x40000000ULL
#define C_2GiB (2 * C_1GiB)
#define BAD_INDEX ((unsigned int)-1)
#define PTR_TO_ENTRY(x) (((x) & ~VM_PAGE_MASK) | PTE_PRESENT | PTE_RW)
#define BAD_INDEX ((unsigned int)-1)
#define PTR_TO_ENTRY(x) (((x) & ~VM_PAGE_MASK) | PTE_PRESENT | PTE_RW | PTE_USR)
#define ENTRY_TO_PTR(x) ((x) & ~VM_PAGE_MASK)
#define PFN(x) ((x) >> VM_PAGE_SHIFT)
#define PFN(x) ((x) >> VM_PAGE_SHIFT)
static int can_use_gbpages = 0;
static pmap_t kernel_pmap;
@@ -33,24 +37,26 @@ static size_t ps_size(enum page_size ps)
}
}
static pmap_t alloc_pmap()
static pmap_t alloc_pmap(void)
{
struct pml4t *p = kzalloc(sizeof *p, 0);
return vm_virt_to_phys(p);
}
static pte_t make_pte(pfn_t pfn, enum vm_prot prot, enum page_size size)
static pte_t make_pte(pfn_t pfn, vm_prot_t prot, enum page_size size)
{
pte_t v = pfn;
switch (size) {
case PS_1G:
/* pfn_t is in terms of 4KiB pages, convert to 1GiB page frame number */
/* pfn_t is in terms of 4KiB pages, convert to 1GiB page frame
* number */
pfn >>= 18;
v = (pfn & 0x3FFFFF) << 30;
break;
case PS_2M:
/* pfn_t is in terms of 4KiB pages, convert to 2MiB page frame number */
/* pfn_t is in terms of 4KiB pages, convert to 2MiB page frame
* number */
pfn >>= 9;
v = (pfn & 0x7FFFFFFF) << 21;
break;
@@ -129,14 +135,15 @@ static void delete_pdir(phys_addr_t pd)
kfree(pdir);
}
static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot prot, enum page_size size)
static kern_status_t do_pmap_add(
pmap_t pmap,
virt_addr_t pv,
pfn_t pfn,
vm_prot_t prot,
enum page_size size)
{
uintptr_t pv = (uintptr_t)p;
unsigned int
pml4t_index = BAD_INDEX,
pdpt_index = BAD_INDEX,
pd_index = BAD_INDEX,
pt_index = BAD_INDEX;
unsigned int pml4t_index = BAD_INDEX, pdpt_index = BAD_INDEX,
pd_index = BAD_INDEX, pt_index = BAD_INDEX;
switch (size) {
case PS_4K:
@@ -171,16 +178,19 @@ static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot p
struct pdpt *pdpt = NULL;
if (!pml4t->p_entries[pml4t_index]) {
pdpt = kzalloc(sizeof *pdpt, 0);
pml4t->p_entries[pml4t_index] = PTR_TO_ENTRY(vm_virt_to_phys(pdpt));
pml4t->p_entries[pml4t_index]
= PTR_TO_ENTRY(vm_virt_to_phys(pdpt));
} else {
pdpt = vm_phys_to_virt(ENTRY_TO_PTR(pml4t->p_entries[pml4t_index]));
pdpt = vm_phys_to_virt(
ENTRY_TO_PTR(pml4t->p_entries[pml4t_index]));
}
/* if we're mapping a 1GiB page, we stop here */
if (size == PS_1G) {
if (pdpt->p_entries[pdpt_index] != 0) {
/* this slot points to a pdir, delete it.
if this slot points to a hugepage, this does nothing */
if this slot points to a hugepage, this does nothing
*/
delete_pdir(pdpt->p_entries[pdpt_index]);
}
@@ -189,22 +199,25 @@ static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot p
return KERN_OK;
}
/* 3. traverse PDPT, get PDIR (optional, 4K and 2M only) */
struct pdir *pdir = NULL;
if (!pdpt->p_entries[pdpt_index] || pdpt->p_pages[pdpt_index] & PTE_PAGESIZE) {
if (!pdpt->p_entries[pdpt_index]
|| pdpt->p_pages[pdpt_index] & PTE_PAGESIZE) {
/* entry is null, or points to a hugepage */
pdir = kzalloc(sizeof *pdir, 0);
pdpt->p_entries[pdpt_index] = PTR_TO_ENTRY(vm_virt_to_phys(pdir));
pdpt->p_entries[pdpt_index]
= PTR_TO_ENTRY(vm_virt_to_phys(pdir));
} else {
pdir = vm_phys_to_virt(ENTRY_TO_PTR(pdpt->p_entries[pdpt_index]));
pdir = vm_phys_to_virt(
ENTRY_TO_PTR(pdpt->p_entries[pdpt_index]));
}
/* if we're mapping a 2MiB page, we stop here */
if (size == PS_2M) {
if (pdir->p_entries[pd_index] != 0) {
/* this slot points to a ptab, delete it.
if this slot points to a hugepage, this does nothing */
if this slot points to a hugepage, this does nothing
*/
delete_ptab(pdir->p_entries[pd_index]);
}
@@ -214,7 +227,8 @@ static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot p
/* 4. traverse PDIR, get PTAB (optional, 4K only) */
struct ptab *ptab = NULL;
if (!pdir->p_entries[pd_index] || pdir->p_pages[pd_index] & PTE_PAGESIZE) {
if (!pdir->p_entries[pd_index]
|| pdir->p_pages[pd_index] & PTE_PAGESIZE) {
/* entry is null, or points to a hugepage */
ptab = kzalloc(sizeof *ptab, 0);
pdir->p_entries[pd_index] = PTR_TO_ENTRY(vm_virt_to_phys(ptab));
@@ -234,7 +248,8 @@ pmap_t get_kernel_pmap(void)
void pmap_bootstrap(void)
{
can_use_gbpages = gigabyte_pages();
printk("pmap: gigabyte pages %sabled", can_use_gbpages == 1 ? "en" : "dis");
printk("pmap: gigabyte pages %sabled",
can_use_gbpages == 1 ? "en" : "dis");
enable_nx();
printk("pmap: NX protection enabled");
@@ -249,18 +264,21 @@ void pmap_bootstrap(void)
/* map 2GiB at the end of the address space to
replace the mapping created by start_32 and allow access to
the kernel and memblock-allocated data. */
uintptr_t vbase = VM_KERNEL_VOFFSET;
virt_addr_t vbase = VM_KERNEL_VOFFSET;
for (size_t i = 0; i < C_2GiB; i += hugepage_sz) {
do_pmap_add(kernel_pmap,
(void *)(vbase + i),
PFN(i),
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_SVR,
hugepage);
do_pmap_add(
kernel_pmap,
vbase + i,
PFN(i),
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC
| VM_PROT_SVR,
hugepage);
}
phys_addr_t pmem_limit = 0x0;
struct memblock_iter it;
for_each_mem_range(&it, 0x00, UINTPTR_MAX) {
for_each_mem_range(&it, 0x00, UINTPTR_MAX)
{
if (it.it_limit > pmem_limit) {
pmem_limit = it.it_limit;
}
@@ -268,10 +286,13 @@ void pmap_bootstrap(void)
vbase = VM_PAGEMAP_BASE;
for (size_t i = 0; i < pmem_limit; i += hugepage_sz) {
do_pmap_add(kernel_pmap,
(void *)(vbase + i),
PFN(i),
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_SVR | VM_PROT_NOCACHE, hugepage);
do_pmap_add(
kernel_pmap,
vbase + i,
PFN(i),
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_SVR
| VM_PROT_NOCACHE,
hugepage);
}
pmap_switch(kernel_pmap);
@@ -279,15 +300,84 @@ void pmap_bootstrap(void)
pmap_t pmap_create(void)
{
return 0;
pmap_t pmap = alloc_pmap();
pmap_t kernel_pmap = get_kernel_pmap();
struct pml4t *pml4t = vm_phys_to_virt(pmap);
struct pml4t *kernel_pml4t = vm_phys_to_virt(kernel_pmap);
for (unsigned int i = 256; i < 512; i++) {
pml4t->p_entries[i] = kernel_pml4t->p_entries[i];
}
return pmap;
}
void pmap_destroy(pmap_t pmap)
{
}
kern_status_t pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot prot, enum pmap_flags flags)
static void log_fault(virt_addr_t fault_addr, enum pmap_fault_flags flags)
{
char flag_str[128] = {0};
size_t p = 0;
if (flags & PMAP_FAULT_PRESENT) {
p += snprintf(flag_str + p, sizeof flag_str - p, " PRESENT");
} else {
p += snprintf(flag_str + p, sizeof flag_str - p, " MISSING");
}
if (flags & PMAP_FAULT_USER) {
p += snprintf(flag_str + p, sizeof flag_str - p, " USER");
} else {
p += snprintf(flag_str + p, sizeof flag_str - p, " SVR");
}
if (flags & PMAP_FAULT_WRITE) {
p += snprintf(flag_str + p, sizeof flag_str - p, " WRITE");
} else {
p += snprintf(flag_str + p, sizeof flag_str - p, " READ");
}
if (flags & PMAP_FAULT_IFETCH) {
p += snprintf(flag_str + p, sizeof flag_str - p, " IFETCH");
}
if (flags & PMAP_FAULT_BADCFG) {
p += snprintf(flag_str + p, sizeof flag_str - p, " BADCFG");
}
printk("pmap: fault at 0x%llx (%s)", fault_addr, flag_str);
}
kern_status_t pmap_handle_fault(
virt_addr_t fault_addr,
enum pmap_fault_flags flags)
{
// log_fault(fault_addr, flags);
if (flags & PMAP_FAULT_PRESENT) {
return KERN_FATAL_ERROR;
}
struct task *task = current_task();
struct vm_region *space = task->t_address_space;
unsigned long lock_flags;
vm_region_lock_irqsave(space, &lock_flags);
kern_status_t status = vm_region_demand_map(space, fault_addr, flags);
vm_region_unlock_irqrestore(space, lock_flags);
return status;
}
kern_status_t pmap_add(
pmap_t pmap,
virt_addr_t p,
pfn_t pfn,
vm_prot_t prot,
enum pmap_flags flags)
{
enum page_size ps = PS_4K;
if (flags & PMAP_HUGEPAGE) {
@@ -297,17 +387,23 @@ kern_status_t pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot prot, enum
return do_pmap_add(pmap, p, pfn, prot, ps);
}
kern_status_t pmap_add_block(pmap_t pmap, void *p, pfn_t pfn, size_t len, enum vm_prot prot, enum pmap_flags flags)
kern_status_t pmap_add_block(
pmap_t pmap,
virt_addr_t p,
pfn_t pfn,
size_t len,
vm_prot_t prot,
enum pmap_flags flags)
{
return KERN_OK;
}
kern_status_t pmap_remove(pmap_t pmap, void *p)
kern_status_t pmap_remove(pmap_t pmap, virt_addr_t p)
{
return KERN_OK;
}
kern_status_t pmap_remove_range(pmap_t pmap, void *p, size_t len)
kern_status_t pmap_remove_range(pmap_t pmap, virt_addr_t p, size_t len)
{
return KERN_OK;
}

View File

@@ -1,39 +1,50 @@
#include <stdint.h>
uint8_t inportb(uint16_t port) {
uint8_t data;
__asm__ __volatile__("inb %1, %0" : "=a"(data) : "dN"(port));
return data;
uint8_t inportb(uint16_t port)
{
uint8_t data;
__asm__ __volatile__("inb %1, %0" : "=a"(data) : "dN"(port));
return data;
}
void outportb(uint16_t port, uint8_t data) {
__asm__ __volatile__("outb %1, %0" : : "dN"(port), "a"(data));
void outportb(uint16_t port, uint8_t data)
{
__asm__ __volatile__("outb %1, %0" : : "dN"(port), "a"(data));
}
uint16_t inportw(uint16_t port) {
uint16_t data;
__asm__ __volatile__("inw %1, %0" : "=a"(data) : "dN"(port));
return data;
uint16_t inportw(uint16_t port)
{
uint16_t data;
__asm__ __volatile__("inw %1, %0" : "=a"(data) : "dN"(port));
return data;
}
void outportw(uint16_t port, uint16_t data) {
__asm__ __volatile__("outw %1, %0" : : "dN"(port), "a"(data));
void outportw(uint16_t port, uint16_t data)
{
__asm__ __volatile__("outw %1, %0" : : "dN"(port), "a"(data));
}
uint32_t inportl(uint16_t port) {
uint32_t data;
__asm__ __volatile__("inl %%dx, %%eax" : "=a"(data) : "dN"(port));
return data;
uint32_t inportl(uint16_t port)
{
uint32_t data;
__asm__ __volatile__("inl %%dx, %%eax" : "=a"(data) : "dN"(port));
return data;
}
void outportl(uint16_t port, uint32_t data) {
__asm__ __volatile__("outl %%eax, %%dx" : : "dN"(port), "a"(data));
void outportl(uint16_t port, uint32_t data)
{
__asm__ __volatile__("outl %%eax, %%dx" : : "dN"(port), "a"(data));
}
void outportsw(uint16_t port, void *data, uint32_t size) {
__asm__ __volatile__("rep outsw" : "+S"(data), "+c"(size) : "d"(port));
void outportsw(uint16_t port, void *data, uint32_t size)
{
__asm__ __volatile__("rep outsw" : "+S"(data), "+c"(size) : "d"(port));
}
void inportsw(uint16_t port, unsigned char *data, unsigned long size) {
__asm__ __volatile__("rep insw" : "+D"(data), "+c"(size) : "d"(port) : "memory");
void inportsw(uint16_t port, unsigned char *data, unsigned long size)
{
__asm__ __volatile__("rep insw"
: "+D"(data), "+c"(size)
: "d"(port)
: "memory");
}

View File

@@ -1,11 +1,8 @@
#include <arch/irq.h>
#include <arch/ports.h>
#include <arch/serial.h>
#include <mango/device.h>
#include <mango/kext.h>
#include <mango/libc/stdio.h>
#include <mango/printk.h>
#include <mango/tty.h>
#include <kernel/libc/stdio.h>
#include <kernel/printk.h>
#define COM1 0x3F8
#define COM2 0x2F8

View File

@@ -1,7 +1,21 @@
find_program(QEMU qemu-system-x86_64)
set(DEBUG_SESSION ${CMAKE_SOURCE_DIR}/tools/kernel-debug/debug_session.sh)
set(DEBUG_CFG_GDB ${CMAKE_SOURCE_DIR}/tools/kernel-debug/gdb_session_init)
set(DEBUG_CFG_LLDB ${CMAKE_SOURCE_DIR}/tools/kernel-debug/lldb_session_init)
add_custom_target(run
USES_TERMINAL
COMMAND ${QEMU}
-kernel $<TARGET_FILE:${kernel_exe_name}>
-serial stdio)
# LLDB DEPENDS ON AN ELF64 EXECUTABLE.
# .DBG FILE MUST NOT BE PATCHED WITH E64PATCH!
add_custom_target(debug
USES_TERMINAL
COMMAND ${DEBUG_SESSION}
${DEBUG_CFG_GDB}
${DEBUG_CFG_LLDB}
${QEMU} -kernel $<TARGET_FILE:${kernel_exe_name}>
-S -s
-monitor stdio)

View File

@@ -1,12 +1,24 @@
#include <mango/machine/thread.h>
#include <kernel/machine/cpu.h>
#include <kernel/machine/thread.h>
#define MAX_REG_ARGS 6
#define REG_ARG_0 rdi
#define REG_ARG_1 rsi
#define REG_ARG_2 rdx
#define REG_ARG_3 rcx
#define REG_ARG_4 r8
#define REG_ARG_5 r9
/* this is the context information restored by ml_thread_switch.
* since ml_thread_switch only jumps to kernel-mode, IRETQ isn't used,
* and the extra register values needed by IRETQ aren't present. */
struct thread_ctx {
uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
uint64_t rdi, rsi, rbp, unused_rsp, rbx, rdx, rcx, rax;
uint64_t rfl;
} __packed;
void prepare_stack(uintptr_t ip, uintptr_t *sp)
void ml_thread_prepare_kernel_context(uintptr_t ip, uintptr_t *sp)
{
(*sp) -= sizeof(uintptr_t);
uintptr_t *dest_ip = (uintptr_t *)(*sp);
@@ -18,3 +30,50 @@ void prepare_stack(uintptr_t ip, uintptr_t *sp)
ctx->rfl = 0x202;
}
extern kern_status_t ml_thread_prepare_user_context(
virt_addr_t ip,
virt_addr_t user_sp,
virt_addr_t *kernel_sp,
const uintptr_t *args,
size_t nr_args)
{
(*kernel_sp) -= sizeof(struct ml_cpu_context);
struct ml_cpu_context *ctx = (struct ml_cpu_context *)(*kernel_sp);
memset(ctx, 0x0, sizeof *ctx);
ctx->rip = ip;
ctx->rsp = user_sp;
ctx->ss = 0x1b;
ctx->cs = 0x23;
ctx->rflags = 0x202;
ctx->rdi = 0; // arg 0
ctx->rsi = 0; // arg 1
for (size_t i = 0; i < nr_args; i++) {
switch (i) {
case 0:
ctx->REG_ARG_0 = args[i];
break;
case 1:
ctx->REG_ARG_1 = args[i];
break;
case 2:
ctx->REG_ARG_2 = args[i];
break;
case 3:
ctx->REG_ARG_3 = args[i];
break;
case 4:
ctx->REG_ARG_4 = args[i];
break;
case 5:
ctx->REG_ARG_5 = args[i];
break;
default:
return KERN_INVALID_ARGUMENT;
}
}
return KERN_OK;
}

View File

@@ -1,13 +1,13 @@
.code64
.code64
.extern THREAD_sp
.global switch_to
.type switch_to, @function
.extern THREAD_sp
.global ml_thread_switch
.type ml_thread_switch, @function
// %rdi = (struct thread *) current thread.
// %rsi = (struct thread *) next thread.
switch_to:
ml_thread_switch:
pushfq
push %rax
@@ -26,7 +26,7 @@ switch_to:
push %r13
push %r14
push %r15
movq %rsp, THREAD_sp(%rdi)
movq THREAD_sp(%rsi), %rsp
@@ -50,3 +50,28 @@ switch_to:
popfq
ret
.global ml_thread_switch_user
.type ml_thread_switch_user, @function
ml_thread_switch_user:
pop %r15
pop %r14
pop %r13
pop %r12
pop %r11
pop %r10
pop %r9
pop %r8
pop %rdi
pop %rsi
pop %rbp
add $8, %rsp
pop %rbx
pop %rdx
pop %rcx
pop %rax
add $16, %rsp
swapgs
iretq

View File

@@ -1,11 +1,13 @@
# the name of the target operating system
set(CMAKE_SYSTEM_NAME Magenta)
set(CMAKE_SYSTEM_NAME Mango)
# which compilers to use for C and C++
set(CMAKE_C_COMPILER x86_64-elf-gcc)
set(CMAKE_ASM_COMPILER x86_64-elf-gcc)
set(CMAKE_CXX_COMPILER x86_64-elf-g++)
set(CMAKE_STRIP x86_64-elf-strip)
set(CMAKE_C_COMPILER_WORKS TRUE)
set(CMAKE_CXX_COMPILER_WORKS TRUE)

43
arch/x86_64/tss.c Normal file
View File

@@ -0,0 +1,43 @@
#include <arch/gdt.h>
#include <arch/tss.h>
#include <kernel/libc/string.h>
static void tss_flush(int index)
{
index *= sizeof(struct gdt_entry);
index |= 3;
asm volatile("mov %0, %%eax; ltr %%ax" ::"r"(index));
}
void tss_init(struct tss *tss, struct tss_ptr *ptr)
{
memset(tss, 0x0, sizeof *tss);
ptr->tss_base = (uint64_t)tss;
ptr->tss_limit = (uint16_t)sizeof *tss;
}
void tss_load(struct tss *tss)
{
tss_flush(TSS_GDT_INDEX);
}
virt_addr_t tss_get_kstack(struct tss *tss)
{
return tss->rsp0;
}
virt_addr_t tss_get_ustack(struct tss *tss)
{
return tss->rsp2;
}
void tss_set_kstack(struct tss *tss, virt_addr_t sp)
{
tss->rsp0 = sp;
}
void tss_set_ustack(struct tss *tss, virt_addr_t sp)
{
tss->rsp2 = sp;
}

126
arch/x86_64/vga.c Normal file
View File

@@ -0,0 +1,126 @@
#include <arch/irq.h>
#include <arch/ports.h>
#include <arch/serial.h>
#include <kernel/libc/stdio.h>
#include <kernel/machine/vm.h>
#include <kernel/printk.h>
struct vga_console {
uint16_t *vga_framebuffer;
unsigned int vga_cursor_x, vga_cursor_y;
unsigned int vga_screen_width, vga_screen_height;
uint8_t vga_attrib;
};
static struct vga_console vga_con = {
.vga_attrib = 0x0F,
.vga_screen_width = 80,
.vga_screen_height = 25,
.vga_framebuffer = (uint16_t *)0xffffffff800b8000,
};
static void vga_console_clear(struct vga_console *con)
{
size_t len = con->vga_screen_width * con->vga_screen_height;
for (size_t i = 0; i < len; i++) {
con->vga_framebuffer[i] = (uint16_t)con->vga_attrib << 8;
}
con->vga_cursor_x = 0;
con->vga_cursor_y = 0;
}
static void vga_console_show_cursor(struct vga_console *con)
{
size_t start = 0, end = 15;
outportb(0x3D4, 0x0A);
outportb(0x3D5, (inportb(0x3D5) & 0xC0) | start);
outportb(0x3D4, 0x0B);
outportb(0x3D5, (inportb(0x3D5) & 0xE0) | end);
}
static void vga_console_update_cursor(struct vga_console *con)
{
uint16_t pos
= con->vga_cursor_y * con->vga_screen_width + con->vga_cursor_x;
outportb(0x3D4, 0x0F);
outportb(0x3D5, (uint8_t)(pos & 0xFF));
outportb(0x3D4, 0x0E);
outportb(0x3D5, (uint8_t)((pos >> 8) & 0xFF));
}
static void vga_console_scroll(struct vga_console *con)
{
uint16_t *src = &con->vga_framebuffer[con->vga_screen_width];
uint16_t *dest = &con->vga_framebuffer[0];
size_t len = (con->vga_screen_height - 1) * con->vga_screen_width
* sizeof *con->vga_framebuffer;
memcpy(dest, src, len);
dest = &con->vga_framebuffer
[(con->vga_screen_height - 1) * con->vga_screen_width];
len = con->vga_screen_width;
for (size_t i = 0; i < len; i++) {
dest[i] = (uint16_t)con->vga_attrib << 8;
}
con->vga_cursor_x = 0;
con->vga_cursor_y = con->vga_screen_height - 1;
}
static void vga_console_putc(struct vga_console *con, char c)
{
switch (c) {
case '\n':
con->vga_cursor_x = 0;
con->vga_cursor_y++;
break;
case '\r':
con->vga_cursor_x = 0;
break;
default:
con->vga_framebuffer
[con->vga_cursor_y * con->vga_screen_width
+ con->vga_cursor_x]
= ((uint16_t)con->vga_attrib << 8) | c;
con->vga_cursor_x++;
break;
}
if (con->vga_cursor_x >= con->vga_screen_width) {
con->vga_cursor_x = 0;
con->vga_cursor_y++;
}
if (con->vga_cursor_y >= con->vga_screen_height) {
vga_console_scroll(con);
}
}
static void vgacon_write(struct console *con, const char *s, unsigned int len)
{
for (unsigned int i = 0; i < len; i++) {
vga_console_putc(&vga_con, s[i]);
}
vga_console_update_cursor(&vga_con);
}
static struct console vgacon = {
.c_name = "vgacon",
.c_flags = CON_BOOT,
.c_write = vgacon_write,
.c_lock = SPIN_LOCK_INIT,
};
void vgacon_init(void)
{
vga_console_clear(&vga_con);
vga_console_show_cursor(&vga_con);
console_register(&vgacon);
}

39
build.sh Executable file
View File

@@ -0,0 +1,39 @@
#!/bin/bash
arch=$1
tools_src_dir="$(pwd)/tools"
kernel_src_dir="$(pwd)"
tools_build_dir="$(pwd)/build/tools"
kernel_build_dir="$(pwd)/build"
bin_dir="$kernel_build_dir/bin"
lib_dir="$kernel_build_dir/lib"
if [[ -z "$arch" ]]; then
echo "No architecture specified."
exit -1
fi
rm -rf $kernel_build_dir
mkdir -p $tools_build_dir
mkdir -p $kernel_build_dir
pushd $tools_build_dir
cmake \
-DCMAKE_RUNTIME_OUTPUT_DIRECTORY="$bin_dir" \
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY="$lib_dir" \
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY="$lib_dir" \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
$tools_src_dir
ninja
popd
pushd $kernel_build_dir
cmake \
-DBUILD_TOOLS_DIR="$bin_dir" \
-DCMAKE_TOOLCHAIN_FILE="$kernel_src_dir/arch/$arch/toolchain.cmake" \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
$kernel_src_dir
ninja
popd

59
doc/kernel-interface.txt Executable file
View File

@@ -0,0 +1,59 @@
=== KERNEL TYPES ====
kern_handle_t
kern_status_t
kern_txnid_t
kern_clock_t
kern_msg_t
=== KERNEL ENUMS ====
kern_status_t:
KERN_SUCCESS
KERN_BAD_HANDLE
clockid_t:
CLOCK_REALTIME
CLOCK_MONOTONIC
=== KERNEL STRUCTS ====
kern_msg_t {
void *buf;
size_t len;
kern_handle_t *handles;
size_t nhandles
}
=== KERNEL OBJECTS ====
port
timer
address_space
page_buf
task
thread
event
=== KERNEL SYSTEM CALLS ====
handle_close
port_create
port_publish
port_connect
msg_send
msg_recv
msg_read
msg_write
timer_create
timer_arm
clock_gettime
task_get
task_move_handle

View File

@@ -1,5 +1,5 @@
#include <mango/libc/string.h>
#include <mango/bitmap.h>
#include <kernel/bitmap.h>
#include <kernel/libc/string.h>
void bitmap_zero(unsigned long *map, unsigned long nbits)
{
@@ -25,7 +25,7 @@ void bitmap_set(unsigned long *map, unsigned long bit)
void bitmap_clear(unsigned long *map, unsigned long bit)
{
unsigned long index = bit / BITS_PER_WORD;
unsigned long offset = bit & (BITS_PER_WORD - 1);
unsigned long offset = (BITS_PER_WORD - bit - 1) & (BITS_PER_WORD - 1);
unsigned long mask = 1ul << offset;
map[index] &= ~mask;
@@ -38,7 +38,6 @@ bool bitmap_check(unsigned long *map, unsigned long bit)
unsigned long mask = 1ul << offset;
return (map[index] & mask) != 0 ? true : false;
}
unsigned int bitmap_count_set(unsigned long *map, unsigned long nbits)

View File

@@ -57,7 +57,7 @@
provide a comparator function.
*/
#include <mango/btree.h>
#include <kernel/btree.h>
#include <stddef.h>
#define MAX(a, b) ((a) > (b) ? (a) : (b))

View File

@@ -1,4 +1,4 @@
#include <mango/queue.h>
#include <kernel/queue.h>
size_t queue_length(struct queue *q)
{

View File

@@ -1,5 +1,5 @@
#include <mango/ringbuffer.h>
#include <mango/sched.h>
#include <kernel/ringbuffer.h>
#include <kernel/sched.h>
size_t ringbuffer_unread(struct ringbuffer *ring_buffer)
{

View File

@@ -1,8 +1,8 @@
#ifndef MANGO_ARG_H_
#define MANGO_ARG_H_
#ifndef KERNEL_ARG_H_
#define KERNEL_ARG_H_
#include <mango/types.h>
#include <stdbool.h>
#include <mango/status.h>
#define CMDLINE_MAX 4096

View File

@@ -1,5 +1,5 @@
#ifndef MANGO_BITMAP_H_
#define MANGO_BITMAP_H_
#ifndef KERNEL_BITMAP_H_
#define KERNEL_BITMAP_H_
#include <stdbool.h>

38
include/kernel/bsp.h Normal file
View File

@@ -0,0 +1,38 @@
#ifndef KERNEL_BSP_H_
#define KERNEL_BSP_H_
#include <kernel/compiler.h>
#include <mango/status.h>
#include <kernel/types.h>
#include <stddef.h>
#include <stdint.h>
#define BSP_MAGIC 0xcafebabe
struct task;
struct bsp_trailer {
/* these fields are stored in big endian in the package itself */
uint32_t bsp_magic;
uint64_t bsp_fs_offset;
uint32_t bsp_fs_len;
uint64_t bsp_exec_offset;
uint32_t bsp_exec_len;
uint64_t bsp_text_faddr, bsp_text_vaddr, bsp_text_size;
uint64_t bsp_data_faddr, bsp_data_vaddr, bsp_data_size;
uint64_t bsp_exec_entry;
} __packed;
struct bsp {
/* the values in this struct are stored in host byte order */
struct bsp_trailer bsp_trailer;
struct vm_object *bsp_vmo;
};
extern void bsp_set_location(const struct boot_module *mod);
extern void bsp_get_location(struct boot_module *out);
extern kern_status_t bsp_load(struct bsp *bsp, const struct boot_module *mod);
extern kern_status_t bsp_launch_async(struct bsp *bsp, struct task *task);
#endif

View File

@@ -20,99 +20,130 @@
software without specific prior written permission.
*/
#ifndef MANGO_BTREE_H_
#define MANGO_BTREE_H_
#ifndef KERNEL_BTREE_H_
#define KERNEL_BTREE_H_
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/* if your custom structure contains a struct btree_node (i.e. it can be part of a btree),
you can use this macro to convert a struct btree_node* to a your_type*
/* if your custom structure contains a struct btree_node (i.e. it can be part of
a btree), you can use this macro to convert a struct btree_node* to a
your_type*
@param t the name of your custom type (something that can be passed to offsetof)
@param m the name of the struct btree_node member variable within your custom type.
@param v the struct btree_node pointer that you wish to convert. if this is NULL, NULL will be returned.
@param t the name of your custom type (something that can be passed to
offsetof)
@param m the name of the struct btree_node member variable within your custom
type.
@param v the struct btree_node pointer that you wish to convert. if this is
NULL, NULL will be returned.
*/
#define BTREE_CONTAINER(t, m, v) ((void *)((v) ? (uintptr_t)(v) - (offsetof(t, m)) : 0))
#define BTREE_CONTAINER(t, m, v) \
((void *)((v) ? (uintptr_t)(v) - (offsetof(t, m)) : 0))
/* defines a simple node insertion function.
this function assumes that your nodes have simple integer keys that can be compared with the usual operators.
this function assumes that your nodes have simple integer keys that can be
compared with the usual operators.
EXAMPLE:
if you have a tree node type like this:
struct my_tree_node {
int key;
struct btree_node base;
struct btree_node base;
}
You would use the following call to generate an insert function for a tree with this node type:
You would use the following call to generate an insert function for a tree
with this node type:
BTREE_DEFINE_SIMPLE_INSERT(struct my_tree_node, base, key, my_tree_node_insert);
BTREE_DEFINE_SIMPLE_INSERT(struct my_tree_node, base, key,
my_tree_node_insert);
Which would emit a function defined like:
static void my_tree_node_insert(struct btree *tree, struct my_tree_node *node);
static void my_tree_node_insert(struct btree *tree, struct my_tree_node
*node);
@param node_type your custom tree node type. usually a structure that contains a struct btree_node member.
@param container_node_member the name of the struct btree_node member variable within your custom type.
@param container_key_member the name of the key member variable within your custom type.
@param node_type your custom tree node type. usually a structure that
contains a struct btree_node member.
@param container_node_member the name of the struct btree_node member
variable within your custom type.
@param container_key_member the name of the key member variable within your
custom type.
@param function_name the name of the function to generate.
*/
#define BTREE_DEFINE_SIMPLE_INSERT(node_type, container_node_member, container_key_member, function_name) \
void function_name(struct btree *tree, node_type *node) \
{ \
if (!tree->b_root) { \
tree->b_root = &node->container_node_member; \
btree_insert_fixup(tree, &node->container_node_member); \
return; \
} \
\
struct btree_node *cur = tree->b_root; \
while (1) { \
node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
struct btree_node *next = NULL; \
\
if (node->container_key_member > cur_node->container_key_member) { \
next = btree_right(cur); \
\
if (!next) { \
btree_put_right(cur, &node->container_node_member); \
break; \
} \
} else if (node->container_key_member < cur_node->container_key_member) { \
next = btree_left(cur); \
\
if (!next) { \
btree_put_left(cur, &node->container_node_member); \
break; \
} \
} else { \
return; \
} \
\
cur = next; \
} \
\
btree_insert_fixup(tree, &node->container_node_member); \
#define BTREE_DEFINE_SIMPLE_INSERT( \
node_type, \
container_node_member, \
container_key_member, \
function_name) \
void function_name(struct btree *tree, node_type *node) \
{ \
if (!tree->b_root) { \
tree->b_root = &node->container_node_member; \
btree_insert_fixup( \
tree, \
&node->container_node_member); \
return; \
} \
\
struct btree_node *cur = tree->b_root; \
while (1) { \
node_type *cur_node = BTREE_CONTAINER( \
node_type, \
container_node_member, \
cur); \
struct btree_node *next = NULL; \
\
if (node->container_key_member \
> cur_node->container_key_member) { \
next = btree_right(cur); \
\
if (!next) { \
btree_put_right( \
cur, \
&node->container_node_member); \
break; \
} \
} else if ( \
node->container_key_member \
< cur_node->container_key_member) { \
next = btree_left(cur); \
\
if (!next) { \
btree_put_left( \
cur, \
&node->container_node_member); \
break; \
} \
} else { \
return; \
} \
\
cur = next; \
} \
\
btree_insert_fixup(tree, &node->container_node_member); \
}
/* defines a node insertion function.
this function should be used for trees with complex node keys that cannot be directly compared.
a comparator for your keys must be supplied.
this function should be used for trees with complex node keys that cannot be
directly compared. a comparator for your keys must be supplied.
EXAMPLE:
if you have a tree node type like this:
struct my_tree_node {
complex_key_t key;
struct btree_node base;
struct btree_node base;
}
You would need to define a comparator function or macro with the following signature:
You would need to define a comparator function or macro with the following
signature:
int my_comparator(struct my_tree_node *a, struct my_tree_node *b);
@@ -122,102 +153,136 @@ extern "C" {
return 0 if a == b
return 1 if a > b
You would use the following call to generate an insert function for a tree with this node type:
You would use the following call to generate an insert function for a tree
with this node type:
BTREE_DEFINE_INSERT(struct my_tree_node, base, key, my_tree_node_insert, my_comparator);
BTREE_DEFINE_INSERT(struct my_tree_node, base, key, my_tree_node_insert,
my_comparator);
Which would emit a function defined like:
static void my_tree_node_insert(struct btree *tree, struct my_tree_node *node);
static void my_tree_node_insert(struct btree *tree, struct my_tree_node
*node);
@param node_type your custom tree node type. usually a structure that contains a struct btree_node member.
@param container_node_member the name of the struct btree_node member variable within your custom type.
@param container_key_member the name of the key member variable within your custom type.
@param node_type your custom tree node type. usually a structure that
contains a struct btree_node member.
@param container_node_member the name of the struct btree_node member
variable within your custom type.
@param container_key_member the name of the key member variable within your
custom type.
@param function_name the name of the function to generate.
@param comparator the name of a comparator function or functional-macro that conforms to the
requirements listed above.
@param comparator the name of a comparator function or functional-macro that
conforms to the requirements listed above.
*/
#define BTREE_DEFINE_INSERT(node_type, container_node_member, container_key_member, function_name, comparator) \
void function_name(struct btree *tree, node_type *node) \
{ \
if (!tree->b_root) { \
tree->b_root = &node->container_node_member; \
btree_insert_fixup(tree, &node->container_node_member); \
return; \
} \
\
struct btree_node *cur = tree->b_root; \
while (1) { \
node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
struct btree_node *next = NULL; \
int cmp = comparator(node, cur_node); \
\
if (cmp == 1) { \
next = btree_right(cur); \
\
if (!next) { \
btree_put_right(cur, &node->container_node_member); \
break; \
} \
} else if (cmp == -1) { \
next = btree_left(cur); \
\
if (!next) { \
btree_put_left(cur, &node->container_node_member); \
break; \
} \
} else { \
return; \
} \
\
cur = next; \
} \
\
btree_insert_fixup(tree, &node->container_node_member); \
#define BTREE_DEFINE_INSERT( \
node_type, \
container_node_member, \
container_key_member, \
function_name, \
comparator) \
void function_name(struct btree *tree, node_type *node) \
{ \
if (!tree->b_root) { \
tree->b_root = &node->container_node_member; \
btree_insert_fixup( \
tree, \
&node->container_node_member); \
return; \
} \
\
struct btree_node *cur = tree->b_root; \
while (1) { \
node_type *cur_node = BTREE_CONTAINER( \
node_type, \
container_node_member, \
cur); \
struct btree_node *next = NULL; \
int cmp = comparator(node, cur_node); \
\
if (cmp == 1) { \
next = btree_right(cur); \
\
if (!next) { \
btree_put_right( \
cur, \
&node->container_node_member); \
break; \
} \
} else if (cmp == -1) { \
next = btree_left(cur); \
\
if (!next) { \
btree_put_left( \
cur, \
&node->container_node_member); \
break; \
} \
} else { \
return; \
} \
\
cur = next; \
} \
\
btree_insert_fixup(tree, &node->container_node_member); \
}
/* defines a simple tree search function.
this function assumes that your nodes have simple integer keys that can be compared with the usual operators.
this function assumes that your nodes have simple integer keys that can be
compared with the usual operators.
EXAMPLE:
if you have a tree node type like this:
struct my_tree_node {
int key;
struct btree_node base;
struct btree_node base;
}
You would use the following call to generate a search function for a tree with this node type:
You would use the following call to generate a search function for a tree
with this node type:
BTREE_DEFINE_SIMPLE_GET(struct my_tree_node, int, base, key, my_tree_node_get);
BTREE_DEFINE_SIMPLE_GET(struct my_tree_node, int, base, key,
my_tree_node_get);
Which would emit a function defined like:
static struct my_tree_node *my_tree_node_get(struct btree *tree, int key);
@param node_type your custom tree node type. usually a structure that contains a struct btree_node member.
@param key_type the type name of the key embedded in your custom tree node type. this type must be
compatible with the builtin comparison operators.
@param container_node_member the name of the struct btree_node member variable within your custom type.
@param container_key_member the name of the key member variable within your custom type.
@param node_type your custom tree node type. usually a structure that
contains a struct btree_node member.
@param key_type the type name of the key embedded in your custom tree node
type. this type must be compatible with the builtin comparison operators.
@param container_node_member the name of the struct btree_node member
variable within your custom type.
@param container_key_member the name of the key member variable within your
custom type.
@param function_name the name of the function to generate.
*/
#define BTREE_DEFINE_SIMPLE_GET(node_type, key_type, container_node_member, container_key_member, function_name) \
node_type *function_name(struct btree *tree, key_type key) \
{ \
struct btree_node *cur = tree->b_root; \
while (cur) { \
node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
if (key > cur_node->container_key_member) { \
cur = btree_right(cur); \
} else if (key < cur_node->container_key_member) { \
cur = btree_left(cur); \
} else { \
return cur_node; \
} \
} \
\
return NULL; \
#define BTREE_DEFINE_SIMPLE_GET( \
node_type, \
key_type, \
container_node_member, \
container_key_member, \
function_name) \
node_type *function_name(struct btree *tree, key_type key) \
{ \
struct btree_node *cur = tree->b_root; \
while (cur) { \
node_type *cur_node = BTREE_CONTAINER( \
node_type, \
container_node_member, \
cur); \
if (key > cur_node->container_key_member) { \
cur = btree_right(cur); \
} else if (key < cur_node->container_key_member) { \
cur = btree_left(cur); \
} else { \
return cur_node; \
} \
} \
\
return NULL; \
}
/* perform an in-order traversal of a binary tree
@@ -230,7 +295,7 @@ extern "C" {
struct my_tree_node {
int key;
struct btree_node base;
struct btree_node base;
}
and you want to do something like:
@@ -241,15 +306,23 @@ extern "C" {
btree_foreach (struct my_tree_node, node, &my_tree, base) { ... }
@param iter_type the type name of the iterator variable. this should be the tree's node type, and shouldn't be a pointer.
@param iter_type the type name of the iterator variable. this should be the
tree's node type, and shouldn't be a pointer.
@param iter_name the name of the iterator variable.
@param tree_name a pointer to the tree to traverse.
@param node_member the name of the struct btree_node member variable within the tree node type.
@param node_member the name of the struct btree_node member variable within
the tree node type.
*/
#define btree_foreach(iter_type, iter_name, tree_name, node_member) \
for (iter_type *iter_name = BTREE_CONTAINER(iter_type, node_member, btree_first(tree_name)); \
iter_name; \
iter_name = BTREE_CONTAINER(iter_type, node_member, btree_next(&((iter_name)->node_member))))
#define btree_foreach(iter_type, iter_name, tree_name, node_member) \
for (iter_type *iter_name = BTREE_CONTAINER( \
iter_type, \
node_member, \
btree_first(tree_name)); \
iter_name; \
iter_name = BTREE_CONTAINER( \
iter_type, \
node_member, \
btree_next(&((iter_name)->node_member))))
/* perform a reverse in-order traversal of a binary tree
@@ -261,7 +334,7 @@ extern "C" {
struct my_tree_node {
int key;
struct btree_node base;
struct btree_node base;
}
and you want to do something like:
@@ -272,35 +345,43 @@ extern "C" {
btree_foreach_r (struct my_tree_node, node, &my_tree, base) { ... }
@param iter_type the type name of the iterator variable. this should be the tree's node type, and shouldn't be a pointer.
@param iter_type the type name of the iterator variable. this should be the
tree's node type, and shouldn't be a pointer.
@param iter_name the name of the iterator variable.
@param tree_name a pointer to the tree to traverse.
@param node_member the name of the struct btree_node member variable within the tree node type.
@param node_member the name of the struct btree_node member variable within
the tree node type.
*/
#define btree_foreach_r(iter_type, iter_name, tree_name, node_member) \
for (iter_type *iter_name = BTREE_CONTAINER(iter_type, node_member, btree_last(tree_name)); \
iter_name; \
iter_name = BTREE_CONTAINER(iter_type, node_member, btree_prev(&((iter_name)->node_member))))
#define btree_foreach_r(iter_type, iter_name, tree_name, node_member) \
for (iter_type *iter_name \
= BTREE_CONTAINER(iter_type, node_member, btree_last(tree_name)); \
iter_name; \
iter_name = BTREE_CONTAINER( \
iter_type, \
node_member, \
btree_prev(&((iter_name)->node_member))))
/* binary tree nodes. this *cannot* be used directly. you need to define a custom node type
that contains a member variable of type struct btree_node.
/* binary tree nodes. this *cannot* be used directly. you need to define a
custom node type that contains a member variable of type struct btree_node.
you would then use the supplied macros to define functions to manipulate your custom binary tree.
you would then use the supplied macros to define functions to manipulate your
custom binary tree.
*/
struct btree_node {
struct btree_node *b_parent, *b_left, *b_right;
unsigned short b_height;
};
/* binary tree. unlike struct btree_node, you can define variables of type struct btree. */
/* binary tree. unlike struct btree_node, you can define variables of type
* struct btree. */
struct btree {
struct btree_node *b_root;
};
/* re-balance a binary tree after an insertion operation.
NOTE that, if you define an insertion function using BTREE_DEFINE_INSERT or similar,
this function will automatically be called for you.
NOTE that, if you define an insertion function using BTREE_DEFINE_INSERT or
similar, this function will automatically be called for you.
@param tree the tree to re-balance.
@param node the node that was just inserted into the tree.
@@ -316,29 +397,42 @@ extern void btree_delete(struct btree *tree, struct btree_node *node);
/* get the first node in a binary tree.
this will be the node with the smallest key (i.e. the node that is furthest-left from the root)
this will be the node with the smallest key (i.e. the node that is
furthest-left from the root)
*/
extern struct btree_node *btree_first(struct btree *tree);
/* get the last node in a binary tree.
this will be the node with the largest key (i.e. the node that is furthest-right from the root)
this will be the node with the largest key (i.e. the node that is
furthest-right from the root)
*/
extern struct btree_node *btree_last(struct btree *tree);
/* for any binary tree node, this function returns the node with the next-largest key value */
/* for any binary tree node, this function returns the node with the
* next-largest key value */
extern struct btree_node *btree_next(struct btree_node *node);
/* for any binary tree node, this function returns the node with the next-smallest key value */
/* for any binary tree node, this function returns the node with the
* next-smallest key value */
extern struct btree_node *btree_prev(struct btree_node *node);
/* returns true when the tree contains no nodes at all. */
static inline bool btree_empty(const struct btree *tree)
{
	return !tree->b_root;
}
/* sets `child` as the immediate left-child of `parent` */
static inline void btree_put_left(struct btree_node *parent, struct btree_node *child)
static inline void btree_put_left(
	struct btree_node *parent,
	struct btree_node *child)
{
	/* link in both directions. note: any previous left child of `parent`
	 * is silently overwritten, and node heights are not updated here --
	 * callers are expected to re-balance afterwards. */
	parent->b_left = child;
	child->b_parent = parent;
}
/* sets `child` as the immediate right-child of `parent` */
static inline void btree_put_right(struct btree_node *parent, struct btree_node *child)
static inline void btree_put_right(
struct btree_node *parent,
struct btree_node *child)
{
parent->b_right = child;
child->b_parent = parent;

55
include/kernel/channel.h Normal file
View File

@@ -0,0 +1,55 @@
#ifndef KERNEL_CHANNEL_H_
#define KERNEL_CHANNEL_H_
#include <kernel/object.h>
#include <kernel/sched.h>
struct msg;
/* receiving end of an IPC connection (see struct port for the sending end). */
struct channel {
	struct object c_base;	/* generic kernel-object header */
	unsigned int c_id;
	struct waitqueue c_wq;	/* threads blocked waiting on this channel */
	/* queued messages; presumably keyed by msgid -- TODO confirm */
	struct btree c_msg;
	/* linkage node; looks like it threads into the owning task's
	 * b_channels tree (see task_add_channel) -- confirm */
	struct btree_node c_node;
};
extern kern_status_t channel_type_init(void);
extern struct channel *channel_cast(struct object *obj);
extern struct channel *channel_create(void);
extern kern_status_t channel_enqueue_msg(
struct channel *channel,
struct msg *msg);
extern kern_status_t channel_recv_msg(
struct channel *channel,
kern_msg_t *out_msg,
unsigned long *irq_flags);
extern kern_status_t channel_reply_msg(
struct channel *channel,
msgid_t id,
const kern_msg_t *reply,
unsigned long *irq_flags);
extern kern_status_t channel_read_msg(
struct channel *channel,
msgid_t msg,
size_t offset,
struct vm_region *dest_region,
const kern_iovec_t *dest_iov,
size_t dest_iov_count,
size_t *nr_read);
extern kern_status_t channel_write_msg(
struct channel *channel,
msgid_t msg,
size_t offset,
struct vm_region *src_region,
const kern_iovec_t *src_iov,
size_t src_iov_count,
size_t *nr_written);
DEFINE_OBJECT_LOCK_FUNCTION(channel, c_base)
#endif

View File

@@ -1,5 +1,5 @@
#ifndef MANGO_CLOCK_H_
#define MANGO_CLOCK_H_
#ifndef KERNEL_CLOCK_H_
#define KERNEL_CLOCK_H_
#include <stdint.h>

View File

@@ -1,5 +1,5 @@
#ifndef MANGO_COMPILER_H_
#define MANGO_COMPILER_H_
#ifndef KERNEL_COMPILER_H_
#define KERNEL_COMPILER_H_
#ifdef __cplusplus
template <typename T>

View File

@@ -1,5 +1,5 @@
#ifndef MANGO_CONSOLE_H_
#define MANGO_CONSOLE_H_
#ifndef KERNEL_CONSOLE_H_
#define KERNEL_CONSOLE_H_
/* The console system
@@ -14,9 +14,10 @@
representing a serial port may allow both sending AND receiving over the
port.
*/
#include <mango/queue.h>
#include <mango/locks.h>
#include <kernel/locks.h>
#include <kernel/queue.h>
#include <mango/status.h>
#include <mango/types.h>
#ifdef __cplusplus
extern "C" {

View File

@@ -1,10 +1,10 @@
#ifndef MANGO_CPU_H_
#define MANGO_CPU_H_
#ifndef KERNEL_CPU_H_
#define KERNEL_CPU_H_
#include <mango/types.h>
#include <mango/machine/cpu.h>
#include <kernel/types.h>
#include <kernel/machine/cpu.h>
#include <stdint.h>
#include <mango/sched.h>
#include <kernel/sched.h>
#ifdef __cplusplus
extern "C" {

View File

@@ -1,5 +1,5 @@
#ifndef MANGO_FB_H_
#define MANGO_FB_H_
#ifndef KERNEL_FB_H_
#define KERNEL_FB_H_
#include <stdint.h>

View File

@@ -1,5 +1,5 @@
#ifndef MANGO_FLAGS_H_
#define MANGO_FLAGS_H_
#ifndef KERNEL_FLAGS_H_
#define KERNEL_FLAGS_H_
#include <stdint.h>

69
include/kernel/handle.h Normal file
View File

@@ -0,0 +1,69 @@
#ifndef KERNEL_HANDLE_H_
#define KERNEL_HANDLE_H_
#include <kernel/bitmap.h>
#include <mango/status.h>
#include <mango/types.h>
#include <stddef.h>
#include <stdint.h>
/* subtract 32 bytes to account for the handle bitmap */
#define HANDLES_PER_TABLE ((4096 - 32) / sizeof(struct handle))
#define REFS_PER_TABLE ((4096 - 64) / sizeof(struct handle_table *))
typedef uint32_t kern_handle_t;
typedef uintptr_t handle_flags_t;
struct task;
struct object;
struct vm_region;
struct handle_list;
struct handle {
struct object *h_object;
handle_flags_t h_flags;
};
struct handle_table {
union {
struct {
/* bitmap tracks which bits in t_handle_list are
* allocated */
DECLARE_BITMAP(t_handle_map, HANDLES_PER_TABLE);
struct handle t_handle_list[HANDLES_PER_TABLE];
} t_handles;
struct {
/* bitmap tracks which sub-tables are fully-allocated */
DECLARE_BITMAP(t_subtable_map, REFS_PER_TABLE);
struct handle_table *t_subtable_list[REFS_PER_TABLE];
} t_subtables;
};
};
extern struct handle_table *handle_table_create(void);
extern void handle_table_destroy(struct handle_table *tab);
extern kern_status_t handle_table_alloc_handle(
struct handle_table *tab,
struct handle **out_slot,
kern_handle_t *out_handle);
extern kern_status_t handle_table_free_handle(
struct handle_table *tab,
kern_handle_t handle);
extern struct handle *handle_table_get_handle(
struct handle_table *tab,
kern_handle_t handle);
extern kern_status_t handle_table_transfer(
struct vm_region *dst_region,
struct handle_table *dst,
kern_msg_handle_t *dst_handles,
size_t dst_handles_max,
struct vm_region *src_region,
struct handle_table *src,
kern_msg_handle_t *src_handles,
size_t src_handles_count);
#endif

View File

@@ -1,8 +1,8 @@
#ifndef MANGO_INIT_H_
#define MANGO_INIT_H_
#ifndef KERNEL_INIT_H_
#define KERNEL_INIT_H_
#include <mango/compiler.h>
#include <mango/machine/init.h>
#include <kernel/compiler.h>
#include <kernel/machine/init.h>
#ifdef __cplusplus
extern "C" {

View File

@@ -1,8 +1,8 @@
#ifndef MANGO_INPUT_H_
#define MANGO_INPUT_H_
#ifndef KERNEL_INPUT_H_
#define KERNEL_INPUT_H_
#include <stdint.h>
#include <mango/queue.h>
#include <kernel/queue.h>
#include <mango/status.h>
enum input_event_hook_flags {

31
include/kernel/iovec.h Normal file
View File

@@ -0,0 +1,31 @@
#ifndef KERNEL_IOVEC_H_
#define KERNEL_IOVEC_H_
#include <mango/types.h>
#include <stddef.h>
struct iovec_iterator {
/* if this is set, we are iterating over a list of iovecs stored in
* userspace, and must go through this region to retrieve the data. */
struct vm_region *it_region;
const kern_iovec_t *it_vecs;
size_t it_nr_vecs;
size_t it_vec_ptr;
virt_addr_t it_base;
size_t it_len;
};
extern void iovec_iterator_begin(
struct iovec_iterator *it,
const kern_iovec_t *vecs,
size_t nr_vecs);
extern void iovec_iterator_begin_user(
struct iovec_iterator *it,
struct vm_region *address_space,
const kern_iovec_t *vecs,
size_t nr_vecs);
extern void iovec_iterator_seek(struct iovec_iterator *it, size_t nr_bytes);
#endif

61
include/kernel/locks.h Normal file
View File

@@ -0,0 +1,61 @@
#ifndef KERNEL_LOCKS_H_
#define KERNEL_LOCKS_H_
#include <kernel/compiler.h>
#include <kernel/machine/hwlock.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef __aligned(8) ml_hwlock_t spin_lock_t;
#define SPIN_LOCK_INIT ML_HWLOCK_INIT
#define spin_lock(lck) ml_hwlock_lock(lck);
#define spin_unlock(lck) ml_hwlock_unlock(lck);
#define spin_lock_irq(lck) ml_hwlock_lock_irq(lck);
#define spin_unlock_irq(lck) ml_hwlock_unlock_irq(lck);
#define spin_lock_irqsave(lck, flags) ml_hwlock_lock_irqsave(lck, flags);
#define spin_unlock_irqrestore(lck, flags) \
ml_hwlock_unlock_irqrestore(lck, flags);
/* acquire two spin locks, saving the current IRQ state in *flags.
 * to avoid ABBA deadlocks, the lock at the lesser memory address is always
 * taken first; spin_unlock_pair_irqrestore releases in the opposite order.
 * passing the same lock for both `a` and `b` locks it exactly once.
 * NOTE(review): relational comparison of pointers into different objects is
 * formally unspecified in ISO C, though it is a common kernel idiom --
 * confirm all supported targets order pointers consistently. */
static inline void spin_lock_pair_irqsave(
	spin_lock_t *a,
	spin_lock_t *b,
	unsigned long *flags)
{
	if (a == b) {
		/* same lock: a single acquisition, never a self-deadlock */
		spin_lock_irqsave(a, flags);
	} else if (a < b) {
		spin_lock_irqsave(a, flags);
		spin_lock(b);
	} else {
		spin_lock_irqsave(b, flags);
		spin_lock(a);
	}
}
/* release a pair of spin locks taken with spin_lock_pair_irqsave.
 * locks are dropped in reverse acquisition order (higher address first),
 * and the saved IRQ state is restored together with the last unlock.
 * `flags` must be the value filled in by the matching lock call. */
static inline void spin_unlock_pair_irqrestore(
	spin_lock_t *a,
	spin_lock_t *b,
	unsigned long flags)
{
	if (a == b) {
		/* pair was really a single lock: one release */
		spin_unlock_irqrestore(a, flags);
	} else if (a < b) {
		spin_unlock(b);
		spin_unlock_irqrestore(a, flags);
	} else {
		spin_unlock(a);
		spin_unlock_irqrestore(b, flags);
	}
}
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -19,24 +19,25 @@
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
*/
#ifndef MANGO_MEMBLOCK_H_
#define MANGO_MEMBLOCK_H_
#ifndef KERNEL_MEMBLOCK_H_
#define KERNEL_MEMBLOCK_H_
#include <stddef.h>
#include <kernel/types.h>
#include <limits.h>
#include <mango/types.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
#define MEMBLOCK_INIT_MEMORY_REGION_COUNT 128
#define MEMBLOCK_INIT_MEMORY_REGION_COUNT 128
#define MEMBLOCK_INIT_RESERVED_REGION_COUNT 128
#define __for_each_mem_range(i, type_a, type_b, p_start, p_end) \
for ((i)->__idx = 0, __next_memory_region(i, type_a, type_b, p_start, p_end); \
(i)->__idx != ULLONG_MAX; \
__next_memory_region(i, type_a, type_b, p_start, p_end))
#define __for_each_mem_range(i, type_a, type_b, p_start, p_end) \
for ((i)->__idx = 0, \
__next_memory_region(i, type_a, type_b, p_start, p_end); \
(i)->__idx != ULLONG_MAX; \
__next_memory_region(i, type_a, type_b, p_start, p_end))
/* iterate through all memory regions known to memblock.
@@ -47,7 +48,7 @@ extern "C" {
@param i the iterator. this should be a pointer of type struct memblock_iter.
for each iteration, this structure will be filled with details about
the current memory region.
the current memory region.
@param p_start the lower bound of the memory region to iterate through.
if you don't want to use a lower bound, pass 0.
@param p_end the upper bound of the memory region to iterate through.
@@ -65,7 +66,7 @@ extern "C" {
struct memblock_iter it;
for_each_mem_region (&it, 0x40000, 0x80000) { ... }
*/
#define for_each_mem_range(i, p_start, p_end) \
#define for_each_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, NULL, p_start, p_end)
/* iterate through all memory regions reserved using memblock.
@@ -77,7 +78,7 @@ extern "C" {
@param i the iterator. this should be a pointer of type struct memblock_iter.
for each iteration, this structure will be filled with details about
the current memory region.
the current memory region.
@param p_start the lower bound of the memory region to iterate through.
if you don't want to use a lower bound, pass 0.
@param p_end the upper bound of the memory region to iterate through.
@@ -95,7 +96,7 @@ extern "C" {
struct memblock_iter it;
for_each_reserved_mem_region (&it, 0x40000, 0x80000) { ... }
*/
#define for_each_reserved_mem_range(i, p_start, p_end) \
#define for_each_reserved_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.reserved, NULL, p_start, p_end)
/* iterate through all memory regions known by memblock to be free.
@@ -108,7 +109,7 @@ extern "C" {
@param i the iterator. this should be a pointer of type struct memblock_iter.
for each iteration, this structure will be filled with details about
the current memory region.
the current memory region.
@param p_start the lower bound of the memory region to iterate through.
if you don't want to use a lower bound, pass 0.
@param p_end the upper bound of the memory region to iterate through.
@@ -138,19 +139,25 @@ extern "C" {
- 0x08000 -> 0x08fff
- 0x10000 -> 0x1ffff
*/
#define for_each_free_mem_range(i, p_start, p_end) \
__for_each_mem_range(i, &memblock.memory, &memblock.reserved, p_start, p_end)
#define for_each_free_mem_range(i, p_start, p_end) \
__for_each_mem_range( \
i, \
&memblock.memory, \
&memblock.reserved, \
p_start, \
p_end)
typedef uint64_t memblock_index_t;
enum memblock_region_status {
/* Used in memblock.memory regions, indicates that the memory region exists */
/* Used in memblock.memory regions, indicates that the memory region
* exists */
MEMBLOCK_MEMORY = 0,
/* Used in memblock.reserved regions, indicates that the memory region was reserved
* by a call to memblock_alloc() */
/* Used in memblock.reserved regions, indicates that the memory region
* was reserved by a call to memblock_alloc() */
MEMBLOCK_ALLOC,
/* Used in memblock.reserved regions, indicates that the memory region was reserved
* by a call to memblock_reserve() */
/* Used in memblock.reserved regions, indicates that the memory region
* was reserved by a call to memblock_reserve() */
MEMBLOCK_RESERVED,
};
@@ -176,9 +183,10 @@ struct memblock {
/* bounds of the memory region that can be used by memblock_alloc()
both of these are virtual addresses */
uintptr_t m_alloc_start, m_alloc_end;
/* memblock assumes that all memory in the alloc zone is contiguously mapped
(if paging is enabled). m_voffset is the offset that needs to be added to
a given physical address to get the corresponding virtual address */
/* memblock assumes that all memory in the alloc zone is contiguously
mapped (if paging is enabled). m_voffset is the offset that needs to
be added to a given physical address to get the corresponding virtual
address */
uintptr_t m_voffset;
struct memblock_type memory;
@@ -212,7 +220,10 @@ extern int __next_mem_range(struct memblock_iter *it);
@param voffset the offset between the physical address of a given page and
its corresponding virtual address.
*/
extern int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset);
extern int memblock_init(
uintptr_t alloc_start,
uintptr_t alloc_end,
uintptr_t voffset);
/* add a region of memory to memblock.
@@ -234,7 +245,8 @@ extern int memblock_add(phys_addr_t base, size_t size);
reserved memory will not be used by memblock_alloc(), and will remain
reserved when the vm_page memory map is initialised.
@param base the physical address of the start of the memory region to reserve.
@param base the physical address of the start of the memory region to
reserve.
@param size the size of the memory region to reserve in bytes.
*/
extern int memblock_reserve(phys_addr_t base, size_t size);
@@ -257,7 +269,7 @@ extern int memblock_reserve(phys_addr_t base, size_t size);
@param size the size of the buffer to allocate in bytes.
@param align the alignment to use. for example, an alignment of 4096
will result in the returned pointer being a multiple
of 4096. this must be a power of 2.
of 4096. this must be a power of 2.
*/
extern void *memblock_alloc(size_t size, phys_addr_t align);
@@ -279,7 +291,7 @@ extern void *memblock_alloc(size_t size, phys_addr_t align);
@param size the size of the buffer to allocate in bytes.
@param align the alignment to use. for example, an alignment of 4096
will result in the returned pointer being a multiple
of 4096. this must be a power of 2.
of 4096. this must be a power of 2.
*/
extern phys_addr_t memblock_alloc_phys(size_t size, phys_addr_t align);
@@ -310,7 +322,7 @@ extern int memblock_free_phys(phys_addr_t addr, size_t size);
@param p the pointer to convert.
*/
extern phys_addr_t memblock_virt_to_phys(void *p);
extern phys_addr_t memblock_virt_to_phys(const void *p);
/* convert a physical address returned by memblock
to a virtual pointer.
@@ -319,9 +331,14 @@ extern phys_addr_t memblock_virt_to_phys(void *p);
*/
extern void *memblock_phys_to_virt(phys_addr_t p);
extern void __next_memory_region(struct memblock_iter *it, \
struct memblock_type *type_a, struct memblock_type *type_b,
phys_addr_t start, phys_addr_t end);
extern void __next_memory_region(
struct memblock_iter *it,
struct memblock_type *type_a,
struct memblock_type *type_b,
phys_addr_t start,
phys_addr_t end);
extern void memblock_dump(void);
#ifdef __cplusplus
}

29
include/kernel/msg.h Normal file
View File

@@ -0,0 +1,29 @@
#ifndef KERNEL_MSG_H_
#define KERNEL_MSG_H_
#include <kernel/btree.h>
#include <kernel/locks.h>
#include <mango/status.h>
#include <mango/types.h>
struct port;
struct thread;
enum kmsg_status {
KMSG_WAIT_RECEIVE,
KMSG_WAIT_REPLY,
KMSG_REPLY_SENT,
};
/* an in-flight IPC message. embedded per-thread as struct thread's tr_msg. */
struct msg {
	spin_lock_t msg_lock;		/* protects this structure */
	enum kmsg_status msg_status;	/* wait-receive / wait-reply / reply-sent */
	/* linkage node; presumably threaded into the receiving channel's
	 * c_msg tree -- TODO confirm */
	struct btree_node msg_node;
	msgid_t msg_id;
	kern_status_t msg_result;	/* status handed back to the sender */
	/* identity of the sender, so the receiver knows who it is talking to */
	struct port *msg_sender_port;
	struct thread *msg_sender_thread;
	kern_msg_t msg_req, msg_resp;	/* request and reply payloads */
};
#endif

122
include/kernel/object.h Normal file
View File

@@ -0,0 +1,122 @@
#ifndef KERNEL_OBJECT_H_
#define KERNEL_OBJECT_H_
#include <kernel/flags.h>
#include <kernel/locks.h>
#include <kernel/vm.h>
#include <mango/status.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/* generates the standard set of static inline lock wrappers for a type that
 * embeds a struct object as member `base`:
 *   <name>_lock / <name>_unlock
 *   <name>_lock_irqsave / <name>_unlock_irqrestore
 *   <name>_lock_pair_irqsave / <name>_unlock_pair_irqrestore
 * each wrapper simply forwards to the corresponding object_* function,
 * passing the embedded object header. */
#define DEFINE_OBJECT_LOCK_FUNCTION(object_name, base) \
	static inline void object_name##_lock(struct object_name *p) \
	{ \
		object_lock(&p->base); \
	} \
	static inline void object_name##_unlock(struct object_name *p) \
	{ \
		object_unlock(&p->base); \
	} \
	static inline void object_name##_lock_irqsave( \
		struct object_name *p, \
		unsigned long *flags) \
	{ \
		object_lock_irqsave(&p->base, flags); \
	} \
	static inline void object_name##_unlock_irqrestore( \
		struct object_name *p, \
		unsigned long flags) \
	{ \
		object_unlock_irqrestore(&p->base, flags); \
	} \
	static inline void object_name##_lock_pair_irqsave( \
		struct object_name *a, \
		struct object_name *b, \
		unsigned long *flags) \
	{ \
		object_lock_pair_irqsave(&a->base, &b->base, flags); \
	} \
	static inline void object_name##_unlock_pair_irqrestore( \
		struct object_name *a, \
		struct object_name *b, \
		unsigned long flags) \
	{ \
		object_unlock_pair_irqrestore(&a->base, &b->base, flags); \
	}
#define OBJECT_MAGIC 0xBADDCAFE
#define OBJECT_NAME_MAX 64
#define OBJECT_PATH_MAX 256
/* recover a pointer to the containing structure from a pointer to its
 * embedded struct object member (container_of pattern).
 * @param to_type the containing structure type.
 * @param to_type_member the name of the embedded member within to_type.
 * @param p pointer to the embedded member.
 * FIX: the member offset must be subtracted from the raw integer address
 * *before* casting to (to_type *). the previous form cast first and then
 * subtracted, so offsetof() was scaled by sizeof(to_type) by pointer
 * arithmetic, yielding a bogus container pointer. */
#define OBJECT_CAST(to_type, to_type_member, p) \
	((to_type *)((uintptr_t)(p) - offsetof(to_type, to_type_member)))
/* checked variant of OBJECT_CAST: evaluates to the container pointer when
 * `objp`'s object header matches `obj_type`, and to NULL otherwise.
 * fully parenthesised so it composes safely inside larger expressions. */
#define OBJECT_C_CAST(c_type, c_type_member, obj_type, objp) \
	(OBJECT_IS_TYPE(objp, obj_type) \
		? OBJECT_CAST(c_type, c_type_member, (objp)) : NULL)
/* true when the object's type pointer matches `type_ptr` */
#define OBJECT_IS_TYPE(obj, type_ptr) ((obj)->ob_type == (type_ptr))
struct object;
struct object_attrib;
enum object_type_flags {
OBJTYPE_INIT = 0x01u,
};
struct object_ops {
kern_status_t (*destroy)(struct object *obj, struct queue *q);
kern_status_t (*destroy_recurse)(
struct queue_entry *entry,
struct object **out);
};
struct object_type {
enum object_type_flags ob_flags;
char ob_name[32];
unsigned int ob_size;
unsigned int ob_header_offset;
struct vm_cache ob_cache;
struct queue_entry ob_list;
struct object_ops ob_ops;
};
struct object {
uint32_t ob_magic;
koid_t ob_id;
struct object_type *ob_type;
spin_lock_t ob_lock;
unsigned int ob_refcount;
unsigned int ob_handles;
struct queue_entry ob_list;
} __aligned(sizeof(long));
extern kern_status_t object_bootstrap(void);
extern kern_status_t object_type_register(struct object_type *p);
extern kern_status_t object_type_unregister(struct object_type *p);
extern struct object *object_create(struct object_type *type);
extern struct object *object_ref(struct object *obj);
extern void object_unref(struct object *obj);
extern void object_add_handle(struct object *obj);
extern void object_remove_handle(struct object *obj);
extern void object_lock(struct object *obj);
extern void object_unlock(struct object *obj);
extern void object_lock_irqsave(struct object *obj, unsigned long *flags);
extern void object_unlock_irqrestore(struct object *obj, unsigned long flags);
extern void object_lock_pair_irqsave(
struct object *a,
struct object *b,
unsigned long *flags);
extern void object_unlock_pair_irqrestore(
struct object *a,
struct object *b,
unsigned long flags);
#ifdef __cplusplus
}
#endif
#endif

13
include/kernel/panic.h Normal file
View File

@@ -0,0 +1,13 @@
#ifndef KERNEL_PANIC_H_
#define KERNEL_PANIC_H_
#include <kernel/compiler.h>
struct ml_cpu_context;
#define panic(...) panic_irq(NULL, __VA_ARGS__)
extern void __noreturn
panic_irq(struct ml_cpu_context *ctx, const char *fmt, ...);
#endif

View File

@@ -1,9 +1,9 @@
#ifndef MANGO_PERCPU_H_
#define MANGO_PERCPU_H_
#ifndef KERNEL_PERCPU_H_
#define KERNEL_PERCPU_H_
#include <mango/status.h>
#include <mango/compiler.h>
#include <mango/sched.h>
#include <kernel/compiler.h>
#include <kernel/sched.h>
#ifdef __cplusplus
extern "C" {

81
include/kernel/pmap.h Normal file
View File

@@ -0,0 +1,81 @@
#ifndef KERNEL_PMAP_H_
#define KERNEL_PMAP_H_
/* all the functions declared in this file are defined in arch/xyz/pmap.c */
#include <kernel/machine/pmap.h>
#include <kernel/vm.h>
#include <mango/status.h>
#include <stddef.h>
#define PMAP_INVALID ML_PMAP_INVALID
#define PFN(x) ((x) >> VM_PAGE_SHIFT)
#ifdef __cplusplus
extern "C" {
#endif
typedef ml_pmap_t pmap_t;
typedef ml_pfn_t pfn_t;
enum pmap_fault_flags {
/* if set, the faulting page is present, and the fault is
* protection-related.
* if clear, the faulting page is missing, and the
* fault is due to the missing page.
*/
PMAP_FAULT_PRESENT = 0x01u,
/* if set, the faulting page was accessed from user mode.
* if clear, the faulting page was accessed from kernel mode.
*/
PMAP_FAULT_USER = 0x02u,
/* if set, the fault was caused by a write operation.
* if clear, the faulting page was caused by a read operation.
*/
PMAP_FAULT_WRITE = 0x04u,
/* if set, the fault was caused while fetching an instruction from the
* faulting page.
*/
PMAP_FAULT_IFETCH = 0x08u,
/* if set, the fault was caused by misconfigured page tables */
PMAP_FAULT_BADCFG = 0x10u,
};
enum pmap_flags {
PMAP_NORMAL = 0x00u,
PMAP_HUGEPAGE = 0x01u,
};
extern void pmap_bootstrap(void);
extern pmap_t get_kernel_pmap(void);
extern pmap_t pmap_create(void);
extern void pmap_destroy(pmap_t pmap);
extern void pmap_switch(pmap_t pmap);
extern kern_status_t pmap_handle_fault(
virt_addr_t fault_addr,
enum pmap_fault_flags flags);
extern kern_status_t pmap_add(
pmap_t pmap,
virt_addr_t p,
pfn_t pfn,
vm_prot_t prot,
enum pmap_flags flags);
extern kern_status_t pmap_add_block(
pmap_t pmap,
virt_addr_t p,
pfn_t pfn,
size_t len,
vm_prot_t prot,
enum pmap_flags flags);
extern kern_status_t pmap_remove(pmap_t pmap, virt_addr_t p);
extern kern_status_t pmap_remove_range(pmap_t pmap, virt_addr_t p, size_t len);
#ifdef __cplusplus
}
#endif
#endif

41
include/kernel/port.h Normal file
View File

@@ -0,0 +1,41 @@
#ifndef KERNEL_PORT_H_
#define KERNEL_PORT_H_
#include <kernel/object.h>
#include <kernel/sched.h>
enum port_status {
/* port is not connected */
PORT_OFFLINE = 0,
/* port is connected and ready to send messages */
PORT_READY,
/* port has sent a message, and is waiting for the remote to receive it
*/
PORT_SEND_BLOCKED,
/* port has sent a message, and the remote has received it. waiting for
* the remote to reply. */
PORT_REPLY_BLOCKED,
};
struct port {
struct object p_base;
enum port_status p_status;
struct channel *p_remote;
};
extern kern_status_t port_type_init(void);
extern struct port *port_cast(struct object *obj);
extern struct port *port_create(void);
extern kern_status_t port_connect(struct port *port, struct channel *remote);
extern kern_status_t port_disconnect(struct port *port);
extern kern_status_t port_send_msg(
struct port *port,
const kern_msg_t *msg,
kern_msg_t *out_response,
unsigned long *lock_flags);
DEFINE_OBJECT_LOCK_FUNCTION(port, p_base)
#endif

25
include/kernel/printk.h Normal file
View File

@@ -0,0 +1,25 @@
#ifndef KERNEL_PRINTK_H_
#define KERNEL_PRINTK_H_
#include <kernel/console.h>
#undef TRACE
#ifdef __cplusplus
extern "C" {
#endif
#ifdef TRACE
#define tracek(...) printk(__VA_ARGS__)
#else
#define tracek(...)
#endif
extern void early_printk_init(struct console *con);
extern int printk(const char *format, ...);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,7 +1,7 @@
#ifndef MANGO_QUEUE_H_
#define MANGO_QUEUE_H_
#ifndef KERNEL_QUEUE_H_
#define KERNEL_QUEUE_H_
#include <mango/libc/string.h>
#include <kernel/libc/string.h>
#include <stdbool.h>
#ifdef __cplusplus

View File

@@ -1,8 +1,8 @@
#ifndef MANGO_RINGBUFFER_H_
#define MANGO_RINGBUFFER_H_
#ifndef KERNEL_RINGBUFFER_H_
#define KERNEL_RINGBUFFER_H_
#include <mango/locks.h>
#include <mango/sched.h>
#include <kernel/locks.h>
#include <kernel/sched.h>
struct ringbuffer {
unsigned char *r_buffer;

View File

@@ -1,15 +1,18 @@
#ifndef MANGO_SCHED_H_
#define MANGO_SCHED_H_
#ifndef KERNEL_SCHED_H_
#define KERNEL_SCHED_H_
#include <mango/btree.h>
#include <mango/locks.h>
#include <mango/object.h>
#include <mango/pmap.h>
#include <mango/queue.h>
#include <kernel/btree.h>
#include <kernel/handle.h>
#include <kernel/locks.h>
#include <kernel/msg.h>
#include <kernel/object.h>
#include <kernel/pmap.h>
#include <kernel/queue.h>
#include <mango/status.h>
#define TASK_NAME_MAX 64
#define PRIO_MAX 32
#define PID_MAX 99999
#define THREAD_KSTACK_ORDER VM_PAGE_4K
#define THREAD_MAX 65536
@@ -32,6 +35,7 @@
extern "C" {
#endif
struct channel;
struct runqueue;
struct work_item;
@@ -75,13 +79,20 @@ struct task {
struct object t_base;
struct task *t_parent;
unsigned int t_id;
long t_id;
enum task_state t_state;
char t_name[TASK_NAME_MAX];
pmap_t t_pmap;
struct vm_region *t_address_space;
spin_lock_t t_handles_lock;
struct handle_table *t_handles;
struct btree b_channels;
struct btree_node t_tasklist;
struct queue_entry t_child_entry;
size_t t_next_thread_id;
struct queue t_threads;
struct queue t_children;
};
@@ -100,14 +111,17 @@ struct thread {
cycles_t tr_quantum_cycles, tr_quantum_target;
cycles_t tr_total_cycles;
uintptr_t tr_sp, tr_bp;
virt_addr_t tr_ip, tr_sp, tr_bp;
virt_addr_t tr_cpu_user_sp, tr_cpu_kernel_sp;
struct runqueue *tr_rq;
struct msg tr_msg;
struct queue_entry tr_threads;
struct queue_entry tr_parent_entry;
struct queue_entry tr_rqentry;
struct vm_page *tr_kstack;
struct vm_object *tr_ustack;
};
struct runqueue {
@@ -175,15 +189,35 @@ extern void rq_remove_thread(struct runqueue *rq, struct thread *thr);
extern struct runqueue *cpu_rq(unsigned int cpu);
extern struct task *task_alloc(void);
extern struct task *task_cast(struct object *obj);
extern struct task *task_create(const char *name, size_t name_len);
static inline struct task *task_ref(struct task *task)
{
return OBJECT_CAST(struct task, t_base, object_ref(&task->t_base));
}
static inline void task_deref(struct task *task)
static inline void task_unref(struct task *task)
{
object_deref(&task->t_base);
object_unref(&task->t_base);
}
extern struct task *task_from_pid(unsigned int pid);
extern kern_status_t task_add_child(struct task *parent, struct task *child);
extern kern_status_t task_add_channel(
struct task *task,
struct channel *channel,
unsigned int id);
extern struct channel *task_get_channel(struct task *task, unsigned int id);
extern struct task *task_from_tid(tid_t id);
extern kern_status_t task_open_handle(
struct task *task,
struct object *obj,
handle_flags_t flags,
kern_handle_t *out);
extern kern_status_t task_resolve_handle(
struct task *task,
kern_handle_t handle,
struct object **out_obj,
handle_flags_t *out_flags);
extern kern_status_t task_close_handle(struct task *task, kern_handle_t handle);
extern struct thread *task_create_thread(struct task *parent);
extern struct task *kernel_task(void);
extern struct task *idle_task(void);
extern cycles_t default_quantum(void);
@@ -198,21 +232,19 @@ extern void schedule_thread_on_cpu(struct thread *thr);
extern void start_charge_period(void);
extern void end_charge_period(void);
static inline void task_lock_irqsave(struct task *task, unsigned long *flags)
{
object_lock_irqsave(&task->t_base, flags);
}
static inline void task_unlock_irqrestore(
struct task *task,
unsigned long flags)
{
object_unlock_irqrestore(&task->t_base, flags);
}
DEFINE_OBJECT_LOCK_FUNCTION(task, t_base)
extern struct thread *thread_alloc(void);
extern kern_status_t thread_init(struct thread *thr, uintptr_t ip);
extern struct thread *thread_cast(struct object *obj);
extern kern_status_t thread_init_kernel(struct thread *thr, virt_addr_t ip);
extern kern_status_t thread_init_user(
struct thread *thr,
virt_addr_t ip,
virt_addr_t sp,
const uintptr_t *args,
size_t nr_args);
extern int thread_priority(struct thread *thr);
extern void thread_awaken(struct thread *thr);
extern void idle(void);
extern struct thread *create_kernel_thread(void (*fn)(void));
extern struct thread *create_idle_thread(void);

181
include/kernel/syscall.h Normal file
View File

@@ -0,0 +1,181 @@
#ifndef KERNEL_SYSCALL_H_
#define KERNEL_SYSCALL_H_
#include <kernel/handle.h>
#include <kernel/sched.h>
#include <kernel/vm-region.h>
#include <kernel/vm.h>
#include <mango/status.h>
#include <mango/syscall.h>
#define validate_access(task, ptr, len, flags) \
__validate_access(task, (const void *)ptr, len, flags)
#define validate_access_r(task, ptr, len) \
validate_access(task, ptr, len, VM_PROT_READ | VM_PROT_USER)
#define validate_access_w(task, ptr, len) \
validate_access(task, ptr, len, VM_PROT_WRITE | VM_PROT_USER)
#define validate_access_rw(task, ptr, len) \
validate_access( \
task, \
ptr, \
len, \
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_USER)
static inline bool __validate_access(
struct task *task,
const void *ptr,
size_t len,
vm_prot_t flags)
{
unsigned long irq_flags;
vm_region_lock_irqsave(task->t_address_space, &irq_flags);
bool result = vm_region_validate_access(
task->t_address_space,
(virt_addr_t)ptr,
len,
flags | VM_PROT_USER);
vm_region_unlock_irqrestore(task->t_address_space, irq_flags);
return result;
}
extern kern_status_t sys_task_exit(int status);
extern kern_status_t sys_task_self(kern_handle_t *out);
extern kern_status_t sys_task_create(
kern_handle_t parent_handle,
const char *name,
size_t name_len,
kern_handle_t *out_task,
kern_handle_t *out_address_space);
extern kern_status_t sys_task_create_thread(
kern_handle_t task,
virt_addr_t ip,
virt_addr_t sp,
uintptr_t *args,
size_t nr_args,
kern_handle_t *out_thread);
extern kern_status_t sys_task_get_address_space(
kern_handle_t task,
kern_handle_t *out);
extern kern_status_t sys_thread_start(kern_handle_t thread);
extern kern_status_t sys_vm_object_create(
const char *name,
size_t name_len,
size_t data_len,
vm_prot_t prot,
kern_handle_t *out);
extern kern_status_t sys_vm_object_read(
kern_handle_t object,
void *dst,
off_t offset,
size_t count,
size_t *nr_read);
extern kern_status_t sys_vm_object_write(
kern_handle_t object,
const void *src,
off_t offset,
size_t count,
size_t *nr_written);
extern kern_status_t sys_vm_object_copy(
kern_handle_t dst,
off_t dst_offset,
kern_handle_t src,
off_t src_offset,
size_t count,
size_t *nr_copied);
extern kern_status_t sys_vm_region_create(
kern_handle_t parent,
const char *name,
size_t name_len,
off_t offset,
size_t region_len,
vm_prot_t prot,
kern_handle_t *out,
virt_addr_t *out_base_address);
extern kern_status_t sys_vm_region_kill(kern_handle_t region);
extern kern_status_t sys_vm_region_read(
kern_handle_t region,
void *dst,
off_t offset,
size_t count,
size_t *nr_read);
extern kern_status_t sys_vm_region_write(
kern_handle_t region,
const void *src,
off_t offset,
size_t count,
size_t *nr_written);
extern kern_status_t sys_vm_region_map_absolute(
kern_handle_t region,
virt_addr_t map_address,
kern_handle_t object,
off_t object_offset,
size_t length,
vm_prot_t prot,
virt_addr_t *out_base_address);
extern kern_status_t sys_vm_region_map_relative(
kern_handle_t region,
off_t region_offset,
kern_handle_t object,
off_t object_offset,
size_t length,
vm_prot_t prot,
virt_addr_t *out_base_address);
extern kern_status_t sys_vm_region_unmap_absolute(
kern_handle_t region,
virt_addr_t address,
size_t length);
extern kern_status_t sys_vm_region_unmap_relative(
kern_handle_t region,
off_t offset,
size_t length);
extern kern_status_t sys_kern_log(const char *s);
extern kern_status_t sys_kern_handle_close(kern_handle_t handle);
extern kern_status_t sys_kern_config_get(
kern_config_key_t key,
void *ptr,
size_t len);
extern kern_status_t sys_kern_config_set(
kern_config_key_t key,
const void *ptr,
size_t len);
extern kern_status_t sys_channel_create(unsigned int id, kern_handle_t *out);
extern kern_status_t sys_port_create(kern_handle_t *out);
extern kern_status_t sys_port_connect(
kern_handle_t port,
tid_t task_id,
unsigned int channel_id);
extern kern_status_t sys_port_disconnect(kern_handle_t port);
extern kern_status_t sys_msg_send(
kern_handle_t port,
const kern_msg_t *msg,
kern_msg_t *out_reply);
extern kern_status_t sys_msg_recv(kern_handle_t channel, kern_msg_t *out_msg);
extern kern_status_t sys_msg_reply(
kern_handle_t channel,
msgid_t id,
const kern_msg_t *msg);
extern kern_status_t sys_msg_read(
kern_handle_t channel_handle,
msgid_t id,
size_t offset,
const kern_iovec_t *iov,
size_t iov_count,
size_t *nr_read);
extern kern_status_t sys_msg_write(
kern_handle_t channel,
msgid_t id,
size_t offset,
const kern_iovec_t *in,
size_t nr_in,
size_t *nr_written);
extern virt_addr_t syscall_get_function(unsigned int sysid);
#endif

View File

@@ -1,5 +1,5 @@
#ifndef MANGO_TEST_H_
#define MANGO_TEST_H_
#ifndef KERNEL_TEST_H_
#define KERNEL_TEST_H_
#ifdef __cplusplus
extern "C" {

18
include/kernel/types.h Normal file
View File

@@ -0,0 +1,18 @@
#ifndef KERNEL_TYPES_H_
#define KERNEL_TYPES_H_
#include <mango/types.h>
#include <stddef.h>
#include <stdint.h>
#define CYCLES_MAX UINT64_MAX
typedef uint64_t cycles_t;
typedef uint64_t sectors_t;
struct boot_module {
phys_addr_t mod_base;
size_t mod_size;
};
#endif

View File

@@ -1,27 +1,35 @@
#ifndef MANGO_UTIL_H_
#define MANGO_UTIL_H_
#ifndef KERNEL_UTIL_H_
#define KERNEL_UTIL_H_
#include <mango/types.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define CLAMP(x, lo, hi) (MIN(MAX(x, lo), hi))
extern uint64_t hash_string(const char *s);
extern void data_size_to_string(size_t value, char *out, size_t outsz);
static inline bool power_of_2(size_t x) { return (x > 0 && (x & (x - 1)) == 0); }
static inline unsigned long long div64_pow2(unsigned long long x, unsigned long long y)
static inline bool power_of_2(size_t x)
{
return (x > 0 && (x & (x - 1)) == 0);
}
static inline unsigned long long div64_pow2(
unsigned long long x,
unsigned long long y)
{
return x >> (__builtin_ctz(y));
}
static inline unsigned long long absdiff64(unsigned long long x, unsigned long long y)
static inline unsigned long long absdiff64(
unsigned long long x,
unsigned long long y)
{
return x < y ? y - x : x - y;
}
@@ -53,6 +61,8 @@ extern uint64_t host_to_little_u64(uint64_t v);
extern uint64_t big_to_host_u64(uint64_t v);
extern uint64_t little_to_host_u64(uint64_t v);
extern bool fill_random(void *buffer, unsigned int size);
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,91 @@
#ifndef KERNEL_VM_OBJECT_H_
#define KERNEL_VM_OBJECT_H_
#include <kernel/locks.h>
#include <kernel/object.h>
#define VM_OBJECT_NAME_MAX 64
enum vm_object_flags {
/* the memory behind this vm-object wasn't allocated by us, and
* therefore shouldn't be freed by us */
VMO_IN_PLACE = 0x01u,
};
struct vm_object {
struct object vo_base;
char vo_name[VM_OBJECT_NAME_MAX];
enum vm_object_flags vo_flags;
/* queue of struct vm_region_mapping */
struct queue vo_mappings;
/* memory protection flags. mappings of this vm_object can only use
* a subset of the flags set in this mask. */
vm_prot_t vo_prot;
/* btree of vm_pages that have been allocated to this vm_object.
* vm_page->p_vmo_offset and the size of each page is the bst key. */
struct btree vo_pages;
/* total length of the vm_object in bytes. */
size_t vo_size;
};
extern kern_status_t vm_object_type_init(void);
extern struct vm_object *vm_object_cast(struct object *obj);
/* create a vm_object with the specified length in bytes and protection flags.
* the length will be automatically rounded up to the nearest vm_object page
* order size. the actual page frames themselves won't be allocated until
* they are mapped and accessed. */
extern struct vm_object *vm_object_create(
const char *name,
size_t name_len,
size_t data_len,
vm_prot_t prot);
/* create a vm_object that represents the specified range of physical memory.
* the length will be automatically rounded up to the nearest vm_object page
* order size.
* NOTE this function assumes that the physical memory has already been
* reserved, and is not in use by any other kernel component. */
extern struct vm_object *vm_object_create_in_place(
const char *name,
size_t name_len,
phys_addr_t base,
size_t data_len,
vm_prot_t prot);
extern struct vm_page *vm_object_get_page(
const struct vm_object *vo,
off_t offset);
extern struct vm_page *vm_object_alloc_page(
struct vm_object *vo,
off_t offset,
enum vm_page_order size);
extern kern_status_t vm_object_read(
struct vm_object *vo,
void *dst,
off_t offset,
size_t count,
size_t *nr_read);
extern kern_status_t vm_object_write(
struct vm_object *vo,
const void *dst,
off_t offset,
size_t count,
size_t *nr_written);
extern kern_status_t vm_object_copy(
struct vm_object *dst,
off_t dst_offset,
struct vm_object *src,
off_t src_offset,
size_t count,
size_t *nr_copied);
DEFINE_OBJECT_LOCK_FUNCTION(vm_object, vo_base)
#endif

191
include/kernel/vm-region.h Normal file
View File

@@ -0,0 +1,191 @@
#ifndef KERNEL_VM_REGION_H_
#define KERNEL_VM_REGION_H_
#include <kernel/object.h>
#include <kernel/pmap.h>
#include <kernel/vm.h>
#define VM_REGION_NAME_MAX 64
#define VM_REGION_COPY_ALL ((size_t)-1)
struct vm_region;
struct vm_object;
enum vm_region_status {
VM_REGION_DEAD = 0,
VM_REGION_ONLINE,
};
enum vm_region_entry_type {
VM_REGION_ENTRY_NONE = 0,
VM_REGION_ENTRY_REGION,
VM_REGION_ENTRY_MAPPING,
};
struct vm_region_entry {
union {
struct btree_node e_node;
/* this entry is only used to queue vm-region objects for
* recursive cleanup */
struct queue_entry e_entry;
};
struct vm_region_entry *e_parent;
enum vm_region_entry_type e_type;
/* absolute address of this entry */
virt_addr_t e_address;
/* offset in bytes of this entry within its immediate parent. */
off_t e_offset;
/* size of the entry in bytes */
size_t e_size;
};
struct vm_region_mapping {
struct vm_region_entry m_entry;
struct vm_object *m_object;
/* used to link to vm_object->vo_mappings */
struct queue_entry m_object_entry;
vm_prot_t m_prot;
/* offset in bytes to the start of the object data that was mapped */
off_t m_object_offset;
};
struct vm_region {
struct object vr_base;
enum vm_region_status vr_status;
struct vm_region_entry vr_entry;
char vr_name[VM_REGION_NAME_MAX];
/* btree of struct vm_region_entry.
* sibling entries cannot overlap each other, and child entries must
* be entirely contained within their immediate parent entry. */
struct btree vr_entries;
/* memory protection restriction mask.
* any mapping in this region, or any of its children, cannot use
* protection flags that are not set in this mask.
* for example, if VM_PROT_EXEC is /not/ set here, no mapping
* can be created in this region or any child region with VM_PROT_EXEC
* set. */
vm_prot_t vr_prot;
/* the physical address space in which mappings in this region (and
* its children) are created */
pmap_t vr_pmap;
};
extern kern_status_t vm_region_type_init(void);
extern struct vm_region *vm_region_cast(struct object *obj);
/* create a new vm-region, optionally within a parent region.
* `offset` is the byte offset within the parent region where the new region
* should start.
* if no parent is specified, `offset` is the absolute virtual address of the
* start of the region.
* in both cases, `len` is the length of the new region in bytes. */
extern kern_status_t vm_region_create(
struct vm_region *parent,
const char *name,
size_t name_len,
off_t offset,
size_t region_len,
vm_prot_t prot,
struct vm_region **out);
/* recursively kills a given region and all of its sub-regions.
* when a region is killed, all of its mappings are unmapped, and any further
* operations on the region are denied. however, all handles and references to
the region (and any sub-region) remain valid, and no kernel memory is
* de-allocated.
* the memory used by the vm-region object itself is de-allocated when the last
* handle/reference to the object is released.
* this function should be called with `region` locked.
*/
extern kern_status_t vm_region_kill(
struct vm_region *region,
unsigned long *lock_flags);
/* map a vm-object into a vm-region.
* [region_offset,length] must fall within exactly one region, and cannot span
* multiple sibling regions.
* if [region_offset,length] falls within a child region, the map operation
* will be transparently redirected to the relevant region.
* `prot` must be allowed both by the region into which the mapping is being
* created AND the vm-object being mapped. */
extern kern_status_t vm_region_map_object(
struct vm_region *region,
off_t region_offset,
struct vm_object *object,
off_t object_offset,
size_t length,
vm_prot_t prot,
virt_addr_t *out);
extern kern_status_t vm_region_unmap(
struct vm_region *region,
off_t region_offset,
size_t length);
extern bool vm_region_validate_access(
struct vm_region *region,
off_t offset,
size_t len,
vm_prot_t prot);
/* find the mapping corresponding to the given virtual address, and page-in the
* necessary vm_page to allow the memory access to succeed. if the relevant
* vm-object page hasn't been allocated yet, it will be allocated here. */
extern kern_status_t vm_region_demand_map(
struct vm_region *region,
virt_addr_t addr,
enum pmap_fault_flags flags);
/* get the absolute base virtual address of a region within its
* parent/ancestors. */
extern virt_addr_t vm_region_get_base_address(const struct vm_region *region);
extern void vm_region_dump(struct vm_region *region);
/* read data from the user-space area of a vm-region into a kernel-mode buffer
*/
extern kern_status_t vm_region_read_kernel(
struct vm_region *src_region,
virt_addr_t src_ptr,
size_t count,
void *dest,
size_t *nr_read);
/* write data to the user-space area of a vm-region from a kernel-mode buffer
*/
extern kern_status_t vm_region_write_kernel(
struct vm_region *dst_region,
virt_addr_t dst_ptr,
size_t count,
const void *src,
size_t *nr_written);
extern kern_status_t vm_region_memmove(
struct vm_region *dest_region,
virt_addr_t dest_ptr,
struct vm_region *src_region,
virt_addr_t src_ptr,
size_t count,
size_t *nr_moved);
extern kern_status_t vm_region_memmove_v(
struct vm_region *dest_region,
size_t dest_offset,
const kern_iovec_t *dest,
size_t nr_dest,
struct vm_region *src_region,
size_t src_offset,
const kern_iovec_t *src,
size_t nr_src,
size_t bytes_to_move,
size_t *nr_bytes_moved);
DEFINE_OBJECT_LOCK_FUNCTION(vm_region, vr_base)
#endif

View File

@@ -1,14 +1,14 @@
#ifndef MANGO_VM_H_
#define MANGO_VM_H_
#ifndef KERNEL_VM_H_
#define KERNEL_VM_H_
#include <stddef.h>
#include <mango/types.h>
#include <kernel/bitmap.h>
#include <kernel/btree.h>
#include <kernel/locks.h>
#include <kernel/machine/vm.h>
#include <kernel/queue.h>
#include <kernel/types.h>
#include <mango/status.h>
#include <mango/queue.h>
#include <mango/btree.h>
#include <mango/bitmap.h>
#include <mango/locks.h>
#include <mango/machine/vm.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
@@ -17,13 +17,13 @@ extern "C" {
struct bcache;
/* maximum number of NUMA nodes */
#define VM_MAX_NODES 64
#define VM_MAX_NODES 64
/* maximum number of memory zones per node */
#define VM_MAX_ZONES (VM_ZONE_MAX + 1)
#define VM_MAX_ZONES (VM_ZONE_MAX + 1)
/* maximum number of supported page orders */
#define VM_MAX_PAGE_ORDERS (VM_PAGE_MAX_ORDER + 1)
#define VM_MAX_PAGE_ORDERS (VM_PAGE_MAX_ORDER + 1)
/* maximum number of sparse memory sectors */
#define VM_MAX_SECTORS 8192
#define VM_MAX_SECTORS 8192
/* maximum number of disk sectors that can be stored in a single
page. AKA the number of bits in the sector bitmap.
@@ -33,44 +33,32 @@ struct bcache;
#define VM_CHECK_ALIGN(p, mask) ((((p) & (mask)) == (p)) ? 1 : 0)
#define VM_CACHE_INITIALISED(c) ((c)->c_obj_count != 0)
#define VM_PAGE_IS_FREE(pg) (((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0)
#define VM_PAGE_IS_FREE(pg) \
(((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0)
#define vm_page_foreach(pg, i) \
#define vm_page_foreach(pg, i) \
for (struct vm_page *i = (pg); i; i = vm_page_get_next_tail(i))
typedef phys_addr_t vm_alignment_t;
typedef unsigned int vm_node_id_t;
struct vm_object {
unsigned int reserved;
};
enum vm_model {
VM_MODEL_FLAT = 1,
VM_MODEL_SPARSE,
};
enum vm_prot {
VM_PROT_READ = 0x01u,
VM_PROT_WRITE = 0x02u,
VM_PROT_EXEC = 0x04u,
VM_PROT_USER = 0x08u,
VM_PROT_SVR = 0x10u,
VM_PROT_NOCACHE = 0x20u,
};
enum vm_flags {
VM_NORMAL = 0x00u,
VM_NORMAL = 0x00u,
VM_GET_DMA = 0x01u,
};
enum vm_zone_id {
/* NOTE that these are used as indices into the node_zones array in vm/zone.c
they need to be continuous, and must start at 0!
/* NOTE that these are used as indices into the node_zones array in
vm/zone.c they need to be consecutive, and must start at 0!
not all of these zones are implemented for every architecture. */
VM_ZONE_DMA = 0u,
VM_ZONE_NORMAL = 1u,
VM_ZONE_DMA = 0u,
VM_ZONE_NORMAL = 1u,
VM_ZONE_HIGHMEM = 2u,
};
@@ -108,27 +96,28 @@ enum vm_page_order {
};
enum vm_page_flags {
/* page is reserved (probably by a call to memblock_reserve()) and cannot be
returned by any allocation function */
VM_PAGE_RESERVED = 0x01u,
/* page is reserved (probably by a call to memblock_reserve()) and
cannot be returned by any allocation function */
VM_PAGE_RESERVED = 0x01u,
/* page has been allocated by a zone's buddy allocator, and is in-use */
VM_PAGE_ALLOC = 0x02u,
VM_PAGE_ALLOC = 0x02u,
/* page is the first page of a huge-page */
VM_PAGE_HEAD = 0x04u,
VM_PAGE_HEAD = 0x04u,
/* page is part of a huge-page */
VM_PAGE_HUGE = 0x08u,
/* page is holding cached data from secondary storage, and can be freed if necessary (and not dirty). */
VM_PAGE_CACHE = 0x10u,
VM_PAGE_HUGE = 0x08u,
/* page is holding cached data from secondary storage, and can be freed
* if necessary (and not dirty). */
VM_PAGE_CACHE = 0x10u,
};
enum vm_memory_region_status {
VM_REGION_FREE = 0x01u,
VM_REGION_RESERVED = 0x02u,
VM_REGION_FREE = 0x01u,
VM_REGION_RESERVED = 0x02u,
};
enum vm_cache_flags {
VM_CACHE_OFFSLAB = 0x01u,
VM_CACHE_DMA = 0x02u
VM_CACHE_DMA = 0x02u
};
struct vm_zone_descriptor {
@@ -151,12 +140,6 @@ struct vm_pg_data {
struct vm_zone pg_zones[VM_MAX_ZONES];
};
struct vm_region {
enum vm_memory_region_status r_status;
phys_addr_t r_base;
phys_addr_t r_limit;
};
struct vm_cache {
const char *c_name;
enum vm_cache_flags c_flags;
@@ -204,7 +187,7 @@ struct vm_slab {
- s_freelist[s_free] should be set to the previous value of s_free.
this is commented out because flexible array members are not supported in C++.
*/
//unsigned int s_freelist[];
// unsigned int s_freelist[];
};
struct vm_page {
@@ -231,20 +214,25 @@ struct vm_page {
/* multi-purpose list/tree entry.
the owner of the page can decide what to do with this.
some examples:
- the buddy allocator uses this to maintain its per-zone free-page lists.
- the block cache uses this to maintain a tree of pages keyed by block number.
*/
- the buddy allocator uses this to maintain its per-zone free-page
lists.
- vm_object uses it to maintain a btree of allocated pages keyed
by offset/size.
- the block cache uses this to maintain a tree of pages keyed by
block number.
*/
union {
struct queue_entry p_list;
struct btree_node p_bnode;
/* btree_node contains three pointers, so provide three pointer-sized integers for
use if p_bnode isn't needed. */
/* btree_node contains three pointers, so provide three
pointer-sized integers for use if p_bnode isn't needed. */
uintptr_t priv1[3];
};
union {
/* used by bcache when sector size is < page size. bitmap of present/missing sectors */
/* used by bcache when sector size is < page size. bitmap of
* present/missing sectors */
DECLARE_BITMAP(p_blockbits, VM_MAX_SECTORS_PER_PAGE);
uint32_t p_priv2;
};
@@ -252,10 +240,12 @@ struct vm_page {
union {
/* sector address, used by bcache */
sectors_t p_blockid;
/* offset of this page within the vm_object it is a part of */
off_t p_vmo_offset;
uint32_t p_priv3[2];
};
} __attribute__((aligned(2 * sizeof(unsigned long))));
} __aligned(2 * sizeof(unsigned long));
/* represents a sector of memory, containing its own array of vm_pages.
this struct is used under the sparse memory model, instead of the
@@ -272,39 +262,58 @@ struct vm_sector {
struct vm_page *s_pages;
};
extern kern_status_t vm_bootstrap(const struct vm_zone_descriptor *zones, size_t nr_zones);
extern kern_status_t vm_bootstrap(
const struct vm_zone_descriptor *zones,
size_t nr_zones);
extern enum vm_model vm_memory_model(void);
extern void vm_set_memory_model(enum vm_model model);
extern struct vm_pg_data *vm_pg_data_get(vm_node_id_t node);
extern phys_addr_t vm_virt_to_phys(void *p);
extern phys_addr_t vm_virt_to_phys(const void *p);
extern void *vm_phys_to_virt(phys_addr_t p);
extern void vm_page_init_array();
extern size_t vm_page_order_to_bytes(enum vm_page_order order);
extern size_t vm_page_order_to_pages(enum vm_page_order order);
extern vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order);
extern void vm_page_init_array(void);
extern struct vm_page *vm_page_get(phys_addr_t addr);
extern phys_addr_t vm_page_get_paddr(struct vm_page *pg);
extern struct vm_zone *vm_page_get_zone(struct vm_page *pg);
extern void *vm_page_get_vaddr(struct vm_page *pg);
extern size_t vm_page_get_pfn(struct vm_page *pg);
extern size_t vm_page_order_to_bytes(enum vm_page_order order);
extern size_t vm_page_order_to_pages(enum vm_page_order order);
extern vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order);
extern struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags);
static inline size_t vm_page_get_size_bytes(const struct vm_page *pg)
{
return vm_page_order_to_bytes(pg->p_order);
}
extern struct vm_page *vm_page_alloc(
enum vm_page_order order,
enum vm_flags flags);
extern void vm_page_free(struct vm_page *pg);
extern int vm_page_split(struct vm_page *pg, struct vm_page **a, struct vm_page **b);
extern int vm_page_split(
struct vm_page *pg,
struct vm_page **a,
struct vm_page **b);
extern struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b);
extern struct vm_page *vm_page_get_buddy(struct vm_page *pg);
extern struct vm_page *vm_page_get_next_tail(struct vm_page *pg);
extern size_t vm_bytes_to_pages(size_t bytes);
extern void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info);
extern struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags);
extern void vm_zone_init(
struct vm_zone *z,
const struct vm_zone_descriptor *zone_info);
extern struct vm_page *vm_zone_alloc_page(
struct vm_zone *z,
enum vm_page_order order,
enum vm_flags flags);
extern void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg);
extern struct vm_cache *vm_cache_create(const char *name, size_t objsz, enum vm_cache_flags flags);
extern struct vm_cache *vm_cache_create(
const char *name,
size_t objsz,
enum vm_cache_flags flags);
extern void vm_cache_init(struct vm_cache *cache);
extern void vm_cache_destroy(struct vm_cache *cache);
extern void *vm_cache_alloc(struct vm_cache *cache, enum vm_flags flags);
@@ -330,15 +339,18 @@ extern size_t vm_page_get_pfn_sparse(struct vm_page *pg);
#endif
#ifdef __cplusplus
inline void *operator new(size_t count, void *p) { return p; }
inline void *operator new(size_t count, void *p)
{
return p;
}
#define kmalloc_object(objtype, flags, ...) \
__extension__({ \
void *p = kmalloc(sizeof(objtype), flags); \
if (p) { \
new (p) objtype(__VA_ARGS__); \
} \
(objtype *)p; \
#define kmalloc_object(objtype, flags, ...) \
__extension__({ \
void *p = kmalloc(sizeof(objtype), flags); \
if (p) { \
new (p) objtype(__VA_ARGS__); \
} \
(objtype *)p; \
})
#endif

View File

@@ -1,36 +0,0 @@
#ifndef MANGO_BLOCK_H_
#define MANGO_BLOCK_H_
#include <mango/types.h>
#include <mango/btree.h>
#include <mango/locks.h>
#include <mango/status.h>
#include <stdbool.h>
enum block_device_flags {
BLOCK_DEVICE_NO_BCACHE = 0x01u,
};
struct bcache {
unsigned int b_sector_size;
unsigned int b_sectors_per_page;
struct btree b_pagetree;
};
struct bcache_sector {
struct vm_page *sect_page;
unsigned int sect_index;
void *sect_buf;
bool sect_present;
};
extern struct bcache *bcache_create(unsigned int block_size);
extern void bcache_destroy(struct bcache *cache);
extern kern_status_t bcache_init(struct bcache *cache, unsigned int block_size);
extern void bcache_deinit(struct bcache *cache);
extern kern_status_t bcache_get(struct bcache *cache, sectors_t at, bool create, struct bcache_sector *out);
extern void bcache_mark_present(struct bcache_sector *sect);
#endif

View File

@@ -1,336 +0,0 @@
#ifndef MANGO_DEVICE_H_
#define MANGO_DEVICE_H_
#include <mango/queue.h>
#include <mango/btree.h>
#include <mango/status.h>
#include <mango/bitmap.h>
#include <mango/object.h>
#include <mango/block.h>
#include <mango/fb.h>
#include <mango/ringbuffer.h>
struct device;
struct input_event;
struct input_event_hook;
struct tty_device;
#define DEV_NAME_MAX OBJECT_NAME_MAX
#define DEV_MODEL_NAME_MAX 64
#define DEV_MAJOR_MAX 1024
#define DEV_MINOR_MAX 1024
#define DEV_MAJOR_INVALID ((unsigned int)0)
#define DEV_MINOR_INVALID ((unsigned int)0)
#define INPUT_DEVICE_EVENT_QUEUE_SIZE 128
#define INPUT_DEVICE_MAX 4096
#define BLOCK_DEVICE_MAX 4096
#define FRAMEBUFFER_DEVICE_MAX 4096
#define BLOCK_DEVICE(dev) ((dev)->dev_type == DEV_TYPE_BLOCK ? &(dev)->blk : NULL)
#define CHAR_DEVICE(dev) ((dev)->dev_type == DEV_TYPE_CHAR ? &(dev)->chr : NULL)
#define NET_DEVICE(dev) ((dev)->dev_type == DEV_TYPE_NET ? &(dev)->net : NULL)
#define INPUT_DEVICE(dev) ((dev)->dev_type == DEV_TYPE_INPUT ? &(dev)->input : NULL)
#define BUS_DEVICE(dev) ((dev)->dev_type == DEV_TYPE_BUS ? &(dev)->bus : NULL)
#define FRAMEBUFFER_DEVICE(dev) ((dev)->dev_type == DEV_TYPE_FRAMEBUFFER ? &(dev)->fb : NULL)
enum device_type {
DEV_TYPE_UNKNOWN = 0,
DEV_TYPE_BLOCK,
DEV_TYPE_CHAR,
DEV_TYPE_NET,
DEV_TYPE_INPUT,
DEV_TYPE_BUS,
DEV_TYPE_FRAMEBUFFER,
};
struct iovec {
void *io_buf;
size_t io_len;
};
struct device_type_ops {
kern_status_t(*read)(struct device *, void *, size_t, size_t, size_t *, mango_flags_t);
kern_status_t(*write)(struct device *, const void *, size_t, size_t, size_t *, mango_flags_t);
kern_status_t(*register_device)(struct device *);
};
struct block_device_ops {
kern_status_t(*read_blocks)(struct device *, sectors_t, size_t *, struct iovec *, size_t, mango_flags_t);
kern_status_t(*write_blocks)(struct device *, sectors_t, size_t *, struct iovec *, size_t, mango_flags_t);
kern_status_t(*ioctl)(struct device *, unsigned int, void *);
};
struct net_device_ops {
kern_status_t(*online)(struct device *);
kern_status_t(*offline)(struct device *);
kern_status_t(*transmit)(struct device *, const void *, size_t);
kern_status_t(*ioctl)(struct device *, unsigned int, void *);
};
struct char_device_ops {
kern_status_t(*read)(struct device *, void *, size_t, size_t, size_t *, mango_flags_t);
kern_status_t(*write)(struct device *, const void *, size_t, size_t, size_t *, mango_flags_t);
};
struct input_device_ops {
kern_status_t(*ioctl)(struct device *, unsigned int, void *);
};
struct bus_device_ops {
kern_status_t(*scan)(struct device *);
};
struct framebuffer_device_ops {
kern_status_t(*set_varinfo)(struct device *, const struct framebuffer_varinfo *);
};
struct block_device {
struct block_device_ops *b_ops;
struct bcache b_cache;
enum block_device_flags b_flags;
unsigned int b_id;
unsigned int b_sector_size;
sectors_t b_capacity;
};
struct char_device {
struct char_device_ops *c_ops;
/* only valid for TTY devices */
struct tty_device *c_tty;
};
struct net_device {
struct net_device_ops *n_ops;
};
struct input_device {
struct input_device_ops *i_ops;
unsigned int i_id;
struct ringbuffer i_events;
struct queue i_hooks;
};
struct bus_device {
struct queue_entry b_buslist;
struct bus_device_ops *b_ops;
};
struct framebuffer_device {
unsigned int fb_id;
struct framebuffer_device_ops *fb_ops;
struct framebuffer_varinfo fb_varinfo;
struct framebuffer_fixedinfo fb_fixedinfo;
};
struct device {
struct object dev_base;
unsigned int dev_minor;
enum device_type dev_type;
struct device *dev_parent;
struct driver *dev_owner;
struct queue dev_children;
struct queue_entry dev_childent;
struct btree_node dev_driverent;
char dev_name[DEV_NAME_MAX];
char dev_model_name[DEV_MODEL_NAME_MAX];
void *dev_bus_priv;
void *dev_priv;
union {
struct block_device blk;
struct char_device chr;
struct net_device net;
struct input_device input;
struct bus_device bus;
struct framebuffer_device fb;
};
};
struct driver;
struct driver_ops {
/* called when a bus driver finds a device for this driver to manage. */
kern_status_t(*bind)(struct driver *, struct device *, struct device *);
/* called when driver is registered. */
kern_status_t(*install)(struct driver *);
/* called when driver is unregistered. */
kern_status_t(*uninstall)(struct driver *);
};
struct driver {
struct kext *drv_owner;
unsigned int drv_major;
DECLARE_BITMAP(drv_minors, DEV_MINOR_MAX);
char drv_name[DEV_NAME_MAX];
struct btree drv_children;
struct btree_node drv_ent;
spin_lock_t drv_lock;
void *drv_priv;
struct driver_ops *drv_ops;
};
extern kern_status_t device_init(void);
extern struct device *root_device(void);
extern struct device *misc_device(void);
extern struct device *device_alloc(void);
static inline void device_lock(struct device *dev)
{
object_lock(&dev->dev_base);
}
static inline void device_unlock(struct device *dev)
{
object_unlock(&dev->dev_base);
}
static inline void device_lock_irqsave(struct device *dev, unsigned long *flags)
{
object_lock_irqsave(&dev->dev_base, flags);
}
static inline void device_unlock_irqrestore(struct device *dev, unsigned long flags)
{
object_unlock_irqrestore(&dev->dev_base, flags);
}
extern kern_status_t device_read(struct device *dev, void *buf, size_t offset, size_t size, size_t *bytes_read, mango_flags_t flags);
extern kern_status_t device_write(struct device *dev, const void *buf, size_t offset, size_t size, size_t *bytes_written, mango_flags_t flags);
extern struct device *cast_to_device(struct object *obj);
extern struct device *generic_device_create(void);
extern struct char_device *char_device_create(void);
extern struct block_device *block_device_create(void);
extern struct net_device *net_device_create(void);
extern struct input_device *input_device_create(void);
extern struct bus_device *bus_device_create(void);
extern struct framebuffer_device *framebuffer_device_create(void);
extern struct char_device *char_device_from_generic(struct device *dev);
extern struct block_device *block_device_from_generic(struct device *dev);
extern struct net_device *net_device_from_generic(struct device *dev);
extern struct input_device *input_device_from_generic(struct device *dev);
extern struct bus_device *bus_device_from_generic(struct device *dev);
extern struct framebuffer_device *framebuffer_device_from_generic(struct device *dev);
/* Typed device structs (char, block, net, input, bus, framebuffer) are
 * embedded as members of struct device; each helper below performs the
 * container_of step back from the typed member to the enclosing device.
 * NOTE(review): member names chr/blk/net/input/bus/fb must match the
 * struct device definition, which is outside this view -- confirm. */
static inline struct device *char_device_base(struct char_device *dev)
{
	return (struct device *)((char *)dev - offsetof(struct device, chr));
}
static inline struct device *block_device_base(struct block_device *dev)
{
	return (struct device *)((char *)dev - offsetof(struct device, blk));
}
static inline struct device *net_device_base(struct net_device *dev)
{
	return (struct device *)((char *)dev - offsetof(struct device, net));
}
static inline struct device *input_device_base(struct input_device *dev)
{
	return (struct device *)((char *)dev - offsetof(struct device, input));
}
static inline struct device *bus_device_base(struct bus_device *dev)
{
	return (struct device *)((char *)dev - offsetof(struct device, bus));
}
static inline struct device *framebuffer_device_base(struct framebuffer_device *dev)
{
	return (struct device *)((char *)dev - offsetof(struct device, fb));
}
/* Convenience accessors: go from a typed device pointer all the way to the
 * generic struct object header (dev_base), for use with the object_* APIs.
 * Each composes the *_device_base() container_of step with the dev_base
 * member access. */
static inline struct object *char_device_object(struct char_device *dev)
{
	return &char_device_base(dev)->dev_base;
}
static inline struct object *block_device_object(struct block_device *dev)
{
	return &block_device_base(dev)->dev_base;
}
static inline struct object *net_device_object(struct net_device *dev)
{
	return &net_device_base(dev)->dev_base;
}
static inline struct object *input_device_object(struct input_device *dev)
{
	return &input_device_base(dev)->dev_base;
}
static inline struct object *bus_device_object(struct bus_device *dev)
{
	return &bus_device_base(dev)->dev_base;
}
static inline struct object *framebuffer_device_object(struct framebuffer_device *dev)
{
	return &framebuffer_device_base(dev)->dev_base;
}
/* Register dev with the device core under parent, owned by the given driver. */
extern kern_status_t device_register(struct device *dev, struct driver *owner, struct device *parent);
/* Take an additional reference on dev via its base object; returns dev
 * (through cast_to_device) so calls can be chained. */
static inline struct device *device_ref(struct device *dev)
{
	return cast_to_device(object_ref(&dev->dev_base));
}
/* Drop a reference taken by device_ref(). */
static inline void device_deref(struct device *dev)
{
	object_deref(&dev->dev_base);
}
extern kern_status_t input_device_report_event(struct input_device *dev, const struct input_event *ev, bool noblock);
extern kern_status_t input_device_read(struct device *dev, void *buf, size_t offset,
size_t size, size_t *bytes_read, mango_flags_t flags);
extern kern_status_t input_device_add_hook(struct device *dev, struct input_event_hook *hook);
extern kern_status_t input_device_remove_hook(struct device *dev, struct input_event_hook *hook);
extern struct driver *driver_create(struct kext *self, const char *name);
extern kern_status_t driver_destroy(struct driver *driver);
extern kern_status_t driver_init(struct driver *driver, struct kext *self, const char *name);
extern kern_status_t driver_deinit(struct driver *driver);
extern kern_status_t driver_register(struct driver *driver);
extern kern_status_t driver_unregister(struct driver *driver);
extern unsigned int driver_alloc_minor(struct driver *driver);
extern void driver_free_minor(struct driver *driver, unsigned int minor);
extern struct device *driver_get_device(struct driver *driver, unsigned int minor);
extern kern_status_t driver_add_device(struct driver *driver, struct device *dev);
extern kern_status_t driver_remove_device(struct driver *driver, struct device *dev);
extern struct driver *system_driver(void);
extern kern_status_t framebuffer_get_fixedinfo(struct device *dev, struct framebuffer_fixedinfo *out);
extern kern_status_t framebuffer_get_varinfo(struct device *dev, struct framebuffer_varinfo *out);
extern kern_status_t framebuffer_set_varinfo(struct device *dev, const struct framebuffer_varinfo *varinfo);
/* Driver-level locking: unlike devices, drivers carry their own spin lock
 * (drv_lock) and do not go through the object layer. */
static inline void driver_lock(struct driver *driver)
{
	spin_lock(&driver->drv_lock);
}
static inline void driver_unlock(struct driver *driver)
{
	spin_unlock(&driver->drv_lock);
}
/* IRQ-safe variants; previous interrupt state is saved to / restored from
 * the flags argument. */
static inline void driver_lock_irqsave(struct driver *driver, unsigned long *flags)
{
	spin_lock_irqsave(&driver->drv_lock, flags);
}
static inline void driver_unlock_irqrestore(struct driver *driver, unsigned long flags)
{
	spin_unlock_irqrestore(&driver->drv_lock, flags);
}
/* Scan buses for attached hardware -- NOTE(review): semantics inferred
 * from the name; definition is not in view. */
extern kern_status_t scan_all_buses(void);
#endif

View File

@@ -1,99 +0,0 @@
#ifndef MANGO_KEXT_H_
#define MANGO_KEXT_H_
#include <mango/status.h>
#include <mango/object.h>
#include <mango/compiler.h>
#include <mango/btree.h>
#define KERNEL_KEXT_ID "net.doorstuck.mango-kernel"
#define KEXT_IDENT_MAX 80
#define KEXT_NO_DEPENDENCIES NULL
#define __KEXT_INFO_VARNAME_2(a, b) a ## b
#define __KEXT_INFO_VARNAME_1(a, b) __KEXT_INFO_VARNAME_2(a, b)
#ifdef MANGO_INTERNAL
#define __KEXT_INFO_LINKAGE static
#define __KEXT_INFO_VARNAME() __KEXT_INFO_VARNAME_1(__kext_info, __LINE__)
#define __KEXT_INFO_DEPNAME() __KEXT_INFO_VARNAME_1(__kext_deps, __LINE__)
#define __KEXT_INFO_FLAGS KEXT_INTERNAL
#define __KEXT_INFO_ALIGNMENT 0x80
#else
#define __KEXT_INFO_LINKAGE
#define __KEXT_INFO_VARNAME() __kext_info
#define __KEXT_INFO_DEPNAME() __kext_deps
#define __KEXT_INFO_FLAGS KEXT_NONE
#define __KEXT_INFO_ALIGNMENT 0x80
#endif
#ifdef __cplusplus
#define DEFINE_KEXT(ident, online, offline, ...) \
static const char *__KEXT_INFO_DEPNAME()[] = { \
__VA_ARGS__, NULL \
}; \
static struct kext_info __section(".kextinfo") __aligned(__KEXT_INFO_ALIGNMENT) __used __KEXT_INFO_VARNAME() = { \
__KEXT_INFO_FLAGS, \
ident, \
online, \
offline, \
__KEXT_INFO_DEPNAME(), \
}
#else
#define DEFINE_KEXT(ident, online, offline, ...) \
static const char *__KEXT_INFO_DEPNAME()[] = { \
__VA_ARGS__, NULL \
}; \
static struct kext_info __section(".kextinfo") __aligned(__KEXT_INFO_ALIGNMENT) __used __KEXT_INFO_VARNAME() = { \
.k_flags = __KEXT_INFO_FLAGS, \
.k_ident = ident, \
.k_online = online, \
.k_offline = offline, \
.k_dependencies = __KEXT_INFO_DEPNAME(), \
}
#endif
struct kext;
/* kernel-extension state flags */
enum kext_flags {
	KEXT_NONE = 0x00u,
	KEXT_INTERNAL = 0x01u,	/* built into the kernel image (MANGO_INTERNAL builds) */
	KEXT_ONLINE = 0x02u,	/* presumably set once k_online succeeds -- confirm */
};
/* Static descriptor emitted into the .kextinfo section by DEFINE_KEXT(). */
struct kext_info {
	enum kext_flags k_flags;
	char k_ident[KEXT_IDENT_MAX];	/* reverse-DNS identifier, e.g. KERNEL_KEXT_ID */
	kern_status_t(*k_online)(struct kext *);	/* bring the extension online */
	kern_status_t(*k_offline)(struct kext *);	/* take the extension offline */
	const char **k_dependencies;	/* NULL-terminated ident list (DEFINE_KEXT appends NULL) */
};
/* Runtime state for a loaded kernel extension, built from a kext_info. */
struct kext {
	struct object k_base;	/* generic kernel object header */
	enum kext_flags k_flags;
	char k_ident[KEXT_IDENT_MAX];
	uint64_t k_ident_hash;	/* hash of k_ident -- presumably the btree lookup key; verify */
	struct btree_node k_node;	/* linkage in the kext registry btree */
	kern_status_t(*k_online)(struct kext *);
	kern_status_t(*k_offline)(struct kext *);
	unsigned int k_nr_dependencies;
	struct kext **k_dependencies;	/* resolved dependencies, k_nr_dependencies entries */
};
extern kern_status_t scan_internal_kexts(void);
extern kern_status_t bring_internal_kexts_online(void);
extern kern_status_t init_kernel_kext(void);
extern struct kext *kernel_kext(void);
extern kern_status_t kext_cache_init(void);
extern struct kext *kext_alloc(void);
extern void kext_release(struct kext *kext);
extern kern_status_t kext_register(struct kext *kext);
extern struct kext *kext_get_by_id(const char *ident);
extern kern_status_t kext_bring_online(struct kext *kext);
#endif

View File

@@ -1,25 +0,0 @@
#ifndef MANGO_LOCKS_H_
#define MANGO_LOCKS_H_
#include <mango/compiler.h>
#include <mango/machine/hwlock.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef __aligned(8) ml_hwlock_t spin_lock_t;
#define SPIN_LOCK_INIT ML_HWLOCK_INIT
/* Thin wrappers over the machine-layer hwlock primitives.
 * The expansions deliberately carry NO trailing semicolon: callers supply
 * their own, so constructs like "if (x) spin_lock(l); else ..." stay
 * well-formed and no empty statements are emitted. */
#define spin_lock(lck) ml_hwlock_lock(lck)
#define spin_unlock(lck) ml_hwlock_unlock(lck)
#define spin_lock_irqsave(lck, flags) ml_hwlock_lock_irqsave(lck, flags)
#define spin_unlock_irqrestore(lck, flags) ml_hwlock_unlock_irqrestore(lck, flags)
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,118 +0,0 @@
#ifndef MANGO_OBJECT_H_
#define MANGO_OBJECT_H_
#include <mango/locks.h>
#include <mango/status.h>
#include <mango/flags.h>
#include <mango/vm.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
#define OBJECT_MAGIC 0xBADDCAFE
#define OBJECT_NAME_MAX 64
#define OBJECT_PATH_MAX 256
/* container_of-style cast: recover the enclosing structure from a pointer
 * to one of its members. The member offset must be subtracted from the
 * integer value of the pointer BEFORE casting to to_type *; the previous
 * form cast first, so the subtraction became pointer arithmetic scaled by
 * sizeof(to_type) and produced a wildly wrong address. */
#define OBJECT_CAST(to_type, to_type_member, p) \
	((to_type *)((uintptr_t)(p) - offsetof(to_type, to_type_member)))
/* Checked variant: evaluates to NULL when objp is not of obj_type.
 * Parenthesized as a whole so it composes safely inside larger expressions. */
#define OBJECT_C_CAST(c_type, c_type_member, obj_type, objp) \
	(OBJECT_IS_TYPE(objp, obj_type) ? OBJECT_CAST(c_type, c_type_member, (objp)) : NULL)
/* True when obj's type descriptor matches type_ptr (pointer identity). */
#define OBJECT_IS_TYPE(obj, type_ptr) \
	((obj)->ob_type == (type_ptr))
struct object;
struct object_attrib;
/* flags on an object type descriptor */
enum object_type_flags {
	OBJTYPE_INIT = 0x01u,	/* presumably: type has been initialized/registered -- confirm */
};
/* Per-type operation table invoked by the object_* front-end functions.
 * NOTE(review): whether NULL callbacks are tolerated is not visible here. */
struct object_ops {
	kern_status_t(*open)(struct object *obj);
	kern_status_t(*close)(struct object *obj);
	kern_status_t(*read)(struct object *obj, void *p, size_t off, size_t *r, mango_flags_t flags);
	kern_status_t(*write)(struct object *obj, const void *p, size_t off, size_t *w, mango_flags_t flags);
	kern_status_t(*destroy)(struct object *obj);
	kern_status_t(*query_name)(struct object *obj, char out[OBJECT_NAME_MAX]);
	kern_status_t(*parse)(struct object *obj, const char *path, struct object **out);	/* resolve a path relative to obj */
	kern_status_t(*get_named)(struct object *obj, const char *name, struct object **out);	/* child lookup by name */
	kern_status_t(*get_at)(struct object *obj, size_t at, struct object **out);	/* child lookup by index */
	kern_status_t(*read_attrib)(struct object *obj, struct object_attrib *attrib, char *out, size_t max, size_t *r);
	kern_status_t(*write_attrib)(struct object *obj, struct object_attrib *attrib, const char *s, size_t len, size_t *r);
};
/* A named attribute attached to an object or to an object type. */
struct object_attrib {
	char *a_name;
	struct queue_entry a_list;	/* linkage in the owning ob_attrib queue */
};
/* Type descriptor shared by all objects of one kind. */
struct object_type {
	enum object_type_flags ob_flags;
	char ob_name[32];
	unsigned int ob_size;	/* presumably the full container allocation size -- confirm */
	unsigned int ob_header_offset;	/* offset of the struct object header inside the container */
	struct vm_cache ob_cache;	/* allocation cache for instances of this type */
	struct queue_entry ob_list;	/* linkage in the registered-type list */
	struct queue ob_attrib;	/* type-wide attributes */
	struct object_ops ob_ops;
};
/* Common header embedded in every kernel object. */
struct object {
	uint32_t ob_magic;	/* OBJECT_MAGIC; sanity check for casts */
	struct object_type *ob_type;	/* compared by OBJECT_IS_TYPE() */
	spin_lock_t ob_lock;	/* taken by object_lock()/object_lock_irqsave() */
	unsigned int ob_refcount;	/* managed by object_ref()/object_deref() */
	unsigned int ob_handles;	/* presumably the count of outstanding handles -- confirm */
	struct queue ob_attrib;	/* per-object attributes */
	struct queue_entry ob_list;
} __aligned(sizeof(long));
extern kern_status_t object_bootstrap(void);
extern kern_status_t object_type_register(struct object_type *p);
extern kern_status_t object_type_unregister(struct object_type *p);
extern struct object_namespace *global_namespace(void);
extern struct object_namespace *object_namespace_create(void);
extern struct object *ns_header(struct object_namespace *ns);
extern kern_status_t object_namespace_get_object(struct object_namespace *ns, const char *path, struct object **out);
extern kern_status_t object_namespace_create_link(struct object_namespace *ns, const char *linkpath, struct object *dest);
extern kern_status_t object_publish(struct object_namespace *ns, const char *path, struct object *obj);
extern kern_status_t object_unpublish(struct object_namespace *ns, struct object *obj);
extern struct object *object_create(struct object_type *type);
extern struct object *object_ref(struct object *obj);
extern void object_deref(struct object *obj);
extern void object_lock(struct object *obj);
extern void object_unlock(struct object *obj);
extern void object_lock_irqsave(struct object *obj, unsigned long *flags);
extern void object_unlock_irqrestore(struct object *obj, unsigned long flags);
/* Convenience lookup: resolve path in the global namespace and return the
 * resulting object through *out. Status comes straight from
 * object_namespace_get_object(). */
static inline kern_status_t object_get(const char *path, struct object **out)
{
	struct object_namespace *root_ns = global_namespace();

	return object_namespace_get_object(root_ns, path, out);
}
extern kern_status_t object_read(struct object *obj, void *p, size_t offset, size_t max, size_t *nr_read, mango_flags_t flags);
extern kern_status_t object_write(struct object *obj, const void *p, size_t offset, size_t max, size_t *nr_written, mango_flags_t flags);
extern kern_status_t object_get_child_named(struct object *obj, const char *name, struct object **out);
extern kern_status_t object_get_child_at(struct object *obj, size_t at, struct object **out);
extern kern_status_t object_query_name(struct object *obj, char name[OBJECT_NAME_MAX]);
extern struct object *set_create(const char *name);
extern kern_status_t set_add_object(struct object *set, struct object *obj);
extern kern_status_t set_remove_object(struct object *set, struct object *obj);
extern bool object_is_set(struct object *obj);
extern struct object *link_create(const char *name, struct object *dest);
extern struct object *link_read_ptr(struct object *link);
extern bool object_is_link(struct object *obj);
extern void init_set_objects(void);
extern void init_link_objects(void);
extern void init_global_namespace(void);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,12 +0,0 @@
#ifndef MANGO_PANIC_H_
#define MANGO_PANIC_H_
#include <mango/compiler.h>
struct cpu_context;
/* panic() is the no-context form: forwards a NULL cpu_context to panic_irq(). */
#define panic(...) panic_irq(NULL, __VA_ARGS__)
/* Halt the system with a printf-style message. ctx may be NULL; when
 * non-NULL it presumably carries the interrupted CPU state for the crash
 * report. Never returns. */
extern void __noreturn panic_irq(struct cpu_context *ctx, const char *fmt, ...);
#endif

View File

@@ -1,42 +0,0 @@
#ifndef MANGO_PMAP_H_
#define MANGO_PMAP_H_
/* all the functions declared in this file are defined in arch/xyz/pmap.c */
#include <mango/vm.h>
#include <mango/status.h>
#include <mango/machine/pmap.h>
#include <stddef.h>
/* physical address -> physical frame number */
#define PFN(x) ((x) >> VM_PAGE_SHIFT)
#ifdef __cplusplus
extern "C" {
#endif
/* machine-layer handle types for a page map and a physical frame number */
typedef ml_pmap_t pmap_t;
typedef ml_pfn_t pfn_t;
enum pmap_flags {
	PMAP_NORMAL = 0x00u,
	PMAP_HUGEPAGE = 0x01u,	/* presumably requests a large-page mapping; arch-defined */
};
/* early one-time initialization of the physical-map layer */
extern void pmap_bootstrap(void);
/* the pmap covering the kernel's own address space */
extern pmap_t get_kernel_pmap(void);
extern pmap_t pmap_create(void);
extern void pmap_destroy(pmap_t pmap);
/* make pmap the active translation context */
extern void pmap_switch(pmap_t pmap);
/* map a single page, or a contiguous run of length len, at virtual address p
 * (len units not stated here -- see arch/xyz/pmap.c) */
extern kern_status_t pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot prot, enum pmap_flags flags);
extern kern_status_t pmap_add_block(pmap_t pmap, void *p, pfn_t pfn, size_t len, enum vm_prot prot, enum pmap_flags flags);
/* remove the mapping at p, or all mappings in [p, p + len) */
extern kern_status_t pmap_remove(pmap_t pmap, void *p);
extern kern_status_t pmap_remove_range(pmap_t pmap, void *p, size_t len);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,17 +0,0 @@
#ifndef MANGO_PRINTK_H_
#define MANGO_PRINTK_H_
#include <mango/console.h>
#ifdef __cplusplus
extern "C" {
#endif
extern void early_printk_init(struct console *con);
extern int printk(const char *format, ...);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,113 +0,0 @@
#ifndef MANGO_TERMIOS_H_
#define MANGO_TERMIOS_H_
#include <stdint.h>
#define NCCS 32
#define BRKINT 00000001
#define ICRNL 00000002
#define IGNBRK 00000004
#define IGNCR 00000010
#define IGNPAR 00000020
#define INLCR 00000040
#define INPCK 00000100
#define ISTRIP 00000200
#define IUCLC 00000400
#define IXANY 00001000
#define IXOFF 00002000
#define IXON 00004000
#define PARMRK 00010000
#define OPOST 00000001
#define OLCUC 00000002
#define ONLCR 00000004
#define OCRNL 00000010
#define ONOCR 00000020
#define ONLRET 00000040
#define NLDLY 00000100
#define NL0 00000000
#define NL1 00000100
#define OFILL 00000200
#define CRDLY 00003400
#define CR0 00000000
#define CR1 00000400
#define CR2 00001000
#define CR3 00002000
#define TABDLY 00034000
#define TAB0 00000000
#define TAB1 00004000
#define TAB2 00010000
#define TAB3 00020000
#define BSDLY 00040000
#define BS0 00000000
#define BS1 00040000
#define VTDLY 00100000
#define VT0 00000000
#define VT1 00100000
#define FFDLY 00200000
#define FF0 00000000
#define FF1 00200000
#define B0 0
#define B50 50
#define B75 75
#define B110 110
#define B134 134
#define B150 150
#define B200 200
#define B300 300
#define B600 600
#define B1200 1200
#define B1800 1800
#define B2400 2400
#define B4800 4800
#define B9600 9600
#define B19200 19200
#define B38400 38400
#define CSIZE 00000007
#define CS5 00000000
#define CS6 00000001
#define CS7 00000002
#define CS8 00000004
#define CSTOPB 00000010
#define CREAD 00000020
#define PARENB 00000040
#define PARODD 00000100
#define HUPCL 00000200
#define CLOCAL 00000400
#define ECHO 00000001
#define ECHOE 00000002
#define ECHOK 00000004
#define ECHONL 00000010
#define ICANON 00000020
#define IEXTEN 00000040
#define ISIG 00000100
#define NOFLSH 00000200
#define TOSTOP 00000400
#define XCASE 00001000
#define TCSANOW 1
#define TCSADRAIN 2
#define TCSAFLUSH 3
#define TCIFLUSH 1
#define TCOFLUSH 2
#define TCIOFLUSH (TCIFLUSH | TCOFLUSH)
typedef unsigned int speed_t;
typedef unsigned int tcflag_t;
typedef unsigned char cc_t;
/* POSIX-style terminal configuration; flag values are the macros above. */
struct termios {
	tcflag_t c_iflag;	/* input modes (ICRNL, IXON, ...) */
	tcflag_t c_oflag;	/* output modes (OPOST, ONLCR, ...) */
	tcflag_t c_cflag;	/* control modes (CSIZE, PARENB, ...) */
	tcflag_t c_lflag;	/* local modes (ECHO, ICANON, ...) */
	cc_t c_line;	/* line discipline number */
	cc_t c_cc[NCCS];	/* control characters */
};
#endif

View File

@@ -1,145 +0,0 @@
#ifndef MANGO_TTY_H_
#define MANGO_TTY_H_
#include <mango/status.h>
#include <mango/device.h>
#include <mango/queue.h>
#include <mango/object.h>
#include <mango/termios.h>
#include <stdint.h>
/* Fetch the tty state hanging off a generic device, or NULL when dev is
 * not a char device. NOTE(review): relies on struct device exposing
 * dev_type and chr.c_tty -- definition outside this header. */
#define TTY_DEVICE(dev) ((dev)->dev_type == DEV_TYPE_CHAR ? (dev)->chr.c_tty : NULL)
/* container_of: recover the struct tty_driver from its embedded driver
 * base. (drv) is parenthesized so arguments containing operators expand
 * with the intended precedence. */
#define TTY_DRIVER(drv) ((struct tty_driver *)((char *)(drv) - offsetof(struct tty_driver, tty_base)))
/* capacity of the raw input event queue */
#define TTY_INPUT_QUEUE_SIZE 256
/* canonical-mode line buffer size in bytes */
#define TTY_LINE_MAX 4096
struct kext;
/* The TTY system.
TTYs are an enhanced version of the console object. Rather than a simple output
device for log messages, TTYs are intended to support fully-featured interactive
user sessions, including advanced display manipulation (if applicable) and
buffered user input.
A TTY object is split into 2 parts:
- struct tty: This represents the terminal session, and tracks things like the cursor
position, input buffer, flags, etc.
- struct tty_driver: This is a set of function callbacks that the TTY can use to
manipulate the output device. This could represent a char-based framebuffer
device, a serial port, etc.
*/
#ifdef __cplusplus
extern "C" {
#endif
enum tty_driver_type {
/* For TTYs operating on simple IO devices like serial ports.
Allows writing characters, receiving characters, and not much else. */
TTY_DRIVER_SIMPLE,
/* For TTYs operating on more capable display interfaces.
Allows putting characters at arbitrary locations, scrolling, etc */
TTY_DRIVER_FULL,
};
/* TTY cursor status. The extra cursor styles are just for completeness,
the important one to support (if possible), is TTY_CURSOR_NONE.
The others can be interpreted as "just turn on a cursor of any style". */
enum tty_cursor {
TTY_CURSOR_ULINE,
TTY_CURSOR_BLOCK,
TTY_CURSOR_NONE,
};
/* direction to use for scrolling. The important one to support is
TTY_SCROLL_DOWN for when output overflows the display */
enum tty_scroll_dir {
TTY_SCROLL_DOWN,
TTY_SCROLL_UP,
};
enum tty_modifier_key {
TTY_KEY_OTHER = 0x00u,
TTY_KEY_CTRL = 0x01u,
TTY_KEY_ALT = 0x02u,
TTY_KEY_SHIFT = 0x04u,
};
/* character attribute. this could be as simple as VGA's 16-colour palette
plus an extra bit for bright, or a full 24-bit RGB value with bold and underline
support, depending on what the driver supports. */
typedef uint64_t tty_attrib_t;
/* Callbacks a tty driver implements to drive its output device. Per the
 * header comment above, TTY_DRIVER_SIMPLE backends support only a subset
 * of these operations. */
struct tty_driver_ops {
	void (*tty_init)(struct device *dev);
	void (*tty_deinit)(struct device *dev);
	void (*tty_clear)(struct device *dev, int x, int y, int width, int height);	/* blank a cell rectangle */
	void (*tty_putc)(struct device *dev, int c, int xpos, int ypos, tty_attrib_t attrib);	/* draw c at cell (xpos, ypos) */
	void (*tty_set_cursor)(struct device *dev, enum tty_cursor cur);
	void (*tty_move_cursor)(struct device *dev, int x, int y);
	void (*tty_scroll)(struct device *dev, enum tty_scroll_dir dir, int lines);
};
/* An output backend for TTYs (e.g. framebuffer console, serial port). */
struct tty_driver {
	struct driver tty_base;	/* generic driver base; TTY_DRIVER() performs the reverse cast */
	char tty_name[16];
	enum tty_driver_type tty_type;	/* SIMPLE vs FULL capability class */
	struct queue_entry tty_head;	/* presumably linkage in a registered-driver list -- confirm */
	struct tty_driver_ops *tty_ops;
};
/* Line discipline: processing layer between raw input events and tty_read(). */
struct tty_ldisc {
	char name[OBJECT_NAME_MAX];
	kern_status_t(*read)(struct device *, void *, size_t, size_t *, mango_flags_t);	/* pull processed input */
	void(*write)(struct device *, const struct input_event *);	/* feed one raw input event */
};
/* Per-terminal session state (cursor, config, buffered input). */
struct tty_device {
	unsigned int tty_xcells, tty_ycells;	/* display size in character cells */
	unsigned int tty_xcur, tty_ycur;	/* current cursor cell position */
	struct termios tty_config;	/* POSIX-style terminal settings */
	tty_attrib_t tty_curattrib;	/* attribute applied to newly drawn characters */
	enum tty_modifier_key tty_modstate;	/* currently-held modifier keys */
	struct tty_ldisc *tty_ldisc;	/* active line discipline */
	/* input characters processed from tty_events, returned by tty_read() */
	struct ringbuffer tty_input;
	char *tty_linebuf;	/* line assembly buffer -- presumably TTY_LINE_MAX bytes; confirm */
};
extern void register_tty_console(void);
extern void redirect_printk_to_tty(struct device *dest);
extern kern_status_t tty_bootstrap(void);
extern struct tty_ldisc *tty_default_line_discipline(void);
extern struct device *tty_device_create(void);
extern kern_status_t tty_device_register(struct device *dev, struct tty_driver *owner, struct device *parent);
extern void tty_set_foreground(struct device *tty);
extern kern_status_t tty_connect_foreground_input_device(struct device *input);
extern struct tty_driver *tty_driver_create(struct kext *self, const char *name);
extern kern_status_t tty_driver_destroy(struct tty_driver *drv);
extern kern_status_t tty_driver_register(struct tty_driver *drv);
extern kern_status_t tty_driver_unregister(struct tty_driver *drv);
/* Expose the generic driver core embedded in a tty_driver, for passing to
 * the driver_* APIs. Inverse of the TTY_DRIVER() macro. */
static inline struct driver *tty_driver_base(struct tty_driver *drv)
{
	struct driver *generic = &drv->tty_base;

	return generic;
}
extern kern_status_t tty_read(struct device *tty, void *buf, size_t offset, size_t max, size_t *nr_read, mango_flags_t flags);
extern kern_status_t tty_write(struct device *tty, const void *buf, size_t offset, size_t len, size_t *nr_written, mango_flags_t flags);
extern kern_status_t tty_report_event(struct device *tty, const struct input_event *ev);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,14 +0,0 @@
#ifndef MANGO_TYPES_H_
#define MANGO_TYPES_H_
#include <stdint.h>
/* largest representable cycle count */
#define CYCLES_MAX UINT64_MAX
/* a physical memory address (pointer-width integer) */
typedef uintptr_t phys_addr_t;
/* CPU timestamp/cycle-counter value */
typedef uint64_t cycles_t;
/* count of disk sectors -- NOTE(review): sector size is not defined here */
typedef uint64_t sectors_t;
/* mode/permission bits (POSIX-style naming) */
typedef unsigned int umode_t;
#endif

View File

@@ -1,5 +1,4 @@
#include <mango/init.h>
#include <kernel/init.h>
int do_initcalls(void)
{

Some files were not shown because too many files have changed in this diff Show More