diff --git a/arch/x86_64/cpu.c b/arch/x86_64/cpu.c
index d79fc38..0d8fbb4 100644
--- a/arch/x86_64/cpu.c
+++ b/arch/x86_64/cpu.c
@@ -3,11 +3,13 @@
 int ml_cpu_block_init(ml_cpu_block *p)
 {
 	gdt_init(&p->c_gdt, &p->c_gdt_ptr);
+	idt_init(&p->c_idt_ptr);
 
 	return 0;
 }
 
 int ml_cpu_block_use(ml_cpu_block *p)
 {
 	gdt_load(&p->c_gdt_ptr);
+	idt_load(&p->c_idt_ptr);
 
 	return 0;
 }
diff --git a/arch/x86_64/include/arch/irq.h b/arch/x86_64/include/arch/irq.h
new file mode 100644
index 0000000..54d1a17
--- /dev/null
+++ b/arch/x86_64/include/arch/irq.h
@@ -0,0 +1,43 @@
+#ifndef ARCH_IRQ_H_
+#define ARCH_IRQ_H_
+
+#include <stdint.h>	/* NOTE(review): include targets were lost in the paste; reconstructed -- verify */
+#include <socks/compiler.h>	/* presumably provides __packed -- confirm */
+
+#define NR_IDT_ENTRIES 48
+
+struct cpu_context {	/* must match PUSH_REGS + hardware frame in irqvec.S exactly */
+	uint64_t r15, r14, r13, r12, r11, r10, r9, r8;
+	uint64_t rdi, rsi, rbp, unused_rsp, rbx, rdx, rcx, rax;
+	uint64_t int_no, err_no;
+	uint64_t rip, cs, rflags, rsp, ss;
+} __packed;
+
+struct idt_entry {	/* 16-byte long-mode interrupt gate descriptor */
+	uint16_t base_low;
+	uint16_t selector;
+	uint8_t always0;
+	uint8_t type : 4;
+	uint8_t zero : 1;
+	uint8_t dpl : 2;
+	uint8_t present : 1;
+	uint16_t base_middle;
+	uint32_t base_high;
+	uint32_t reserved;
+} __packed;
+
+struct idt {
+	struct idt_entry i_entries[NR_IDT_ENTRIES];
+};
+
+struct idt_ptr {
+	uint16_t i_limit;
+	uintptr_t i_base;
+} __packed;
+
+typedef void (*int_hook)(struct cpu_context *);
+
+extern int idt_init(struct idt_ptr *idtp);
+extern int idt_load(struct idt_ptr *idtp);
+
+#endif
diff --git a/arch/x86_64/include/socks/machine/cpu.h b/arch/x86_64/include/socks/machine/cpu.h
index ab088b7..c298c93 100644
--- a/arch/x86_64/include/socks/machine/cpu.h
+++ b/arch/x86_64/include/socks/machine/cpu.h
@@ -2,11 +2,13 @@
 #define SOCKS_X86_64_CPU_H_
 
 #include <socks/machine/gdt.h>
-
+#include <arch/irq.h>
 
 typedef struct ml_cpu_block {
 	struct gdt c_gdt;
 	struct gdt_ptr c_gdt_ptr;
+
+	struct idt_ptr c_idt_ptr;
 } ml_cpu_block;
 
 extern int ml_init_bootcpu(void);
diff --git a/arch/x86_64/include/socks/machine/irq.h b/arch/x86_64/include/socks/machine/irq.h
new file mode 100644
index 0000000..be06f01
--- /dev/null
+++ b/arch/x86_64/include/socks/machine/irq.h
@@ -0,0 +1,5 @@
+#ifndef SOCKS_X86_64_IRQ_H_
+#define SOCKS_X86_64_IRQ_H_
+
+
+#endif
diff --git a/arch/x86_64/init.c b/arch/x86_64/init.c
index 321e2d9..dd96c86 100644
--- a/arch/x86_64/init.c
+++ b/arch/x86_64/init.c
@@ -44,6 +44,7 @@ int ml_init(uintptr_t arg)
 
 	print_kernel_banner();
 
+	early_vm_init();
 	e820_scan(PTR32(mb->mmap_addr), mb->mmap_length);
 
@@ -60,9 +61,9 @@ int ml_init(uintptr_t arg)
 	/* test allocation */
 	vm_page_t *p = vm_page_alloc(VM_PAGE_16K, 0);
 	if (p) {
-	    void *p_ptr = vm_page_get_vaddr(p);
+		void *p_ptr = vm_page_get_vaddr(p);
 
-	    printk("allocated 16K at %p", p_ptr);
+		printk("allocated 16K at %p", p_ptr);
 	} else {
 		printk("alloc failed");
 	}
diff --git a/arch/x86_64/irq.c b/arch/x86_64/irq.c
new file mode 100644
index 0000000..170f24a
--- /dev/null
+++ b/arch/x86_64/irq.c
@@ -0,0 +1,255 @@
+#include <arch/irq.h>	/* NOTE(review): include targets were lost in the paste; reconstructed from usage -- verify */
+#include <arch/io.h>	/* outportb */
+#include <arch/msr.h>	/* write_msr */
+#include <socks/machine/cpu.h>
+#include <socks/printk.h>
+#include <socks/string.h>	/* memset */
+#include <socks/kern.h>	/* ml_halt_cpu */
+
+#define MAX_ISR_HANDLERS 16
+
+extern void _isr0();
+extern void _isr1();
+extern void _isr2();
+extern void _isr3();
+extern void _isr4();
+extern void _isr5();
+extern void _isr6();
+extern void _isr7();
+extern void _isr8();
+extern void _isr9();
+extern void _isr10();
+extern void _isr11();
+extern void _isr12();
+extern void _isr13();
+extern void _isr14();
+extern void _isr15();
+extern void _isr16();
+extern void _isr17();
+extern void _isr18();
+extern void _isr19();
+extern void _isr20();
+extern void _isr21();
+extern void _isr22();
+extern void _isr23();
+extern void _isr24();
+extern void _isr25();
+extern void _isr26();
+extern void _isr27();
+extern void _isr28();
+extern void _isr29();
+extern void _isr30();
+extern void _isr31();
+extern void _isr128();
+
+extern void _irq0();
+extern void _irq1();
+extern void _irq2();
+extern void _irq3();
+extern void _irq4();
+extern void _irq5();
+extern void _irq6();
+extern void _irq7();
+extern void _irq8();
+extern void _irq9();
+extern void _irq10();
+extern void _irq11();
+extern void _irq12();
+extern void _irq13();
+extern void _irq14();
+extern void _irq15();
+
+extern void syscall_gate();
+extern uintptr_t pf_faultptr(void);
+
+static int_hook isr_handlers[NR_IDT_ENTRIES];
+
+static struct idt idt;
+static int idt_initialised = 0;
+
+static void set_idt_gate(struct idt *idt, uint8_t index, uintptr_t base, uint16_t sel, uint8_t flags)	/* NOTE(review): flags is ignored; present/dpl/type are hardcoded below (dpl=3 on every gate) -- confirm intent */
+{
+	idt->i_entries[index].base_low = base & 0xFFFF;
+	idt->i_entries[index].base_middle = (base >> 16) & 0xFFFF;
+	idt->i_entries[index].base_high = (base >> 32) & 0xFFFFFFFF;
+	idt->i_entries[index].selector = sel;
+	idt->i_entries[index].always0 = 0;
+	idt->i_entries[index].present = 1;
+	idt->i_entries[index].dpl = 3;
+	idt->i_entries[index].zero = 0;
+	idt->i_entries[index].type = 0xE;
+	idt->i_entries[index].reserved = 0;
+}
+
+static void gpf_handler(struct cpu_context *regs)
+{
+	int ext = regs->err_no & 1;
+	int table = (regs->err_no >> 1) & 3;	/* FIX(review): was "& 2" -- IDT/TI table indicator is a 2-bit field */
+	int index = (regs->err_no >> 3) & 0x1FFF;	/* FIX(review): was "& 13" -- selector index is 13 bits wide */
+
+	printk("general protection fault (%08x %08x %08x %016llx)", ext, table, index, regs->rip);
+	ml_halt_cpu();
+}
+
+static void pf_handler(struct cpu_context *regs)
+{
+	printk("page fault (%016llx %016llx)", pf_faultptr(), regs->rip);
+	ml_halt_cpu();
+}
+
+#if 0
+static void set_syscall_gate(uintptr_t rip)
+{
+	/* sysret adds 0x10 to this to get cs, and 0x8 to get ss
+	 * note that the CPU should force the RPL to 3 when loading
+	 * the selector by using user_cs | 3. However, this doesn't happen
+	 * in certain scenarios (specifically, QEMU + KVM on a Ryzen 5 1600X).
+	 * It's probably a Magenta bug, but just in case it's not,
+	 * we perform the RPL OR ourselves */
+	uint64_t user_cs = 0x13;
+	uint64_t kernel_cs = 0x8;
+
+	uintptr_t star_reg = 0xC0000081;
+	uintptr_t lstar_reg = 0xC0000082;
+	uintptr_t sfmask_reg = 0xC0000084;
+
+	uint64_t selectors = 0;
+	selectors |= (user_cs) << 48;
+	selectors |= (kernel_cs) << 32;
+
+	/* disable interrupts */
+	uint64_t flag_mask = 0x200;
+
+	write_msr(star_reg, selectors);
+	write_msr(lstar_reg, rip);
+	write_msr(sfmask_reg, flag_mask);
+}
+#endif
+
+static void init_pic()
+{
+	// Remap the PIC
+	outportb(0x20, 0x11);
+	outportb(0xA0, 0x11);
+	outportb(0x21, 0x20);
+	outportb(0xA1, 0x28);
+	outportb(0x21, 0x04);
+	outportb(0xA1, 0x02);
+	outportb(0x21, 0x01);
+	outportb(0xA1, 0x01);
+	outportb(0x21, 0x0);
+	outportb(0xA1, 0x0);
+
+	isr_handlers[13] = gpf_handler;
+	isr_handlers[14] = pf_handler;
+}
+
+static void init_global_idt(void)
+{
+	memset((void *)&idt.i_entries, 0, sizeof idt.i_entries);
+
+	set_idt_gate(&idt, 0, (uintptr_t)_isr0, 0x08, 0x8E);
+	set_idt_gate(&idt, 1, (uintptr_t)_isr1, 0x08, 0x8E);
+	set_idt_gate(&idt, 2, (uintptr_t)_isr2, 0x08, 0x8E);
+	set_idt_gate(&idt, 3, (uintptr_t)_isr3, 0x08, 0x8E);
+	set_idt_gate(&idt, 4, (uintptr_t)_isr4, 0x08, 0x8E);
+	set_idt_gate(&idt, 5, (uintptr_t)_isr5, 0x08, 0x8E);
+	set_idt_gate(&idt, 6, (uintptr_t)_isr6, 0x08, 0x8E);
+	set_idt_gate(&idt, 7, (uintptr_t)_isr7, 0x08, 0x8E);
+	set_idt_gate(&idt, 8, (uintptr_t)_isr8, 0x08, 0x8E);
+	set_idt_gate(&idt, 9, (uintptr_t)_isr9, 0x08, 0x8E);
+	set_idt_gate(&idt, 10, (uintptr_t)_isr10, 0x08, 0x8E);
+	set_idt_gate(&idt, 11, (uintptr_t)_isr11, 0x08, 0x8E);
+	set_idt_gate(&idt, 12, (uintptr_t)_isr12, 0x08, 0x8E);
+	set_idt_gate(&idt, 13, (uintptr_t)_isr13, 0x08, 0x8E);
+	set_idt_gate(&idt, 14, (uintptr_t)_isr14, 0x08, 0x8E);
+	set_idt_gate(&idt, 15, (uintptr_t)_isr15, 0x08, 0x8E);
+	set_idt_gate(&idt, 16, (uintptr_t)_isr16, 0x08, 0x8E);
+	set_idt_gate(&idt, 17, (uintptr_t)_isr17, 0x08, 0x8E);
+	set_idt_gate(&idt, 18, (uintptr_t)_isr18, 0x08, 0x8E);
+	set_idt_gate(&idt, 19, (uintptr_t)_isr19, 0x08, 0x8E);
+	set_idt_gate(&idt, 20, (uintptr_t)_isr20, 0x08, 0x8E);
+	set_idt_gate(&idt, 21, (uintptr_t)_isr21, 0x08, 0x8E);
+	set_idt_gate(&idt, 22, (uintptr_t)_isr22, 0x08, 0x8E);
+	set_idt_gate(&idt, 23, (uintptr_t)_isr23, 0x08, 0x8E);
+	set_idt_gate(&idt, 24, (uintptr_t)_isr24, 0x08, 0x8E);
+	set_idt_gate(&idt, 25, (uintptr_t)_isr25, 0x08, 0x8E);
+	set_idt_gate(&idt, 26, (uintptr_t)_isr26, 0x08, 0x8E);
+	set_idt_gate(&idt, 27, (uintptr_t)_isr27, 0x08, 0x8E);
+	set_idt_gate(&idt, 28, (uintptr_t)_isr28, 0x08, 0x8E);
+	set_idt_gate(&idt, 29, (uintptr_t)_isr29, 0x08, 0x8E);
+	set_idt_gate(&idt, 30, (uintptr_t)_isr30, 0x08, 0x8E);
+	set_idt_gate(&idt, 31, (uintptr_t)_isr31, 0x08, 0x8E);
+
+	init_pic();
+
+	// Install the IRQs
+	set_idt_gate(&idt, 32, (uintptr_t)_irq0, 0x08, 0x8E);
+	set_idt_gate(&idt, 33, (uintptr_t)_irq1, 0x08, 0x8E);
+	set_idt_gate(&idt, 34, (uintptr_t)_irq2, 0x08, 0x8E);
+	set_idt_gate(&idt, 35, (uintptr_t)_irq3, 0x08, 0x8E);
+	set_idt_gate(&idt, 36, (uintptr_t)_irq4, 0x08, 0x8E);
+	set_idt_gate(&idt, 37, (uintptr_t)_irq5, 0x08, 0x8E);
+	set_idt_gate(&idt, 38, (uintptr_t)_irq6, 0x08, 0x8E);
+	set_idt_gate(&idt, 39, (uintptr_t)_irq7, 0x08, 0x8E);
+	set_idt_gate(&idt, 40, (uintptr_t)_irq8, 0x08, 0x8E);
+	set_idt_gate(&idt, 41, (uintptr_t)_irq9, 0x08, 0x8E);
+	set_idt_gate(&idt, 42, (uintptr_t)_irq10, 0x08, 0x8E);
+	set_idt_gate(&idt, 43, (uintptr_t)_irq11, 0x08, 0x8E);
+	set_idt_gate(&idt, 44, (uintptr_t)_irq12, 0x08, 0x8E);
+	set_idt_gate(&idt, 45, (uintptr_t)_irq13, 0x08, 0x8E);
+	set_idt_gate(&idt, 46, (uintptr_t)_irq14, 0x08, 0x8E);
+	set_idt_gate(&idt, 47, (uintptr_t)_irq15, 0x08, 0x8E);
+
+	idt_initialised = 1;
+}
+
+int idt_init(struct idt_ptr *ptr)
+{
+	if (idt_initialised == 0) {
+		init_global_idt();
+	}
+
+	ptr->i_limit = sizeof(idt) - 1;
+	ptr->i_base = (uintptr_t)&idt;
+
+	return 0;
+}
+
+int idt_load(struct idt_ptr *ptr)
+{
+	asm volatile("lidt (%0)" ::"r" (ptr));
+	return 0;
+}
+
+void isr_dispatch(struct cpu_context *regs)
+{
+	int_hook h = isr_handlers[regs->int_no];
+	if (h) {
+		h(regs);
+	}
+}
+
+void irq_dispatch(struct cpu_context *regs)
+{
+	if (regs->int_no >= 40) {
+		outportb(0xA0, 0x20);
+	}
+
+	outportb(0x20, 0x20);
+}
+
+void syscall_dispatch(struct cpu_context *regs)
+{
+
+}
+
+void ml_int_enable()
+{
+	asm volatile("sti");
+}
+
+void ml_int_disable()
+{
+	asm volatile("cli");
+}
diff --git a/arch/x86_64/irqvec.S b/arch/x86_64/irqvec.S
new file mode 100644
index 0000000..85750a0
--- /dev/null
+++ b/arch/x86_64/irqvec.S
@@ -0,0 +1,204 @@
+	.section .text
+	.align 4
+
+.macro ISR_ERROR index
+	.global _isr\index
+	.type _isr\index, @function
+_isr\index:
+	cli
+	pushq $\index
+	jmp isr_common_stub
+.endm
+
+.macro ISR_NO_ERROR index
+	.global _isr\index
+	.type _isr\index, @function
+_isr\index:
+	cli
+	pushq $0
+	pushq $\index
+	jmp isr_common_stub
+.endm
+
+.macro IRQ id byte
+	.global _irq\id
+	.type _irq\id, @function
+_irq\id:
+	cli
+	pushq $\id
+	pushq $\byte
+	jmp irq_common_stub
+.endm
+
+.macro PUSH_REGS
+	push %rax
+	push %rcx
+	push %rdx
+	push %rbx
+	pushq $0
+	push %rbp
+	push %rsi
+	push %rdi
+	push %r8
+	push %r9
+	push %r10
+	push %r11
+	push %r12
+	push %r13
+	push %r14
+	push %r15
+.endm
+
+.macro POP_REGS
+	pop %r15
+	pop %r14
+	pop %r13
+	pop %r12
+	pop %r11
+	pop %r10
+	pop %r9
+	pop %r8
+	pop %rdi
+	pop %rsi
+	pop %rbp
+	add $8, %rsp
+	pop %rbx
+	pop %rdx
+	pop %rcx
+	pop %rax
+.endm
+
+ISR_NO_ERROR 0
+ISR_NO_ERROR 1
+ISR_NO_ERROR 2
+ISR_NO_ERROR 3
+ISR_NO_ERROR 4
+ISR_NO_ERROR 5
+ISR_NO_ERROR 6
+ISR_NO_ERROR 7
+ISR_ERROR 8
+ISR_NO_ERROR 9
+ISR_ERROR 10
+ISR_ERROR 11
+ISR_ERROR 12
+ISR_ERROR 13
+ISR_ERROR 14
+ISR_NO_ERROR 15
+ISR_NO_ERROR 16
+ISR_NO_ERROR 17
+ISR_NO_ERROR 18
+ISR_NO_ERROR 19
+ISR_NO_ERROR 20
+ISR_NO_ERROR 21
+ISR_NO_ERROR 22
+ISR_NO_ERROR 23
+ISR_NO_ERROR 24
+ISR_NO_ERROR 25
+ISR_NO_ERROR 26
+ISR_NO_ERROR 27
+ISR_NO_ERROR 28
+ISR_NO_ERROR 29
+ISR_NO_ERROR 30
+ISR_NO_ERROR 31
+ISR_NO_ERROR 128
+
+IRQ 0, 32
+IRQ 1, 33
+IRQ 2, 34
+IRQ 3, 35
+IRQ 4, 36
+IRQ 5, 37
+IRQ 6, 38
+IRQ 7, 39
+IRQ 8, 40
+IRQ 9, 41
+IRQ 10, 42
+IRQ 11, 43
+IRQ 12, 44
+IRQ 13, 45
+IRQ 14, 46
+IRQ 15, 47
+
+
+	.global isr_common_stub
+	.type isr_common_stub, @function
+
+isr_common_stub:
+	PUSH_REGS
+
+	mov %rsp, %rdi
+	call isr_dispatch
+
+	POP_REGS
+	add $16, %rsp
+	iretq
+
+
+	.global irq_common_stub
+	.type irq_common_stub, @function
+
+irq_common_stub:
+	PUSH_REGS
+
+	mov %rsp, %rdi
+	call irq_dispatch
+
+	POP_REGS
+	add $16, %rsp
+	iretq
+
+
+	.global syscall_gate
+	.type syscall_gate, @function
+
+	.extern syscall_dispatch
+	.type syscall_dispatch, @function
+
+syscall_gate:
+	swapgs
+	movq %rsp, %gs:20	# GS+20 = rsp2 in the current TSS block (user stack storage)
+	movq %gs:4, %rsp	# GS+4 = rsp0 in the current TSS block (per-thread kstack)
+
+	# start building a pf_cpu_context
+	pushq $0x1b
+	pushq %gs:20
+	push %r11
+	push $0x23
+	push %rcx
+
+	pushq $0
+	pushq $0x80
+
+	PUSH_REGS
+
+	mov %rsp, %rdi
+
+	# switch back to user gs while in syscall_dispatch. Interrupts are enabled in syscall_dispatch,
+	# and if the task gets pre-empted, the incoming task will expect %gs to have its usermode value.
+	swapgs
+
+	call syscall_dispatch
+
+	POP_REGS
+
+	add $16, %rsp
+	pop %rcx
+	add $8, %rsp
+	pop %r11
+	add $16, %rsp
+
+	swapgs
+	movq %gs:20, %rsp	# GS+20 = rsp2 in the current TSS block
+	swapgs
+
+	# back to usermode
+	sysretq
+
+
+	.global pf_faultptr
+	.type pf_faultptr, @function
+pf_faultptr:
+	mov %cr2, %rax
+	ret