diff --git a/arch/x86_64/include/socks/machine/vm.h b/arch/x86_64/include/socks/machine/vm.h
index aa52ae6..11b3418 100644
--- a/arch/x86_64/include/socks/machine/vm.h
+++ b/arch/x86_64/include/socks/machine/vm.h
@@ -1,8 +1,16 @@
 #ifndef SOCKS_X86_64_VM_H_
 #define SOCKS_X86_64_VM_H_
 
+/* kernel higher-half base virtual address. */
 #define VM_KERNEL_VOFFSET 0xFFFFFFFF80000000
 
+/* direct page-mapping region.
+   NOTE that these are the maximum bounds of this region.
+   the actual size depends on the amount of physical
+   memory present. */
+#define VM_PAGEMAP_BASE 0xFFFF888000000000
+#define VM_PAGEMAP_LIMIT 0xFFFFC87FFFFFFFFF
+
 #define VM_PAGE_SIZE 0x1000
 #define VM_PAGE_MASK (VM_PAGE_SIZE-1)
 #define VM_PAGE_SHIFT 12
diff --git a/arch/x86_64/pmap.c b/arch/x86_64/pmap.c
index 6f2586b..6f93c48 100644
--- a/arch/x86_64/pmap.c
+++ b/arch/x86_64/pmap.c
@@ -94,7 +94,7 @@ static void delete_ptab(phys_addr_t pt)
 		/* physical address of 0x0, nothing to delete */
 		return;
 	}
-	
+
 	ptab_t *ptab = vm_phys_to_virt(pt);
 	kfree(ptab);
 }
@@ -111,13 +111,13 @@ static void delete_pdir(phys_addr_t pd)
 		/* physical address of 0x0, nothing to delete */
 		return;
 	}
-	
+
 	pdir_t *pdir = vm_phys_to_virt(pd);
 	for (int i = 0; i < 512; i++) {
 		if (pdir->p_pages[i] & PTE_PAGESIZE) {
 			/* this is a hugepage, there is nothing to delete */
 			continue;
-		}	
+		}
 		delete_ptab(pdir->p_entries[i]);
 	}
 
@@ -179,13 +179,13 @@ static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, vm_prot_t prot
 	   if this slot points to a hugepage, this does nothing */
 		delete_pdir(pdpt->p_entries[pdpt_index]);
 	}
-	
+
 	pdpt->p_pages[pdpt_index] = make_pte(pfn, prot, size);
 	return KERN_OK;
 	}
-	
-	
+
+
 	/* 3.
 	   traverse PDPT, get PDIR (optional, 4K and 2M only) */
 	pdir_t *pdir = NULL;
 	if (!pdpt->p_entries[pdpt_index] || pdpt->p_pages[pdpt_index] & PTE_PAGESIZE) {
@@ -247,6 +247,23 @@ void pmap_bootstrap(void)
 			    hugepage);
 	}
 
+	phys_addr_t pmem_limit = 0x0;
+	memblock_iter_t it;
+	for_each_mem_range(&it, 0x00, UINTPTR_MAX) {
+		if (it.it_limit > pmem_limit) {
+			pmem_limit = it.it_limit;
+		}
+	}
+
+	printk("pmap: initialising direct physical memory mappings");
+	vbase = VM_PAGEMAP_BASE;
+	for (size_t i = 0; i < pmem_limit; i += hugepage_sz) {
+		do_pmap_add(kernel_pmap,
+			    (void *)(vbase + i),
+			    PFN(i),
+			    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_SVR, hugepage);
+	}
+
 	pmap_switch(kernel_pmap);
 	printk("pmap: kernel pmap initialised");
 }