x86_64: pmap: map all of physical memory starting at VM_PAGEMAP_BASE

This commit is contained in:
2023-02-07 15:38:18 +00:00
parent e939aae775
commit ad63f0b251
2 changed files with 31 additions and 6 deletions

View File

@@ -1,8 +1,16 @@
#ifndef SOCKS_X86_64_VM_H_
#define SOCKS_X86_64_VM_H_
/* kernel higher-half base virtual address. */
#define VM_KERNEL_VOFFSET 0xFFFFFFFF80000000
/* direct page-mapping region.
NOTE that these are the maximum bounds of this region.
the actual size depends on the amount of physical
memory present. */
#define VM_PAGEMAP_BASE 0xFFFF888000000000
#define VM_PAGEMAP_LIMIT 0xFFFFC87FFFFFFFFF
#define VM_PAGE_SIZE 0x1000
#define VM_PAGE_MASK (VM_PAGE_SIZE-1)
#define VM_PAGE_SHIFT 12

View File

@@ -94,7 +94,7 @@ static void delete_ptab(phys_addr_t pt)
		/* physical address of 0x0, nothing to delete */
		return;
	}
	ptab_t *ptab = vm_phys_to_virt(pt);
	kfree(ptab);
}
@@ -111,13 +111,13 @@ static void delete_pdir(phys_addr_t pd)
		/* physical address of 0x0, nothing to delete */
		return;
	}
	pdir_t *pdir = vm_phys_to_virt(pd);
	for (int i = 0; i < 512; i++) {
		if (pdir->p_pages[i] & PTE_PAGESIZE) {
			/* this is a hugepage, there is nothing to delete */
			continue;
		}
		delete_ptab(pdir->p_entries[i]);
	}
@@ -179,13 +179,13 @@ static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, vm_prot_t prot
		   if this slot points to a hugepage, this does nothing */
			delete_pdir(pdpt->p_entries[pdpt_index]);
		}
		pdpt->p_pages[pdpt_index] = make_pte(pfn, prot, size);
		return KERN_OK;
	}

	/* 3. traverse PDPT, get PDIR (optional, 4K and 2M only) */
	pdir_t *pdir = NULL;
	if (!pdpt->p_entries[pdpt_index] || pdpt->p_pages[pdpt_index] & PTE_PAGESIZE) {
@@ -247,6 +247,23 @@ void pmap_bootstrap(void)
			   hugepage);
	}
phys_addr_t pmem_limit = 0x0;
memblock_iter_t it;
for_each_mem_range(&it, 0x00, UINTPTR_MAX) {
if (it.it_limit > pmem_limit) {
pmem_limit = it.it_limit;
}
}
printk("pmap: initialising direct physical memory mappings");
vbase = VM_PAGEMAP_BASE;
for (size_t i = 0; i < pmem_limit; i += hugepage_sz) {
do_pmap_add(kernel_pmap,
(void *)(vbase + i),
PFN(i),
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_SVR, hugepage);
}
	pmap_switch(kernel_pmap);
	printk("pmap: kernel pmap initialised");
}