/*
 * mango/arch/x86_64/pmap.c
 *
 * x86_64 physical map (pmap) layer: builds and maintains the 4-level
 * page tables (PML4T -> PDPT -> PDIR -> PTAB) for the kernel.
 */
#include <socks/types.h>
#include <socks/memblock.h>
#include <socks/vm.h>
#include <socks/printk.h>
#include <socks/status.h>
#include <socks/compiler.h>
#include <socks/pmap.h>
/* some helpful datasize constants */
#define C_1GiB 0x40000000ULL
#define C_2GiB (2 * C_1GiB)
#define BAD_INDEX ((unsigned int)-1)
#define PTR_TO_ENTRY(x) (((x) & ~VM_PAGE_MASK) | PTE_PRESENT | PTE_RW)
#define ENTRY_TO_PTR(x) ((x) & ~VM_PAGE_MASK)
#define PFN(x) ((x) >> VM_PAGE_SHIFT)
static int can_use_gbpages = 0;
static pmap_t kernel_pmap;
/* Translate a page_size enumerator into its size in bytes.
   Unknown values yield 0 so callers can detect bad input. */
static size_t ps_size(enum page_size ps)
{
	if (ps == PS_4K)
		return 0x1000;       /* 4 KiB */
	if (ps == PS_2M)
		return 0x200000;     /* 2 MiB */
	if (ps == PS_1G)
		return 0x40000000;   /* 1 GiB */
	return 0;
}
2023-02-07 12:10:12 +00:00
static pmap_t alloc_pmap()
{
struct pml4t *p = kzalloc(sizeof *p, 0);
return vm_virt_to_phys(p);
}
/*
 * Build a page-table entry for page frame pfn with the requested
 * protection and page size.
 *
 * pfn is always expressed in 4KiB frames; for 2MiB/1GiB entries it is
 * converted to the corresponding large-frame number here, so callers
 * never pre-shift.  Returns 0 for an unrecognized page size.
 *
 * Fix: v was initialized to pfn, a dead store — every case overwrites
 * it and the default path returns 0 directly.  Initialize to 0 so a
 * future fall-through cannot leak raw pfn bits into an entry.
 */
static pte_t make_pte(pfn_t pfn, enum vm_prot prot, enum page_size size)
{
	pte_t v = 0;
	switch (size) {
	case PS_1G:
		/* pfn_t is in terms of 4KiB pages, convert to 1GiB page frame number */
		pfn >>= 18;
		v = (pfn & 0x3FFFFF) << 30;
		break;
	case PS_2M:
		/* pfn_t is in terms of 4KiB pages, convert to 2MiB page frame number */
		pfn >>= 9;
		v = (pfn & 0x7FFFFFFF) << 21;
		break;
	case PS_4K:
		v = (pfn & 0xFFFFFFFFFF) << 12;
		break;
	default:
		return 0;
	}
	v |= PTE_PRESENT;
	if (size != PS_4K) {
		/* 2MiB/1GiB mappings need the PS bit set in the PDE/PDPTE */
		v |= PTE_PAGESIZE;
	}
	if (prot & VM_PROT_WRITE) {
		v |= PTE_RW;
	}
	if (!(prot & VM_PROT_EXEC)) {
		/* execute not requested: set the no-execute bit */
		v |= PTE_NX;
	}
	if ((prot & VM_PROT_USER) && !(prot & VM_PROT_SVR)) {
		/* user-accessible only when not explicitly supervisor-only */
		v |= PTE_USR;
	}
	return v;
}
/*
 * Release the page table referenced by a page-directory entry.
 * Hugepage entries and empty (zero) entries reference no table and
 * are silently ignored.
 */
static void delete_ptab(phys_addr_t pt)
{
	phys_addr_t pa;

	if (pt & PTE_PAGESIZE) {
		/* 2MiB hugepage entry: no page table behind it */
		return;
	}
	pa = pt & ~VM_PAGE_MASK;
	if (pa == 0) {
		/* empty slot, nothing allocated */
		return;
	}
	struct ptab *tab = vm_phys_to_virt(pa);
	kfree(tab);
}
/*
 * Free a page directory referenced by a PDPT entry, including every
 * page table it points to.  1GiB hugepage entries and empty (zero)
 * entries reference no directory and are ignored.
 *
 * BUG FIX: the flag-stripping mask was ~0x1FFFFF, which also cleared
 * address bits 12-20.  The pdir comes from kzalloc() and is only
 * 4KiB-aligned, so those bits are part of its physical address and
 * the old mask could free (and iterate) the wrong page.  Use
 * ~VM_PAGE_MASK, matching delete_ptab() and ENTRY_TO_PTR().
 */
static void delete_pdir(phys_addr_t pd)
{
	if (pd & PTE_PAGESIZE) {
		/* this entry points to a hugepage, nothing to delete */
		return;
	}
	pd &= ~VM_PAGE_MASK;
	if (!pd) {
		/* physical address of 0x0, nothing to delete */
		return;
	}
	struct pdir *pdir = vm_phys_to_virt(pd);
	for (int i = 0; i < 512; i++) {
		if (pdir->p_pages[i] & PTE_PAGESIZE) {
			/* this is a hugepage, there is nothing to delete */
			continue;
		}
		delete_ptab(pdir->p_entries[i]);
	}
	kfree(pdir);
}
/*
 * Install a single mapping of the given size into pmap.
 *
 * Walks (and lazily allocates) the 4-level paging structure for the
 * virtual address p, writing a PTE for 4KiB page frame pfn at the
 * level implied by size: PDPT slot for 1GiB, PDIR slot for 2MiB,
 * PTAB slot for 4KiB.  Replacing an existing lower-level table with a
 * hugepage frees the subtree it covered.
 *
 * Returns KERN_OK on success, KERN_UNSUPPORTED for PS_1G when the CPU
 * lacks gigabyte pages, KERN_INVALID_ARGUMENT for an unknown size or
 * an unresolvable pmap.
 *
 * NOTE(review): the kzalloc() calls below are never checked for NULL;
 * an out-of-memory condition would dereference a null table pointer.
 * NOTE(review): p_entries[] and p_pages[] appear to be two views of
 * the same slots (union-style) — confirm against the struct layout.
 */
static kern_status_t do_pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot prot, enum page_size size)
{
	uintptr_t pv = (uintptr_t)p;
	/* Per-level table indices (9 bits each); levels below the target
	   page size stay BAD_INDEX and are never read. */
	unsigned int
		pml4t_index = BAD_INDEX,
		pdpt_index = BAD_INDEX,
		pd_index = BAD_INDEX,
		pt_index = BAD_INDEX;
	switch (size) {
	case PS_4K:
		pml4t_index = (pv >> 39) & 0x1FF;
		pdpt_index = (pv >> 30) & 0x1FF;
		pd_index = (pv >> 21) & 0x1FF;
		pt_index = (pv >> 12) & 0x1FF;
		break;
	case PS_2M:
		pml4t_index = (pv >> 39) & 0x1FF;
		pdpt_index = (pv >> 30) & 0x1FF;
		pd_index = (pv >> 21) & 0x1FF;
		break;
	case PS_1G:
		if (!can_use_gbpages) {
			/* CPU does not support 1GiB pages */
			return KERN_UNSUPPORTED;
		}
		pml4t_index = (pv >> 39) & 0x1FF;
		pdpt_index = (pv >> 30) & 0x1FF;
		break;
	default:
		return KERN_INVALID_ARGUMENT;
	}
	/* 1. get PML4T (mandatory) */
	struct pml4t *pml4t = vm_phys_to_virt(ENTRY_TO_PTR(pmap));
	if (!pml4t) {
		return KERN_INVALID_ARGUMENT;
	}
	/* 2. traverse PML4T, get PDPT (mandatory); allocate on demand */
	struct pdpt *pdpt = NULL;
	if (!pml4t->p_entries[pml4t_index]) {
		pdpt = kzalloc(sizeof *pdpt, 0);
		pml4t->p_entries[pml4t_index] = PTR_TO_ENTRY(vm_virt_to_phys(pdpt));
	} else {
		pdpt = vm_phys_to_virt(ENTRY_TO_PTR(pml4t->p_entries[pml4t_index]));
	}
	/* if we're mapping a 1GiB page, we stop here */
	if (size == PS_1G) {
		if (pdpt->p_entries[pdpt_index] != 0) {
			/* this slot points to a pdir, delete it.
			   if this slot points to a hugepage, this does nothing */
			delete_pdir(pdpt->p_entries[pdpt_index]);
		}
		pdpt->p_pages[pdpt_index] = make_pte(pfn, prot, size);
		return KERN_OK;
	}
	/* 3. traverse PDPT, get PDIR (optional, 4K and 2M only);
	   a 1GiB hugepage in this slot is demoted by allocating a fresh
	   pdir over it (hugepages own no subtree, so nothing leaks) */
	struct pdir *pdir = NULL;
	if (!pdpt->p_entries[pdpt_index] || pdpt->p_pages[pdpt_index] & PTE_PAGESIZE) {
		/* entry is null, or points to a hugepage */
		pdir = kzalloc(sizeof *pdir, 0);
		pdpt->p_entries[pdpt_index] = PTR_TO_ENTRY(vm_virt_to_phys(pdir));
	} else {
		pdir = vm_phys_to_virt(ENTRY_TO_PTR(pdpt->p_entries[pdpt_index]));
	}
	/* if we're mapping a 2MiB page, we stop here */
	if (size == PS_2M) {
		if (pdir->p_entries[pd_index] != 0) {
			/* this slot points to a ptab, delete it.
			   if this slot points to a hugepage, this does nothing */
			delete_ptab(pdir->p_entries[pd_index]);
		}
		pdir->p_pages[pd_index] = make_pte(pfn, prot, size);
		return KERN_OK;
	}
	/* 4. traverse PDIR, get PTAB (optional, 4K only); same demotion
	   strategy as above for a 2MiB hugepage in this slot */
	struct ptab *ptab = NULL;
	if (!pdir->p_entries[pd_index] || pdir->p_pages[pd_index] & PTE_PAGESIZE) {
		/* entry is null, or points to a hugepage */
		ptab = kzalloc(sizeof *ptab, 0);
		pdir->p_entries[pd_index] = PTR_TO_ENTRY(vm_virt_to_phys(ptab));
	} else {
		ptab = vm_phys_to_virt(ENTRY_TO_PTR(pdir->p_entries[pd_index]));
	}
	ptab->p_pages[pt_index] = make_pte(pfn, prot, size);
	return KERN_OK;
}
/* Return the kernel pmap installed by pmap_bootstrap(). */
pmap_t get_kernel_pmap(void)
{
	return kernel_pmap;
}
void pmap_bootstrap(void)
{
can_use_gbpages = gigabyte_pages();
printk("pmap: gigabyte pages %sabled", can_use_gbpages == 1 ? "en" : "dis");
enable_nx();
printk("pmap: NX protection enabled");
enum page_size hugepage = PS_2M;
if (can_use_gbpages) {
hugepage = PS_1G;
}
size_t hugepage_sz = ps_size(hugepage);
2023-02-07 12:10:12 +00:00
kernel_pmap = alloc_pmap();
2023-02-07 12:10:12 +00:00
/* map 2GiB at the end of the address space to
replace the mapping created by start_32 and allow access to
the kernel and memblock-allocated data. */
uintptr_t vbase = VM_KERNEL_VOFFSET;
for (size_t i = 0; i < C_2GiB; i += hugepage_sz) {
do_pmap_add(kernel_pmap,
(void *)(vbase + i),
PFN(i),
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXEC | VM_PROT_SVR,
hugepage);
}
phys_addr_t pmem_limit = 0x0;
struct memblock_iter it;
for_each_mem_range(&it, 0x00, UINTPTR_MAX) {
if (it.it_limit > pmem_limit) {
pmem_limit = it.it_limit;
}
}
vbase = VM_PAGEMAP_BASE;
for (size_t i = 0; i < pmem_limit; i += hugepage_sz) {
do_pmap_add(kernel_pmap,
(void *)(vbase + i),
PFN(i),
VM_PROT_READ | VM_PROT_WRITE | VM_PROT_SVR, hugepage);
}
pmap_switch(kernel_pmap);
}
/* TODO: unimplemented — always returns the null pmap (0). */
pmap_t pmap_create(void)
{
	return 0;
}
/* TODO: unimplemented — tables owned by pmap are currently never freed. */
void pmap_destroy(pmap_t pmap)
{
}
/*
 * Map one page frame at virtual address p in pmap.
 *
 * PMAP_HUGEPAGE selects a 2MiB mapping; otherwise a 4KiB page is
 * used.  Status propagates straight from do_pmap_add().
 */
kern_status_t pmap_add(pmap_t pmap, void *p, pfn_t pfn, enum vm_prot prot, enum pmap_flags flags)
{
	enum page_size ps = (flags & PMAP_HUGEPAGE) ? PS_2M : PS_4K;
	return do_pmap_add(pmap, p, pfn, prot, ps);
}
/* TODO: unimplemented — reports KERN_OK without mapping anything. */
kern_status_t pmap_add_block(pmap_t pmap, void *p, pfn_t pfn, size_t len, enum vm_prot prot, enum pmap_flags flags)
{
	return KERN_OK;
}
/* TODO: unimplemented — reports KERN_OK without removing any mapping. */
kern_status_t pmap_remove(pmap_t pmap, void *p)
{
	return KERN_OK;
}
/* TODO: unimplemented — reports KERN_OK without removing any mappings. */
kern_status_t pmap_remove_range(pmap_t pmap, void *p, size_t len)
{
	return KERN_OK;
}