From 399742cabf9484ce0e0308e588b382f001080798 Mon Sep 17 00:00:00 2001
From: Max Wash
Date: Sun, 15 Mar 2026 14:38:32 +0000
Subject: [PATCH] x86_64: pmap: implement pmap_remove

---
 arch/x86_64/pmap.c | 100 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 99 insertions(+), 1 deletion(-)

diff --git a/arch/x86_64/pmap.c b/arch/x86_64/pmap.c
index 13b9e14..b6bd2f5 100644
--- a/arch/x86_64/pmap.c
+++ b/arch/x86_64/pmap.c
@@ -241,6 +241,104 @@ static kern_status_t do_pmap_add(
 	return KERN_OK;
 }
 
+/* Tear down the mapping for virtual address pv in pmap, for the given
+ * page size.  Mirrors do_pmap_add's walk: PML4T -> PDPT -> PDIR -> PTAB,
+ * stopping early for 1G/2M pages.  A missing intermediate table means
+ * nothing is mapped, which is treated as success, not an error.
+ * NOTE(review): no TLB invalidation here — presumably the caller flushes;
+ * confirm before relying on this for live mappings.
+ */
+static kern_status_t do_pmap_remove(
+	pmap_t pmap,
+	virt_addr_t pv,
+	enum page_size size)
+{
+	unsigned int pml4t_index = BAD_INDEX, pdpt_index = BAD_INDEX,
+		pd_index = BAD_INDEX, pt_index = BAD_INDEX;
+
+	switch (size) {
+	case PS_4K:
+		pml4t_index = (pv >> 39) & 0x1FF;
+		pdpt_index = (pv >> 30) & 0x1FF;
+		pd_index = (pv >> 21) & 0x1FF;
+		pt_index = (pv >> 12) & 0x1FF;
+		break;
+	case PS_2M:
+		pml4t_index = (pv >> 39) & 0x1FF;
+		pdpt_index = (pv >> 30) & 0x1FF;
+		pd_index = (pv >> 21) & 0x1FF;
+		break;
+	case PS_1G:
+		if (!can_use_gbpages) {
+			return KERN_UNSUPPORTED;
+		}
+		pml4t_index = (pv >> 39) & 0x1FF;
+		pdpt_index = (pv >> 30) & 0x1FF;
+		break;
+	default:
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* 1. get PML4T (mandatory) */
+	struct pml4t *pml4t = vm_phys_to_virt(ENTRY_TO_PTR(pmap));
+	if (!pml4t) {
+		return KERN_OK;
+	}
+
+	/* 2. traverse PML4T, get PDPT (mandatory) */
+	struct pdpt *pdpt = NULL;
+	if (!pml4t->p_entries[pml4t_index]) {
+		return KERN_OK;
+	} else {
+		pdpt = vm_phys_to_virt(
+			ENTRY_TO_PTR(pml4t->p_entries[pml4t_index]));
+	}
+
+	/* if we're removing a 1GiB page, we stop here */
+	if (size == PS_1G) {
+		if (pdpt->p_entries[pdpt_index] != 0) {
+			/* this slot points to a pdir, delete it.
+			if this slot points to a hugepage, this does nothing
+			*/
+			delete_pdir(pdpt->p_entries[pdpt_index]);
+		}
+
+		pdpt->p_pages[pdpt_index] = 0;
+
+		return KERN_OK;
+	}
+
+	/* 3. traverse PDPT, get PDIR (optional, 4K and 2M only) */
+	struct pdir *pdir = NULL;
+	if (!pdpt->p_entries[pdpt_index]
+		|| pdpt->p_pages[pdpt_index] & PTE_PAGESIZE) {
+		/* entry is null, or points to a hugepage */
+		return KERN_OK;
+	} else {
+		pdir = vm_phys_to_virt(
+			ENTRY_TO_PTR(pdpt->p_entries[pdpt_index]));
+	}
+
+	/* if we're removing a 2MiB page, we stop here */
+	if (size == PS_2M) {
+		if (pdir->p_entries[pd_index] != 0) {
+			/* this slot points to a ptab, delete it.
+			if this slot points to a hugepage, this does nothing
+			*/
+			delete_ptab(pdir->p_entries[pd_index]);
+		}
+
+		pdir->p_pages[pd_index] = 0;
+		return KERN_OK;
+	}
+
+	/* 4. traverse PDIR, get PTAB (optional, 4K only) */
+	struct ptab *ptab = NULL;
+	if (!pdir->p_entries[pd_index]
+		|| pdir->p_pages[pd_index] & PTE_PAGESIZE) {
+		/* entry is null, or points to a hugepage */
+		return KERN_OK;
+	} else {
+		ptab = vm_phys_to_virt(ENTRY_TO_PTR(pdir->p_entries[pd_index]));
+	}
+
+	ptab->p_pages[pt_index] = 0;
+	return KERN_OK;
+}
+
 pmap_t get_kernel_pmap(void)
 {
 	return kernel_pmap;
@@ -404,7 +509,7 @@ kern_status_t pmap_add_block(
 
 kern_status_t pmap_remove(pmap_t pmap, virt_addr_t p)
 {
-	return KERN_OK;
+	return do_pmap_remove(pmap, p, PS_4K);
 }
 
 kern_status_t pmap_remove_range(pmap_t pmap, virt_addr_t p, size_t len)