Compare commits: 0af35c70ef...main

3 commits: a2e918c428, 399742cabf, cef4af53c9
```diff
@@ -20,6 +20,8 @@
 #include <kernel/util.h>
 #include <kernel/vm.h>
 
+#undef HARDWARE_RNG
+
 #define PTR32(x) ((void *)((uintptr_t)(x)))
 
 /* the physical address of the start of the memblock heap.
```
```diff
@@ -125,17 +127,21 @@ int ml_init(uintptr_t arg)
         reserve_end = bsp.mod_base + bsp.mod_size;
     }
 
+#if defined(HARDWARE_RNG)
     if (ml_hwrng_available()) {
         printk("cpu: hardware RNG available");
         uint64_t seed = ml_hwrng_generate();
         printk("cpu: RNG seed=%zx", seed);
         init_random(seed);
     } else {
+#endif
         printk("cpu: hardware RNG unavailable");
         uint64_t seed = 0xeddc4c8a679dc23f;
         printk("cpu: RNG seed=%zx", seed);
         init_random(seed);
+#if defined(HARDWARE_RNG)
     }
+#endif
 
     early_vm_init(reserve_end);
 
```
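Note how the guards compose: the `#endif` matching the first `#if` lands between `} else {` and the fallback body, so when HARDWARE_RNG is undefined (which the `#undef` added in the first hunk forces) the preprocessor strips the entire `if`/`else` skeleton and only the fallback path is compiled. A sketch of what that configuration reduces to:

```c
/* ml_init()'s RNG section with HARDWARE_RNG undefined: the branch
 * structure disappears, leaving only the fixed-seed fallback. */
printk("cpu: hardware RNG unavailable");
uint64_t seed = 0xeddc4c8a679dc23f;
printk("cpu: RNG seed=%zx", seed);
init_random(seed);
```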
```diff
@@ -241,6 +241,104 @@ static kern_status_t do_pmap_add(
     return KERN_OK;
 }
 
+static kern_status_t do_pmap_remove(
+    pmap_t pmap,
+    virt_addr_t pv,
+    enum page_size size)
+{
+    unsigned int pml4t_index = BAD_INDEX, pdpt_index = BAD_INDEX,
+        pd_index = BAD_INDEX, pt_index = BAD_INDEX;
+
+    switch (size) {
+    case PS_4K:
+        pml4t_index = (pv >> 39) & 0x1FF;
+        pdpt_index = (pv >> 30) & 0x1FF;
+        pd_index = (pv >> 21) & 0x1FF;
+        pt_index = (pv >> 12) & 0x1FF;
+        break;
+    case PS_2M:
+        pml4t_index = (pv >> 39) & 0x1FF;
+        pdpt_index = (pv >> 30) & 0x1FF;
+        pd_index = (pv >> 21) & 0x1FF;
+        break;
+    case PS_1G:
+        if (!can_use_gbpages) {
+            return KERN_UNSUPPORTED;
+        }
+        pml4t_index = (pv >> 39) & 0x1FF;
+        pdpt_index = (pv >> 30) & 0x1FF;
+        break;
+    default:
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    /* 1. get PML4T (mandatory) */
+    struct pml4t *pml4t = vm_phys_to_virt(ENTRY_TO_PTR(pmap));
+    if (!pml4t) {
+        return KERN_OK;
+    }
+
+    /* 2. traverse PML4T, get PDPT (mandatory) */
+    struct pdpt *pdpt = NULL;
+    if (!pml4t->p_entries[pml4t_index]) {
+        return KERN_OK;
+    } else {
+        pdpt = vm_phys_to_virt(
+            ENTRY_TO_PTR(pml4t->p_entries[pml4t_index]));
+    }
+
+    /* if we're unmapping a 1GiB page, we stop here */
+    if (size == PS_1G) {
+        if (pdpt->p_entries[pdpt_index] != 0) {
+            /* this slot points to a pdir, delete it.
+               if this slot points to a hugepage, this does nothing
+             */
+            delete_pdir(pdpt->p_entries[pdpt_index]);
+        }
+
+        pdpt->p_pages[pdpt_index] = 0;
+
+        return KERN_OK;
+    }
+
+    /* 3. traverse PDPT, get PDIR (optional, 4K and 2M only) */
+    struct pdir *pdir = NULL;
+    if (!pdpt->p_entries[pdpt_index]
+        || pdpt->p_pages[pdpt_index] & PTE_PAGESIZE) {
+        /* entry is null, or points to a hugepage */
+        return KERN_OK;
+    } else {
+        pdir = vm_phys_to_virt(
+            ENTRY_TO_PTR(pdpt->p_entries[pdpt_index]));
+    }
+
+    /* if we're unmapping a 2MiB page, we stop here */
+    if (size == PS_2M) {
+        if (pdir->p_entries[pd_index] != 0) {
+            /* this slot points to a ptab, delete it.
+               if this slot points to a hugepage, this does nothing
+             */
+            delete_ptab(pdir->p_entries[pd_index]);
+        }
+
+        pdir->p_pages[pd_index] = 0;
+        return KERN_OK;
+    }
+
+    /* 4. traverse PDIR, get PTAB (optional, 4K only) */
+    struct ptab *ptab = NULL;
+    if (!pdir->p_entries[pd_index]
+        || pdir->p_pages[pd_index] & PTE_PAGESIZE) {
+        /* entry is null, or points to a hugepage */
+        return KERN_OK;
+    } else {
+        ptab = vm_phys_to_virt(ENTRY_TO_PTR(pdir->p_entries[pd_index]));
+    }
+
+    ptab->p_pages[pt_index] = 0;
+    return KERN_OK;
+}
+
 pmap_t get_kernel_pmap(void)
 {
     return kernel_pmap;
```
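For reference, the four indices in `do_pmap_remove` come from slicing the x86-64 virtual address into 9-bit fields, one per paging level, with bits 0-11 left over as the page offset. A minimal standalone sketch of that extraction (the address value and the printed labels are illustrative, not from the change):

```c
#include <stdint.h>
#include <stdio.h>

/* Each paging level indexes a 512-entry (2^9) table, so each index
 * is a 9-bit slice of the virtual address. */
int main(void)
{
    uint64_t pv = 0xffffffff80201000ULL; /* illustrative address */

    unsigned int pml4t_index = (pv >> 39) & 0x1FF; /* bits 39-47 */
    unsigned int pdpt_index = (pv >> 30) & 0x1FF;  /* bits 30-38 */
    unsigned int pd_index = (pv >> 21) & 0x1FF;    /* bits 21-29 */
    unsigned int pt_index = (pv >> 12) & 0x1FF;    /* bits 12-20 */

    printf("PML4T=%u PDPT=%u PD=%u PT=%u\n",
           pml4t_index, pdpt_index, pd_index, pt_index);
    return 0;
}
```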
```diff
@@ -404,7 +502,7 @@ kern_status_t pmap_add_block(
 
 kern_status_t pmap_remove(pmap_t pmap, virt_addr_t p)
 {
-    return KERN_OK;
+    return do_pmap_remove(pmap, p, PS_4K);
 }
 
 kern_status_t pmap_remove_range(pmap_t pmap, virt_addr_t p, size_t len)
```
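With this change `pmap_remove` performs a real unmap, but only at 4 KiB granularity (`PS_4K`); the 2 MiB and 1 GiB paths of the static `do_pmap_remove` helper are not exercised by any of the hunks shown here.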
```diff
@@ -17,6 +17,8 @@ struct vm_area {
      * will avoid the area, but fixed address mappings in this area
      * will succeed. */
     struct vm_object *vma_object;
+    /* the address space that this vm_area is a part of */
+    struct address_space *vma_space;
     /* used to link to vm_object->vo_mappings */
     struct queue_entry vma_object_entry;
     /* the memory protection flags applied to this area */
```
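The new `vma_space` back-pointer exists for the eviction loop added to `vm_object_transfer` at the end of this compare: walking an object's `vo_mappings` yields `vm_area`s, and the unmap call needs each area's owning address space in order to reach its `s_pmap`.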
```diff
@@ -659,6 +659,7 @@ kern_status_t address_space_map(
     }
 
     object_ref(&object->vo_base);
+    area->vma_space = root;
     area->vma_object = object;
     area->vma_prot = prot;
     area->vma_object_offset = object_offset;
```
```diff
@@ -726,6 +727,7 @@ static kern_status_t split_area(
     left->vma_base = left_base;
     left->vma_limit = left_base + left_length - 1;
 
+    right->vma_space = left->vma_space;
     right->vma_object = left->vma_object;
     right->vma_prot = left->vma_prot;
     right->vma_object_offset = right_object_offset;
```
```diff
@@ -983,6 +985,7 @@ kern_status_t address_space_reserve(
         return KERN_NO_MEMORY;
     }
 
+    area->vma_space = space;
     area->vma_base = base;
     area->vma_limit = base + length - 1;
 
```
```diff
@@ -1142,6 +1145,7 @@ static kern_status_t request_missing_page(
         irq_flags);
     if (!pg) {
         vm_object_unlock_irqrestore(object, *irq_flags);
+        printk("page request for %zx failed", addr);
         return KERN_FATAL_ERROR;
     }
 
```
```diff
@@ -1203,8 +1207,12 @@ kern_status_t address_space_demand_map(
         object_offset,
         VMO_ALLOCATE_MISSING_PAGE,
         NULL);
-    // tracek("vm: mapping %07llx -> %10llx", vm_page_get_paddr(pg),
-    // addr);
+    // tracek("vm: mapping %07llx -> %10llx", vm_page_get_paddr(pg), addr);
+
+    if (!pg) {
+        return KERN_FATAL_ERROR;
+    }
+
     kern_status_t status = pmap_add(
         region->s_pmap,
         addr,
```
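The added `if (!pg)` guard closes a gap: if the page-lookup call whose trailing arguments appear above fails to produce a page, the function now bails out with `KERN_FATAL_ERROR` instead of carrying a null `pg` into the `pmap_add` path that follows.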
```diff
@@ -1,4 +1,4 @@
-#include <kernel/printk.h>
+#include <kernel/address-space.h>
 #include <kernel/sched.h>
 #include <kernel/util.h>
 #include <kernel/vm-controller.h>
```
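A note on the include swap: the next hunk (apparently in the same file) removes a `printk()` call, while the eviction loop added to `vm_object_transfer` below dereferences `struct vm_area` and `struct address_space`, which plausibly come in through `kernel/address-space.h`. Assuming that file layout, the swap keeps the include list matched to what the file actually uses.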
```diff
@@ -236,7 +236,6 @@ extern struct vm_object *vm_object_create_in_place(
          i += VM_PAGE_SIZE, offset += VM_PAGE_SIZE) {
         struct vm_page *pg = vm_page_get(i);
         if (!pg) {
-            printk("vm-object: invalid physical address %08llx", i);
             object_unref(&vmo->vo_base);
             return NULL;
         }
```
```diff
@@ -871,8 +870,35 @@ kern_status_t vm_object_transfer(
         moved += VM_PAGE_SIZE;
     }
 
-    /* TODO evict all page table entries that reference the transferred
-     * pages in `src` */
+    struct queue_entry *cur = queue_first(&src->vo_mappings);
+    off_t src_limit = src_offset + count - 1;
+    while (cur) {
+        struct vm_area *area = QUEUE_CONTAINER(
+            struct vm_area,
+            vma_object_entry,
+            cur);
+        off_t area_offset = area->vma_object_offset;
+        off_t area_limit
+            = area_offset + (area->vma_limit - area->vma_base);
+
+        if (src_offset > area_limit || src_limit < area_offset) {
+            cur = queue_next(cur);
+            continue;
+        }
+
+        off_t unmap_offset = MAX(area_offset, src_offset);
+        off_t unmap_limit = MIN(area_limit, src_limit);
+
+        virt_addr_t base
+            = area->vma_base + (unmap_offset - area_offset);
+        virt_addr_t limit = base + (unmap_limit - unmap_offset);
+
+        for (virt_addr_t i = base; i < limit; i += VM_PAGE_SIZE) {
+            pmap_remove(area->vma_space->s_pmap, i);
+        }
+
+        cur = queue_next(cur);
+    }
 
     if (nr_moved) {
         *nr_moved = moved;
```
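The eviction loop that replaces the TODO is an interval-overlap test: each mapping covers object offsets [area_offset, area_limit], the transfer covers [src_offset, src_limit], and any overlap is clamped with MAX/MIN before being translated back into virtual addresses for page-by-page `pmap_remove`. A standalone sketch of that arithmetic (the offsets and base address are made up for illustration):

```c
#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    /* illustrative values: a mapping of object offsets [0x2000, 0x5fff]
     * at virtual base 0x40000000, and a transfer of [0x4000, 0x8fff] */
    int64_t area_offset = 0x2000, area_limit = 0x5fff;
    int64_t src_offset = 0x4000, src_limit = 0x8fff;
    uint64_t vma_base = 0x40000000;

    if (src_offset > area_limit || src_limit < area_offset) {
        puts("no overlap, nothing to unmap");
        return 0;
    }

    /* clamp the overlap, then translate offsets back to virtual addresses */
    int64_t unmap_offset = MAX(area_offset, src_offset);
    int64_t unmap_limit = MIN(area_limit, src_limit);
    uint64_t base = vma_base + (unmap_offset - area_offset);
    uint64_t limit = base + (unmap_limit - unmap_offset);

    /* prints: unmap [0x40002000, 0x40003fff] */
    printf("unmap [0x%llx, 0x%llx]\n",
           (unsigned long long)base, (unsigned long long)limit);
    return 0;
}
```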