/* mango/vm/page.c — physical page bookkeeping for the VM subsystem */
#include <socks/types.h>
#include <socks/memblock.h>
#include <socks/printk.h>
#include <socks/vm.h>
#include <socks/libc/string.h>
/* Pre-calculated page order -> size conversion table.
 * Maps each vm_page_order_t to the block size in bytes (4K doubling
 * up to 64G); indexed directly by order in the helpers below. */
static size_t page_order_bytes[] = {
	[VM_PAGE_4K] = 0x1000,
	[VM_PAGE_8K] = 0x2000,
	[VM_PAGE_16K] = 0x4000,
	[VM_PAGE_32K] = 0x8000,
	[VM_PAGE_64K] = 0x10000,
	[VM_PAGE_128K] = 0x20000,
	[VM_PAGE_256K] = 0x40000,
	[VM_PAGE_512K] = 0x80000,
	[VM_PAGE_1M] = 0x100000,
	[VM_PAGE_2M] = 0x200000,
	[VM_PAGE_4M] = 0x400000,
	[VM_PAGE_8M] = 0x800000,
	[VM_PAGE_16M] = 0x1000000,
	[VM_PAGE_32M] = 0x2000000,
	[VM_PAGE_64M] = 0x4000000,
	[VM_PAGE_128M] = 0x8000000,
	/* vm can support pages of this size, but
	   vm_page_t only has 4 bits with which to store
	   the page order, which cannot accommodate these
	   larger order numbers */
	/* NOTE(review): the 4G+ entries only fit in size_t on 64-bit
	   targets — confirm this table is never built for 32-bit */
	[VM_PAGE_256M] = 0x10000000,
	[VM_PAGE_512M] = 0x20000000,
	[VM_PAGE_1G] = 0x40000000,
	[VM_PAGE_2G] = 0x80000000,
	[VM_PAGE_4G] = 0x100000000,
	[VM_PAGE_8G] = 0x200000000,
	[VM_PAGE_16G] = 0x400000000,
	[VM_PAGE_32G] = 0x800000000,
	[VM_PAGE_64G] = 0x1000000000,
};
/* Translate a kernel virtual address to its physical address.
 *
 * Two directly-mapped windows are handled: the memblock allocator
 * region (which carries its own translation) and the linear pagemap
 * region (a fixed offset). Anything else currently resolves to 0
 * until a proper page-table walk is implemented.
 */
phys_addr_t vm_virt_to_phys(void *p)
{
	uintptr_t va = (uintptr_t)p;

	if (va >= memblock.m_alloc_start && va < memblock.m_alloc_end)
		return memblock_virt_to_phys(p);

	if (va >= VM_PAGEMAP_BASE && va <= VM_PAGEMAP_LIMIT)
		return va - VM_PAGEMAP_BASE;

	/* TODO use pmap to find the physical address */
	return 0;
}
/* Translate a physical address to a kernel virtual address.
 *
 * Physical addresses inside memblock's region (found by undoing its
 * virtual offset) use memblock's own translation; everything else is
 * mapped through the linear pagemap window.
 */
void *vm_phys_to_virt(phys_addr_t p)
{
	phys_addr_t mb_lo = memblock.m_alloc_start - memblock.m_voffset;
	phys_addr_t mb_hi = memblock.m_alloc_end - memblock.m_voffset;

	if (p >= mb_lo && p < mb_hi)
		return memblock_phys_to_virt(p);

	return (void *)(VM_PAGEMAP_BASE + p);
}
/* Look up the vm_page_t that describes the page at physical address
 * addr, dispatching on the active memory model. Returns NULL if the
 * model is unrecognized. */
vm_page_t *vm_page_get(phys_addr_t addr)
{
	switch (vm_memory_model()) {
	case VM_MODEL_FLAT:
		return vm_page_get_flat(addr);
	case VM_MODEL_SPARSE:
		return vm_page_get_sparse(addr);
	default:
		break;
	}
	return NULL;
}
/* Physical base address of the frame described by pg.
 *
 * The pfn is widened to phys_addr_t BEFORE the multiply: on a target
 * where size_t is narrower than phys_addr_t, pfn * VM_PAGE_SIZE would
 * otherwise be computed (and truncated) in size_t. */
phys_addr_t vm_page_get_paddr(vm_page_t *pg)
{
	return (phys_addr_t)vm_page_get_pfn(pg) * VM_PAGE_SIZE;
}
/* Kernel virtual address of the frame described by pg.
 *
 * Goes through vm_page_get_paddr() rather than repeating the
 * pfn * VM_PAGE_SIZE conversion, so the pfn->paddr math lives in
 * exactly one place. */
void *vm_page_get_vaddr(vm_page_t *pg)
{
	return vm_phys_to_virt(vm_page_get_paddr(pg));
}
/* Page frame number of pg, dispatching on the active memory model.
 * Returns 0 if the model is unrecognized. */
size_t vm_page_get_pfn(vm_page_t *pg)
{
	switch (vm_memory_model()) {
	case VM_MODEL_FLAT:
		return vm_page_get_pfn_flat(pg);
	case VM_MODEL_SPARSE:
		return vm_page_get_pfn_sparse(pg);
	default:
		break;
	}
	return 0;
}
/* Size in bytes of a block of the given order, or 0 when order lies
 * outside the [VM_PAGE_4K, VM_PAGE_64G] range covered by the table. */
size_t vm_page_order_to_bytes(vm_page_order_t order)
{
	if (order >= VM_PAGE_4K && order <= VM_PAGE_64G)
		return page_order_bytes[order];
	return 0;
}
/* Number of VM_PAGE_SIZE frames in a block of the given order, or 0
 * for out-of-range orders (order_to_bytes yields 0 there too). */
phys_addr_t vm_page_order_to_pages(vm_page_order_t order)
{
	if (order < VM_PAGE_4K || order > VM_PAGE_64G)
		return 0;
	return vm_page_order_to_bytes(order) >> VM_PAGE_SHIFT;
}
/* Alignment mask for a block of the given order: all address bits
 * above the block size (e.g. ~0xFFF for a 4K order). Returns 0 for
 * orders outside [0, VM_PAGE_MAX_ORDER]. */
vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order)
{
	if (order >= 0 && order <= VM_PAGE_MAX_ORDER)
		return ~(page_order_bytes[order] - 1);
	return 0;
}
/* Number of VM_PAGE_SIZE pages needed to hold bytes, rounding up.
 *
 * Computed as shift-then-adjust rather than the previous
 * round-up-then-shift, so a size within one page of SIZE_MAX no
 * longer wraps to zero when VM_PAGE_SIZE is added before shifting.
 * Results are identical for all non-wrapping inputs. */
size_t vm_bytes_to_pages(size_t bytes)
{
	size_t pages = bytes >> VM_PAGE_SHIFT;
	if (bytes & (VM_PAGE_SIZE - 1)) {
		pages++;
	}
	return pages;
}
/* Return the zone that owns pg, or NULL when pg's node cannot be
 * resolved or its zone index is out of range. */
vm_zone_t *vm_page_get_zone(vm_page_t *pg)
{
	vm_pg_data_t *node = vm_pg_data_get(pg->p_node);
	if (!node) {
		return NULL; /* was a bare 0; use NULL consistently for pointers */
	}
	if (pg->p_zone >= VM_MAX_ZONES) {
		return NULL;
	}
	return &node->pg_zones[pg->p_zone];
}
/* Allocate a 2^order block of pages.
 *
 * Starts in the most permissive zone the flags allow (HIGHMEM, or DMA
 * when VM_GET_DMA is set) and falls back zone by zone toward
 * VM_ZONE_MIN until an allocation succeeds. Returns NULL when every
 * eligible zone is exhausted or the node cannot be resolved. */
vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags)
{
	/* TODO prefer nodes closer to us */
	vm_pg_data_t *node = vm_pg_data_get(0);
	if (!node) {
		/* guard added: vm_page_get_zone checks this same lookup,
		   so don't dereference an unresolved node here either */
		return NULL;
	}
	vm_zone_id_t zone_id = VM_ZONE_HIGHMEM;
	if (flags & VM_GET_DMA) {
		zone_id = VM_ZONE_DMA;
	}
	while (1) {
		vm_zone_t *z = &node->pg_zones[zone_id];
		vm_page_t *pg = vm_zone_alloc_page(z, order, flags);
		if (pg) {
			return pg;
		}
		if (zone_id == VM_ZONE_MIN) {
			break;
		}
		zone_id--;
	}
	return NULL;
}
/* Hand pg back to the zone it came from; a page whose zone cannot be
 * resolved is silently ignored. */
void vm_page_free(vm_page_t *pg)
{
	vm_zone_t *zone = vm_page_get_zone(pg);
	if (zone) {
		vm_zone_free_page(zone, pg);
	}
}
/* Split the block headed by pg into two buddy blocks of the
 * next-lower order.
 *
 * On success *a receives pg (the lower half), *b receives its new
 * buddy (the upper half), and 0 is returned. Returns -1 when pg is
 * already at VM_PAGE_MIN_ORDER and cannot be split further. */
int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b)
{
	if (pg->p_order == VM_PAGE_MIN_ORDER) {
		return -1;
	}
	/* NOTE that we cannot use vm_page_foreach here,
	   as we are modifying the flags that vm_page_foreach
	   uses to determine where a given page block ends */
	/* Drop every frame of the original block to the halved order
	   FIRST, so the buddy lookup below operates at the post-split
	   order (the buddy is pg's address XOR the new block size). */
	size_t nr_frames = vm_page_order_to_pages(pg->p_order);
	for (size_t i = 0; i < nr_frames; i++) {
		pg[i].p_order--;
	}
	vm_page_t *buddy = vm_page_get_buddy(pg);
	if (pg->p_order == VM_PAGE_MIN_ORDER) {
		/* the halves are now single frames: no head/huge bookkeeping */
		pg->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
		buddy->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
	} else {
		/* each half is still a multi-frame block with its own head page */
		pg->p_flags |= VM_PAGE_HEAD | VM_PAGE_HUGE;
		buddy->p_flags |= VM_PAGE_HEAD | VM_PAGE_HUGE;
	}
	*a = pg;
	*b = buddy;
	return 0;
}
/* Merge buddy blocks a and b into one block of the next-higher order.
 *
 * Returns the head page of the merged block, or NULL when the merge
 * is not possible: differing orders, already at VM_PAGE_MAX_ORDER,
 * the blocks are not actually buddies, or their alloc/reserved state
 * does not match. */
vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b)
{
	if (a->p_order != b->p_order) {
		return NULL;
	}
	if (a->p_order == VM_PAGE_MAX_ORDER) {
		return NULL;
	}
	if (vm_page_get_buddy(a) != b) {
		return NULL;
	}
	/* both halves must agree on their alloc/reserved state; merging a
	   free block with an allocated one would corrupt both */
	if ((a->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED)) != (b->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED))) {
		return NULL;
	}
	/* make sure that a comes before b */
	if (a > b) {
		vm_page_t *tmp = a;
		a = b;
		b = tmp;
	}
	a->p_order++;
	/* NOTE that we cannot use vm_page_foreach here,
	   as we are modifying the flags that vm_page_foreach
	   uses to determine where a given page block ends */
	/* nr_frames is taken at the NEW order, so the loop covers both
	   former halves; every frame becomes a non-head member first */
	size_t nr_frames = vm_page_order_to_pages(a->p_order);
	for (size_t i = 0; i < nr_frames; i++) {
		a[i].p_flags &= ~VM_PAGE_HEAD;
		a[i].p_flags |= VM_PAGE_HUGE;
		a[i].p_order = a->p_order;
	}
	/* then re-mark the first frame as the head of the merged block */
	a->p_flags |= VM_PAGE_HEAD;
	return a;
}
/* Return the buddy of pg: the same-order block whose physical address
 * differs from pg's only in the bit corresponding to the block size. */
vm_page_t *vm_page_get_buddy(vm_page_t *pg)
{
	size_t block_bytes = vm_page_order_to_bytes(pg->p_order);
	return vm_page_get(vm_page_get_paddr(pg) ^ block_bytes);
}
/* Return the page after pg if it is a tail of the same huge block,
 * or NULL when pg is the last frame of its block. */
vm_page_t *vm_page_get_next_tail(vm_page_t *pg)
{
	vm_page_t *next = &pg[1];

	if (next->p_flags & VM_PAGE_HEAD) {
		return NULL; /* start of the following block */
	}
	if (!(next->p_flags & VM_PAGE_HUGE)) {
		return NULL; /* not part of a multi-frame block */
	}
	return next;
}