287 lines
6.0 KiB
C
287 lines
6.0 KiB
C
#include <socks/types.h>
|
|
#include <socks/memblock.h>
|
|
#include <socks/vm.h>
|
|
#include <string.h>
|
|
#include <stdio.h>
|
|
|
|
/* array of pages, one for each physical page frame present in RAM;
   allocated from memblock and populated by vm_page_init_array() */
static vm_page_t *page_array = NULL;

/* number of pages stored in page_array (0 until vm_page_init_array runs) */
static size_t page_array_count = 0;
|
/* Pre-calculated page order -> size conversion table.
   Indexed by vm_page_order_t; each entry is the block size in bytes
   for that order. */
static size_t page_order_bytes[] = {
	[VM_PAGE_4K] = 0x1000,
	[VM_PAGE_8K] = 0x2000,
	[VM_PAGE_16K] = 0x4000,
	[VM_PAGE_32K] = 0x8000,
	[VM_PAGE_64K] = 0x10000,
	[VM_PAGE_128K] = 0x20000,
	[VM_PAGE_256K] = 0x40000,
	[VM_PAGE_512K] = 0x80000,
	[VM_PAGE_1M] = 0x100000,
	[VM_PAGE_2M] = 0x200000,
	[VM_PAGE_4M] = 0x400000,
	[VM_PAGE_8M] = 0x800000,
	[VM_PAGE_16M] = 0x1000000,
	[VM_PAGE_32M] = 0x2000000,
	[VM_PAGE_64M] = 0x4000000,
	[VM_PAGE_128M] = 0x8000000,
#if 0
	/* vm can support pages of this size, but
	   vm_page_t only has 4 bits with which to store
	   the page order, which cannot accommodate these
	   larger order numbers */
	[VM_PAGE_256M] = 0x10000000,
	[VM_PAGE_512M] = 0x20000000,
	[VM_PAGE_1G] = 0x40000000,
#endif
};
|
/* temporary: base virtual address of the linear mapping of physical
   memory, consumed by vm_page_get_vaddr() until a real mapping layer
   exists */
static void *tmp_vaddr_base = NULL;

/* Record the virtual address at which physical page frame 0 is mapped. */
void tmp_set_vaddr_base(void *p)
{
	tmp_vaddr_base = p;
}
|
void vm_page_init_array()
|
|
{
|
|
size_t pmem_size = 0;
|
|
|
|
memblock_iter_t it;
|
|
for_each_mem_range (&it, 0x0, UINTPTR_MAX) {
|
|
if (pmem_size < it.it_limit + 1) {
|
|
pmem_size = it.it_limit + 1;
|
|
}
|
|
}
|
|
|
|
size_t nr_pages = pmem_size / VM_PAGE_SIZE;
|
|
if (pmem_size % VM_PAGE_SIZE) {
|
|
nr_pages++;
|
|
}
|
|
|
|
page_array = memblock_alloc(sizeof(vm_page_t) * nr_pages);
|
|
page_array_count = nr_pages;
|
|
printf("page_array covers 0x%zx bytes, %zu page frames\n", pmem_size, pmem_size / VM_PAGE_SIZE);
|
|
printf("page_array is %zu bytes long\n", sizeof(vm_page_t) * nr_pages);
|
|
|
|
for (size_t i = 0; i < nr_pages; i++) {
|
|
memset(&page_array[i], 0x0, sizeof page_array[i]);
|
|
}
|
|
|
|
size_t nr_reserved = 0;
|
|
for_each_reserved_mem_range(&it, 0x0, UINTPTR_MAX) {
|
|
for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
|
|
size_t pfn = i / VM_PAGE_SIZE;
|
|
|
|
page_array[pfn].p_flags |= VM_PAGE_RESERVED;
|
|
nr_reserved++;
|
|
}
|
|
}
|
|
|
|
printf("%zu reserved page frames\n", nr_reserved);
|
|
}
|
|
|
|
/* Look up the vm_page_t describing the frame that contains addr.
   Returns NULL when addr lies beyond the tracked physical memory. */
vm_page_t *vm_page_get(phys_addr_t addr)
{
	size_t pfn = addr / VM_PAGE_SIZE;
	if (pfn >= page_array_count) {
		return NULL;
	}
	return &page_array[pfn];
}
|
/* Physical address of the first byte of the frame described by pg. */
phys_addr_t vm_page_get_paddr(vm_page_t *pg)
{
	size_t pfn = vm_page_get_pfn(pg);
	return pfn * VM_PAGE_SIZE;
}
|
/* Virtual address of the frame, assuming physical memory is linearly
   mapped starting at tmp_vaddr_base (temporary scheme — see
   tmp_set_vaddr_base). */
void *vm_page_get_vaddr(vm_page_t *pg)
{
	size_t offset = vm_page_get_pfn(pg) * VM_PAGE_SIZE;
	char *base = tmp_vaddr_base;
	return base + offset;
}
|
/* Page frame number of pg, i.e. its index within page_array.
   pg must point into page_array. */
size_t vm_page_get_pfn(vm_page_t *pg)
{
	/* Plain pointer subtraction already scales by sizeof *pg. */
	return (size_t)(pg - page_array);
}
|
/* Block size in bytes for the given page order.
 * Returns 0 for an invalid order: negative, above VM_PAGE_MAX_ORDER,
 * or past the end of page_order_bytes (the table deliberately omits
 * orders that vm_page_t's 4-bit order field cannot represent). */
size_t vm_page_order_to_bytes(vm_page_order_t order)
{
	if (order < 0 || order > VM_PAGE_MAX_ORDER) {
		return 0;
	}

	/* Extra guard: if VM_PAGE_MAX_ORDER ever exceeds the table's last
	   initializer, the old code would read out of bounds. */
	if ((size_t)order >= sizeof page_order_bytes / sizeof page_order_bytes[0]) {
		return 0;
	}

	return page_order_bytes[order];
}
|
/* Number of VM_PAGE_SIZE frames making up a block of the given order;
   0 for an out-of-range order. */
phys_addr_t vm_page_order_to_pages(vm_page_order_t order)
{
	if (order >= 0 && order <= VM_PAGE_MAX_ORDER) {
		return page_order_bytes[order] >> VM_PAGE_SHIFT;
	}
	return 0;
}
|
/* Alignment mask for a block of the given order: all address bits
   above the block size. Returns 0 for an out-of-range order. */
vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order)
{
	if (order >= 0 && order <= VM_PAGE_MAX_ORDER) {
		return ~(page_order_bytes[order] - 1);
	}
	return 0;
}
|
size_t vm_bytes_to_pages(size_t bytes)
|
|
{
|
|
if (bytes & (VM_PAGE_SIZE-1)) {
|
|
bytes &= ~(VM_PAGE_SIZE-1);
|
|
bytes += VM_PAGE_SIZE;
|
|
}
|
|
|
|
bytes >>= VM_PAGE_SHIFT;
|
|
return bytes;
|
|
}
|
|
|
|
/* Resolve the zone that owns pg via its node id and zone index.
 * Returns NULL when the node lookup fails or pg's zone index is out
 * of range. */
vm_zone_t *vm_page_get_zone(vm_page_t *pg)
{
	vm_pg_data_t *node = vm_pg_data_get(pg->p_node);
	if (!node) {
		return NULL;	/* was `return 0`; use NULL consistently for pointers */
	}

	if (pg->p_zone >= VM_MAX_ZONES) {
		return NULL;
	}

	return &node->pg_zones[pg->p_zone];
}
|
/* Allocate a block of pages of the given order.
 *
 * Starts in the highest acceptable zone (VM_ZONE_DMA when VM_GET_DMA
 * is set, VM_ZONE_HIGHMEM otherwise) and falls back zone by zone down
 * to VM_ZONE_MIN. Returns NULL when no zone can satisfy the request.
 */
vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags)
{
	/* TODO prefer nodes closer to us */
	vm_pg_data_t *node = vm_pg_data_get(0);

	vm_zone_id_t zone_id =
		(flags & VM_GET_DMA) ? VM_ZONE_DMA : VM_ZONE_HIGHMEM;

	for (;;) {
		vm_page_t *pg = vm_zone_alloc_page(&node->pg_zones[zone_id],
		                                   order, flags);
		if (pg) {
			return pg;
		}

		if (zone_id == VM_ZONE_MIN) {
			return NULL;	/* every candidate zone exhausted */
		}

		zone_id--;	/* fall back to the next lower zone */
	}
}
|
/* Return pg to the free lists of its owning zone.
   Silently does nothing when the zone cannot be resolved. */
void vm_page_free(vm_page_t *pg)
{
	vm_zone_t *zone = vm_page_get_zone(pg);
	if (zone) {
		vm_zone_free_page(zone, pg);
	}
}
|
/* Split the block headed by pg into two buddy halves of the next
 * lower order. On success, *a receives the lower half (pg itself) and
 * *b the upper half; returns 0. Returns -1 when pg is already at the
 * minimum order and cannot be split.
 */
int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b)
{
	if (pg->p_order == VM_PAGE_MIN_ORDER) {
		return -1;
	}

	/* NOTE that we cannot use vm_page_foreach here,
	   as we are modifying the flags that vm_page_foreach
	   uses to determine where a given page block ends */
	/* nr_frames is taken from the ORIGINAL order, so the loop drops
	   every frame of both future halves down one order. */
	size_t nr_frames = vm_page_order_to_pages(pg->p_order);
	for (size_t i = 0; i < nr_frames; i++) {
		pg[i].p_order--;
	}

	/* pg->p_order was just decremented, so the buddy lookup yields the
	   upper half of the block we are splitting. */
	vm_page_t *buddy = vm_page_get_buddy(pg);

	if (pg->p_order == VM_PAGE_MIN_ORDER) {
		/* Halves are now single frames: neither huge nor block heads. */
		pg->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
		buddy->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
	} else {
		/* Each half is still a multi-frame block led by its first frame. */
		pg->p_flags |= VM_PAGE_HEAD | VM_PAGE_HUGE;
		buddy->p_flags |= VM_PAGE_HEAD | VM_PAGE_HUGE;
	}

	*a = pg;
	*b = buddy;

	return 0;
}
|
/* Coalesce two buddy blocks into one block of the next higher order.
 * Returns the head frame of the merged block, or NULL when the pair
 * cannot be merged: differing orders, already at the maximum order,
 * not actually buddies, or mismatched alloc/reserved state.
 */
vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b)
{
	/* Only equal-sized blocks can merge. */
	if (a->p_order != b->p_order) {
		return NULL;
	}

	if (a->p_order == VM_PAGE_MAX_ORDER) {
		return NULL;
	}

	/* They must be each other's buddy (adjacent and properly aligned). */
	if (vm_page_get_buddy(a) != b) {
		return NULL;
	}

	/* Never fold a free block together with an allocated or reserved one. */
	if ((a->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED)) != (b->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED))) {
		return NULL;
	}

	/* make sure that a comes before b */
	if (a > b) {
		vm_page_t *tmp = a;
		a = b;
		b = tmp;
	}

	a->p_order++;

	/* NOTE that we cannot use vm_page_foreach here,
	   as we are modifying the flags that vm_page_foreach
	   uses to determine where a given page block ends */
	/* nr_frames uses the NEW (incremented) order, so the loop covers
	   every frame of both former halves. */
	size_t nr_frames = vm_page_order_to_pages(a->p_order);
	for (size_t i = 0; i < nr_frames; i++) {
		a[i].p_flags &= ~VM_PAGE_HEAD;
		a[i].p_flags |= VM_PAGE_HUGE;
		a[i].p_order = a->p_order;
	}

	/* The first frame leads the merged block. */
	a->p_flags |= VM_PAGE_HEAD;

	return a;
}
|
/* The buddy of pg: the equal-order neighbor block whose physical
   address differs from pg's only in the bit selected by pg's order. */
vm_page_t *vm_page_get_buddy(vm_page_t *pg)
{
	phys_addr_t buddy_paddr =
		vm_page_get_paddr(pg) ^ vm_page_order_to_bytes(pg->p_order);
	return vm_page_get(buddy_paddr);
}
|
/* Return the frame after pg if it is a tail of the same huge block
 * (huge but not a head), otherwise NULL. Also returns NULL when pg is
 * the last tracked frame — the previous version read pg + 1 without a
 * bound, going one past the end of page_array in that case. */
vm_page_t *vm_page_get_next_tail(vm_page_t *pg)
{
	/* Never dereference past the end of page_array. */
	if (vm_page_get_pfn(pg) + 1 >= page_array_count) {
		return NULL;
	}

	vm_page_t *next = pg + 1;
	if (next->p_flags & VM_PAGE_HEAD || !(next->p_flags & VM_PAGE_HUGE)) {
		return NULL;
	}

	return next;
}