sandbox: vm: add page splitting, merging, and allocation
This commit is contained in:
@@ -174,6 +174,24 @@ int memory_test(void)
|
||||
printf("all pages:\n");
|
||||
print_all_pages();
|
||||
|
||||
vm_page_t *pg = vm_page_alloc(VM_PAGE_128K, 0);
|
||||
printf("allocated 128K at 0x%lx\n", vm_page_get_paddr(pg));
|
||||
|
||||
vm_page_t *a, *b;
|
||||
if (vm_page_split(pg, &a, &b) == 0) {
|
||||
printf("split page into two 64K pages at 0x%lx and 0x%lx:\n", vm_page_get_paddr(a), vm_page_get_paddr(b));
|
||||
|
||||
assert(a->p_flags & VM_PAGE_HEAD);
|
||||
assert(b->p_flags & VM_PAGE_HEAD);
|
||||
|
||||
size_t nr_frames = vm_page_order_to_pages(VM_PAGE_128K);
|
||||
for (size_t i = 0; i < nr_frames; i++) {
|
||||
printf(" 0x%lx: order:%u, flags:0x%x\n", vm_page_get_paddr(a + i), a[i].p_order, a[i].p_flags);
|
||||
assert(a[i].p_flags & VM_PAGE_HUGE);
|
||||
assert((a[i].p_flags & VM_PAGE_RESERVED) == 0);
|
||||
}
|
||||
}
|
||||
|
||||
munmap(system_memory, MB_TO_BYTES(MEMORY_SIZE_MB));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -17,6 +17,8 @@
|
||||
#define VM_PAGE_SIZE 0x1000
|
||||
#define VM_PAGE_SHIFT 12
|
||||
|
||||
#define VM_PAGE_IS_RESERVED(pg) ((pg)->p_flags & VM_PAGE_RESERVED)
|
||||
|
||||
typedef phys_addr_t vm_alignment_t;
|
||||
typedef unsigned int vm_node_id_t;
|
||||
|
||||
@@ -133,9 +135,14 @@ extern vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order);
|
||||
extern vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags);
|
||||
extern void vm_page_free(vm_page_t *pg);
|
||||
|
||||
extern int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b);
|
||||
extern vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b);
|
||||
extern vm_page_t *vm_page_get_buddy(vm_page_t *pg);
|
||||
|
||||
extern size_t vm_bytes_to_pages(size_t bytes);
|
||||
|
||||
extern void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info);
|
||||
extern vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags);
|
||||
extern void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg);
|
||||
|
||||
#endif
|
||||
|
||||
@@ -145,3 +145,118 @@ vm_zone_t *vm_page_get_zone(vm_page_t *pg)
|
||||
|
||||
return &node->pg_zones[pg->p_zone];
|
||||
}
|
||||
|
||||
|
||||
/* Allocate a physical page of the given order, preferring the highest
   usable zone and falling back to lower zones on failure.
   Returns NULL when no zone can satisfy the request. */
vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags)
{
	/* TODO prefer nodes closer to us */
	vm_pg_data_t *node = vm_pg_data_get(0);

	/* DMA requests must start (and stay) at the DMA zone; everything
	   else starts in highmem and walks down on failure. */
	vm_zone_id_t zone_id = (flags & VM_GET_DMA) ? VM_ZONE_DMA : VM_ZONE_HIGHMEM;

	for (;;) {
		vm_page_t *pg = vm_zone_alloc_page(&node->pg_zones[zone_id], order, flags);
		if (pg != NULL) {
			return pg;
		}

		if (zone_id == VM_ZONE_MIN) {
			/* exhausted every zone down to the lowest */
			return NULL;
		}

		zone_id--;
	}
}
|
||||
|
||||
/* Return a page to the free lists of the zone that owns it.
   A page with no resolvable zone is silently ignored. */
void vm_page_free(vm_page_t *pg)
{
	vm_zone_t *owner = vm_page_get_zone(pg);
	if (owner != NULL) {
		vm_zone_free_page(owner, pg);
	}
}
|
||||
|
||||
/* Split a page into its two half-sized buddies.
   On success *a receives the lower half (pg itself), *b the upper half,
   and 0 is returned. Returns -1 if the page is already minimum order.

   Fix: the original demotion loop read `pg->p_order` inside the loop
   after mutating `pg[0].p_order` on the first iteration, so every frame
   after the first was demoted by two orders instead of one. Cache the
   new order before touching any frame. */
int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b)
{
	if (pg->p_order == VM_PAGE_MIN_ORDER) {
		/* 4K pages cannot be split further */
		return -1;
	}

	vm_page_order_t new_order = pg->p_order - 1;

	/* demote every frame of the original block (both future halves) */
	size_t nr_frames = vm_page_order_to_pages(pg->p_order);
	for (size_t i = 0; i < nr_frames; i++) {
		pg[i].p_order = new_order;
	}

	/* pg now has the new order, so this yields the buddy at that order */
	vm_page_t *buddy = vm_page_get_buddy(pg);

	if (new_order == VM_PAGE_MIN_ORDER) {
		/* the halves are plain 4K pages: no huge-page bookkeeping */
		pg->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
		buddy->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
	} else {
		/* each half is still a multi-frame block with its own head */
		pg->p_flags |= VM_PAGE_HEAD | VM_PAGE_HUGE;
		buddy->p_flags |= VM_PAGE_HEAD | VM_PAGE_HUGE;
	}

	*a = pg;
	*b = buddy;

	return 0;
}
|
||||
|
||||
/* Merge two buddy pages of equal order into a single page of order+1.
   Returns the head page of the merged block, or NULL if the pages
   cannot be merged (order mismatch, already max order, not buddies,
   or reserved/non-reserved mismatch).

   Fix: the original ended with `return 0;` — a null pointer constant —
   so every successful merge reported failure to the caller even though
   the page metadata had already been rewritten. Return the head page. */
vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b)
{
	if (a->p_order != b->p_order) {
		return NULL;
	}

	if (a->p_order == VM_PAGE_MAX_ORDER) {
		/* nothing larger exists to merge into */
		return NULL;
	}

	if (vm_page_get_buddy(a) != b) {
		return NULL;
	}

	/* never merge a reserved half with a non-reserved one; both sides
	   of the check use the same flag bit, so direct comparison works */
	if (VM_PAGE_IS_RESERVED(a) != VM_PAGE_IS_RESERVED(b)) {
		return NULL;
	}

	/* make sure that a comes before b */
	if (a > b) {
		vm_page_t *tmp = a;
		a = b;
		b = tmp;
	}

	a->p_order++;

	/* promote every frame of the merged block; only `a` keeps HEAD */
	size_t nr_frames = vm_page_order_to_pages(a->p_order);
	for (size_t i = 0; i < nr_frames; i++) {
		a[i].p_flags &= ~VM_PAGE_HEAD;
		a[i].p_flags |= VM_PAGE_HUGE;
		a[i].p_order = a->p_order;
	}

	a->p_flags |= VM_PAGE_HEAD;

	return a;
}
|
||||
|
||||
/* Find a page's buddy at its current order. The buddy's physical
   address differs from ours only in the single bit corresponding to
   the block size of this order. */
vm_page_t *vm_page_get_buddy(vm_page_t *pg)
{
	phys_addr_t buddy_paddr =
	    vm_page_get_paddr(pg) ^ vm_page_order_to_bytes(pg->p_order);
	return vm_page_get(buddy_paddr);
}
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
#include "socks/queue.h"
|
||||
#include <socks/types.h>
|
||||
#include <socks/vm.h>
|
||||
#include <string.h>
|
||||
@@ -126,3 +127,80 @@ void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
|
||||
convert_region_to_blocks(z, block_start, block_end + VM_PAGE_SIZE - 1, this_page_reserved);
|
||||
}
|
||||
}
|
||||
|
||||
/* Ensure the free list for `order` holds at least one page, splitting
   larger free pages down as needed. Returns 0 on success, -1 when no
   page of `order` or above is available.

   Fixes: the original split loop popped from z_free_pages[order]
   (empty by precondition) instead of [i] and pushed the halves onto
   [order - 1] instead of [i - 1]; the trailing "last order" block then
   popped from [order + 1] but pushed onto [order - 1], which is both
   the wrong list and an underflow when order == 0 — the exact bug its
   own comment claimed to avoid. The cascade below pops from [i] and
   pushes to [i - 1], stopping at i == order + 1 so the final halves
   land on [order] and no index ever goes below `order`. */
static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
{
	if (queue_length(&z->z_free_pages[order]) != 0) {
		/* we already have pages available. */
		return 0;
	}

	if (order == VM_PAGE_MAX_ORDER) {
		/* there are no larger pages to split, so just give up. */
		return -1;
	}

	/* the lowest page order that is > `order` and still has pages available
	   (the list at `order` itself is known empty at this point) */
	vm_page_order_t first_order_with_free = VM_MAX_PAGE_ORDERS;

	for (vm_page_order_t i = order + 1; i <= VM_PAGE_MAX_ORDER; i++) {
		if (queue_length(&z->z_free_pages[i]) > 0) {
			first_order_with_free = i;
			break;
		}
	}

	if (first_order_with_free == VM_MAX_PAGE_ORDERS) {
		/* there are no pages available to split */
		return -1;
	}

	/* starting from the first list with free pages, take one page per
	   level, split it in half, and add both halves to the next order
	   down. The loop stops at i == order + 1, so the last pair lands
	   on the requested order's list. */
	for (vm_page_order_t i = first_order_with_free; i > order; i--) {
		queue_entry_t *pg_entry = queue_pop_front(&z->z_free_pages[i]);
		vm_page_t *pg = QUEUE_CONTAINER(vm_page_t, p_free_list, pg_entry);

		vm_page_t *a, *b;
		if (vm_page_split(pg, &a, &b) != 0) {
			/* shouldn't happen for i > order >= MIN; put it back */
			queue_push_back(&z->z_free_pages[i], &pg->p_free_list);
			return -1;
		}

		queue_push_back(&z->z_free_pages[i - 1], &a->p_free_list);
		queue_push_back(&z->z_free_pages[i - 1], &b->p_free_list);
	}

	return 0;
}
|
||||
|
||||
/* Allocate one page of the requested order from this zone. Larger free
   pages are split down transparently; returns NULL when the zone has
   nothing of this order or above. */
vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags)
{
	/* make sure at least one free page of this order exists */
	if (replenish_free_page_list(z, order) != 0) {
		return NULL;
	}

	queue_entry_t *entry = queue_pop_front(&z->z_free_pages[order]);
	return QUEUE_CONTAINER(vm_page_t, p_free_list, entry);
}
|
||||
|
||||
void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg)
{
	/* TODO: not yet implemented. Should return `pg` to
	   z->z_free_pages[pg->p_order] and attempt buddy coalescing via
	   vm_page_get_buddy()/vm_page_merge(). Until then, freed pages
	   are leaked — vm_page_free() calls this expecting the page to
	   become allocatable again. */
}
|
||||
|
||||
Reference in New Issue
Block a user