/* mango/vm/zone.c — 259 lines, 6.4 KiB, C (paste header preserved as a comment) */
#include <socks/locks.h>
#include <socks/util.h>
#include <socks/queue.h>
#include <socks/memblock.h>
#include <socks/types.h>
#include <socks/vm.h>
#include <socks/printk.h>
/* 2023-02-03 20:51:23 +00:00 */
#include <socks/libc/string.h>
#include <socks/machine/cpu.h>
static vm_page_t *group_pages_into_block(vm_zone_t *z, phys_addr_t base, phys_addr_t limit, int order)
{
	/* Tag every frame in [base, limit) with the block's order and its
	   owning node/zone.  The frame at `base` is marked as the block head
	   and returned; NULL is returned when no frame maps to `base`. */
	vm_page_t *head = NULL;
	for (phys_addr_t addr = base; addr < limit; addr += VM_PAGE_SIZE) {
		vm_page_t *frame = vm_page_get(addr);
		if (frame == NULL) {
			/* hole in the page array — skip this frame */
			continue;
		}
		frame->p_order = order;
		frame->p_node = z->z_info.zd_node;
		frame->p_zone = z->z_info.zd_id;
		/* any block above the minimum order counts as huge */
		if (order != VM_PAGE_MIN_ORDER) {
			frame->p_flags |= VM_PAGE_HUGE;
		}
		if (addr == base) {
			frame->p_flags |= VM_PAGE_HEAD;
			head = frame;
		}
		frame->p_list = QUEUE_ENTRY_INIT;
		frame->p_slab = NULL;
	}
	return head;
}
static void convert_region_to_blocks(vm_zone_t *zone,
				     phys_addr_t base, phys_addr_t limit,
				     int reserved)
{
	/* Carve the physical range [base, limit] (`limit` is the inclusive
	   last byte) into the largest naturally-aligned blocks that fit,
	   working down from VM_PAGE_MAX_ORDER.  Free blocks are pushed onto
	   the zone's per-order free lists; reserved blocks are tagged but
	   kept off the lists. */
	size_t block_frames = vm_bytes_to_pages(limit - base + 1);
	int reset_order = 0;
	for (int order = VM_PAGE_MAX_ORDER; order >= VM_PAGE_MIN_ORDER; ) {
		size_t order_frames = vm_page_order_to_pages(order);
		vm_alignment_t order_alignment = vm_page_order_to_alignment(order);
		if (order_frames > block_frames) {
			/* a block of this order does not fit into what is
			   left of the region; try the next smaller order */
			order--;
			continue;
		}
		if (!VM_CHECK_ALIGN(base, order_alignment)) {
			/* base is misaligned for this order; drop down, but
			   remember to retry the large orders once a smaller
			   block has restored alignment */
			reset_order = 1;
			order--;
			continue;
		}
		phys_addr_t block_limit = base + (order_frames * VM_PAGE_SIZE) - 1;
		vm_page_t *block_page = group_pages_into_block(zone, base, block_limit, order);
		/* block_page is NULL when no frame maps the block's base
		   address (a hole in the page array) — such a block must
		   never be queued as free. */
		if (reserved == 0 && block_page != NULL) {
			queue_push_back(&zone->z_free_pages[order], &block_page->p_list);
		}
		base = block_limit + 1;
		block_frames -= order_frames;
		if (block_frames == 0) {
			/* the whole region has been carved up.  (The old
			   `base == limit` test could never fire: base is
			   always advanced to one past the inclusive limit.) */
			break;
		}
		if (reset_order) {
			order = VM_PAGE_MAX_ORDER;
			reset_order = 0;
		}
	}
}
/* Sum the bytes currently held on all of the zone's free lists.
   Called with z->z_lock held (see vm_zone_init) so the lists cannot
   change underneath the walk. */
static size_t zone_free_bytes(vm_zone_t *z)
{
	size_t free_bytes = 0;
	for (vm_page_order_t i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER; i++) {
		size_t page_bytes = vm_page_order_to_bytes(i);
		/* each list entry represents one block of `page_bytes` */
		queue_foreach (vm_page_t, pg, &z->z_free_pages[i], p_list) {
			free_bytes += page_bytes;
		}
	}
	return free_bytes;
}
/* Bring a zone online: clamp it to the physical memory actually present,
   walk its pages grouping runs of free vs. reserved frames, and carve
   each run into buddy blocks (free runs land on the free lists).
   The descriptor is copied into the zone, so the caller's copy need not
   outlive this call. */
void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
{
memset(z, 0x0, sizeof *z);
memcpy(&z->z_info, zone_info, sizeof *zone_info);
z->z_lock = SPIN_LOCK_INIT;
unsigned long flags;
spin_lock_irqsave(&z->z_lock, &flags);
phys_addr_t block_start = zone_info->zd_base, block_end = zone_info->zd_limit;
/* -1 means "no page classified yet" */
int this_page_reserved = 0, last_page_reserved = -1;
phys_addr_t plimit = 0;
memblock_iter_t it;
/* find the highest physical address (+1) covered by any memory range */
for_each_mem_range (&it, 0x00, UINTPTR_MAX) {
if (it.it_limit + 1 > plimit) {
plimit = it.it_limit + 1;
}
}
/* clamp the zone so we never walk past the end of physical memory */
if (z->z_info.zd_limit > plimit) {
z->z_info.zd_limit = plimit;
}
size_t nr_pages_found = 0;
for (uintptr_t i = z->z_info.zd_base; i < z->z_info.zd_limit; i += VM_PAGE_SIZE) {
vm_page_t *pg = vm_page_get(i);
if (pg) {
nr_pages_found++;
this_page_reserved = (pg->p_flags & VM_PAGE_RESERVED) ? 1 : 0;
} else {
/* no page struct maps this frame: treat it as reserved */
this_page_reserved = 1;
}
if (last_page_reserved == -1) {
last_page_reserved = this_page_reserved;
}
if (this_page_reserved == last_page_reserved) {
/* still in the same run; extend it to include this page */
block_end = i;
continue;
}
/* reserved-ness flipped: convert the finished run [block_start,
   block_end] (block_end is a page base, hence + VM_PAGE_SIZE - 1
   for the inclusive byte limit) and start a new run at `i` */
convert_region_to_blocks(z, block_start, block_end + VM_PAGE_SIZE - 1, last_page_reserved);
block_start = i;
last_page_reserved = this_page_reserved;
}
if (block_start != block_end) {
/* either the entire zone is homogeneous (all free/all reserved) or the entire zone is empty. */
/* NOTE(review): this also covers the trailing run after the last
   transition, but a trailing run of exactly one page leaves
   block_start == block_end and is skipped — confirm intended.
   After the loop, this_page_reserved == last_page_reserved. */
if (nr_pages_found > 0) {
/* the entire zone is homogeneous :) */
convert_region_to_blocks(z, block_start, block_end + VM_PAGE_SIZE - 1, this_page_reserved);
}
}
size_t free_bytes = zone_free_bytes(z);
spin_unlock_irqrestore(&z->z_lock, flags);
/* report the amount of memory brought online in human-readable form */
char free_bytes_str[64];
data_size_to_string(free_bytes, free_bytes_str, sizeof free_bytes_str);
printk("vm: zone %u/%s: %s of memory online.", z->z_info.zd_node, z->z_info.zd_name, free_bytes_str);
}
/* Ensure z->z_free_pages[order] is non-empty, splitting a larger free
   block if necessary.  Returns 0 on success, -1 when no block of the
   requested order or above is available.  Caller holds z->z_lock. */
static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
{
	if (!queue_empty(&z->z_free_pages[order])) {
		/* we already have pages available. */
		return 0;
	}
	if (order == VM_PAGE_MAX_ORDER) {
		/* there are no larger pages to split, so just give up. */
		return -1;
	}
	/* the lowest page order that is > `order` and still has pages
	   available.  (The list at `order` itself was just verified empty,
	   so the scan starts at order + 1; the old == order early-return
	   was unreachable.) */
	vm_page_order_t first_order_with_free = VM_MAX_PAGE_ORDERS;
	for (vm_page_order_t i = order + 1; i <= VM_PAGE_MAX_ORDER; i++) {
		if (!queue_empty(&z->z_free_pages[i])) {
			first_order_with_free = i;
			break;
		}
	}
	if (first_order_with_free == VM_MAX_PAGE_ORDERS) {
		/* there are no pages available to split */
		return -1;
	}
	/* starting from the first page list with free pages,
	   take a page, split it in half, and add the sub-pages
	   to the next order's free list, repeating until a block of the
	   requested order exists. */
	for (vm_page_order_t i = first_order_with_free; i > order; i--) {
		queue_entry_t *pg_entry = queue_pop_front(&z->z_free_pages[i]);
		vm_page_t *pg = QUEUE_CONTAINER(vm_page_t, p_list, pg_entry);
		vm_page_t *a, *b;
		vm_page_split(pg, &a, &b);
		queue_push_back(&z->z_free_pages[i - 1], &a->p_list);
		queue_push_back(&z->z_free_pages[i - 1], &b->p_list);
	}
	return 0;
}
/* Allocate one block of `order` from the zone, marking every frame in it
   as allocated.  Returns NULL when the zone cannot supply a block of the
   requested order.  (`flags` is accepted but not consulted here.) */
vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags)
{
	unsigned long irq_flags;
	vm_page_t *page = NULL;

	spin_lock_irqsave(&z->z_lock, &irq_flags);
	if (replenish_free_page_list(z, order) == 0) {
		queue_entry_t *entry = queue_pop_front(&z->z_free_pages[order]);
		page = QUEUE_CONTAINER(vm_page_t, p_list, entry);
		/* flag every frame of the block as in use */
		vm_page_foreach (page, frame) {
			frame->p_flags |= VM_PAGE_ALLOC;
		}
	}
	spin_unlock_irqrestore(&z->z_lock, irq_flags);
	return page;
}
/* Return a block to the zone's free lists, coalescing with its buddy
   into progressively larger blocks while merging succeeds. */
void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg)
{
unsigned long irq_flags;
spin_lock_irqsave(&z->z_lock, &irq_flags);
pg->p_flags &= ~VM_PAGE_ALLOC;
queue_push_back(&z->z_free_pages[pg->p_order], &pg->p_list);
while (1) {
vm_page_t *buddy = vm_page_get_buddy(pg);
/* presumably vm_page_merge returns NULL when the buddy cannot be
   merged (e.g. still allocated) — semantics live outside this file,
   verify against its definition */
vm_page_t *huge = vm_page_merge(pg, buddy);
if (!huge) {
break;
}
/* NOTE(review): indexing with buddy->p_order - 1 assumes the merge
   bumped the constituents' p_order to the merged order, so - 1 is
   the list they were queued on — confirm against vm_page_merge */
queue_delete(&z->z_free_pages[buddy->p_order - 1], &buddy->p_list);
queue_delete(&z->z_free_pages[buddy->p_order - 1], &pg->p_list);
queue_push_back(&z->z_free_pages[huge->p_order], &huge->p_list);
/* continue coalescing with the merged block's own buddy */
pg = huge;
}
spin_unlock_irqrestore(&z->z_lock, irq_flags);
}