all kernel headers have been moved from include/mango to include/kernel and now contain only definitions that are relevant to kernel-space. any definitions relevant to both kernel- and user-space (e.g. type definitions, syscall IDs) have been moved to include/mango within libmango.
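as a rough illustration of the split (the mango/ header names below are hypothetical, chosen only to show the layout; the kernel/ headers are the ones this file actually uses):

    #include <kernel/vm.h>      /* kernel-space only */
    #include <mango/types.h>    /* shared type definitions, from libmango (hypothetical name) */
    #include <mango/syscall.h>  /* shared syscall IDs, from libmango (hypothetical name) */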
#include <kernel/locks.h>
#include <kernel/util.h>
#include <kernel/queue.h>
#include <kernel/memblock.h>
#include <kernel/types.h>
#include <kernel/vm.h>
#include <kernel/printk.h>
#include <kernel/libc/string.h>
#include <kernel/machine/cpu.h>

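/*
 * Stamp every page in [base, limit] with the given order and with the
 * zone's node/zone identity. The first page becomes the block head and is
 * returned; NULL is returned if vm_page_get() knows nothing about `base`.
 */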
static struct vm_page *group_pages_into_block(struct vm_zone *z, phys_addr_t base, phys_addr_t limit, int order)
{
        struct vm_page *first_page = NULL;
        for (phys_addr_t i = base; i < limit; i += VM_PAGE_SIZE) {
                struct vm_page *pg = vm_page_get(i);
                if (!pg) {
                        continue;
                }

                pg->p_order = order;
                pg->p_node = z->z_info.zd_node;
                pg->p_zone = z->z_info.zd_id;

                if (order != VM_PAGE_MIN_ORDER) {
                        pg->p_flags |= VM_PAGE_HUGE;
                }

                if (i == base) {
                        pg->p_flags |= VM_PAGE_HEAD;
                        first_page = pg;
                }

                pg->p_list = QUEUE_ENTRY_INIT;
                pg->p_slab = NULL;
        }

        return first_page;
}

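/*
 * Carve the physical region [base, limit] (limit inclusive) into the
 * largest naturally aligned blocks that fit, working greedily down from
 * VM_PAGE_MAX_ORDER. Free blocks go onto the zone's free lists; reserved
 * blocks are stamped but kept off the lists.
 */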
static void convert_region_to_blocks(struct vm_zone *zone,
                                     phys_addr_t base, phys_addr_t limit,
                                     int reserved)
{
        size_t block_frames = vm_bytes_to_pages(limit - base + 1);
        int reset_order = 0;

        for (int order = VM_PAGE_MAX_ORDER; order >= VM_PAGE_MIN_ORDER; ) {
                size_t order_frames = vm_page_order_to_pages(order);
                vm_alignment_t order_alignment = vm_page_order_to_alignment(order);

                if (order_frames > block_frames) {
                        order--;
                        continue;
                }

                if (!VM_CHECK_ALIGN(base, order_alignment)) {
                        reset_order = 1;
                        order--;
                        continue;
                }

                phys_addr_t block_limit = base + (order_frames * VM_PAGE_SIZE) - 1;
                struct vm_page *block_page = group_pages_into_block(zone, base, block_limit, order);

                if (reserved == 0) {
                        queue_push_back(&zone->z_free_pages[order], &block_page->p_list);
                }

                base = block_limit + 1;
                block_frames -= order_frames;

                if (reset_order) {
                        order = VM_PAGE_MAX_ORDER;
                        reset_order = 0;
                }

                /* `limit` is inclusive, so `base` lands one past it once the
                 * region has been fully consumed. */
                if (base > limit) {
                        break;
                }
        }
}

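/* Sum the bytes currently sitting on the zone's free lists. */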
static size_t zone_free_bytes(struct vm_zone *z)
{
        size_t free_bytes = 0;
        for (enum vm_page_order i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER; i++) {
                size_t page_bytes = vm_page_order_to_bytes(i);
                queue_foreach (struct vm_page, pg, &z->z_free_pages[i], p_list) {
                        free_bytes += page_bytes;
                }
        }

        return free_bytes;
}

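/*
 * Bring a zone online: walk the free memory ranges reported by memblock,
 * split each range at free/reserved boundaries, and hand every homogeneous
 * run to convert_region_to_blocks().
 */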
void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
{
        memset(z, 0x0, sizeof *z);
        memcpy(&z->z_info, zone_info, sizeof *zone_info);
        z->z_lock = SPIN_LOCK_INIT;

        unsigned long flags;
        spin_lock_irqsave(&z->z_lock, &flags);

        struct memblock_iter it;

        /* TODO this only creates page blocks for free memory regions, not
         * reserved memory regions. this is faster for systems that have huge
         * amounts of reserved memory, but it means that a call to
         * vm_page_get() for a reserved memory region will return null rather
         * than a reserved page.
         *
         * vm_page_get() should probably create reserved pages on-demand for
         * these regions. */

        size_t nr_pages_found = 0;
        for_each_free_mem_range(&it, z->z_info.zd_base, z->z_info.zd_limit) {
                phys_addr_t block_start = it.it_base, block_end = it.it_limit;
                int this_page_reserved = 0, last_page_reserved = -1;

                for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
                        struct vm_page *pg = vm_page_get(i);

                        if (pg) {
                                nr_pages_found++;
                                this_page_reserved = (pg->p_flags & VM_PAGE_RESERVED) ? 1 : 0;
                        } else {
                                this_page_reserved = 1;
                        }

                        if (last_page_reserved == -1) {
                                last_page_reserved = this_page_reserved;
                        }

                        if (this_page_reserved == last_page_reserved) {
                                block_end = i;
                                continue;
                        }

                        convert_region_to_blocks(
                                z,
                                block_start, block_end + VM_PAGE_SIZE - 1,
                                last_page_reserved);

                        block_start = i;
                        last_page_reserved = this_page_reserved;
                }

                if (block_start != block_end) {
                        /* either the entire zone is homogeneous (all free/all reserved) or the entire zone is empty. */
                        if (nr_pages_found > 0) {
                                /* the entire zone is homogeneous :) */
                                convert_region_to_blocks(
                                        z,
                                        block_start, block_end + VM_PAGE_SIZE - 1,
                                        this_page_reserved);
                        }
                }
        }

        size_t free_bytes = zone_free_bytes(z);
        spin_unlock_irqrestore(&z->z_lock, flags);

        char free_bytes_str[64];
        data_size_to_string(free_bytes, free_bytes_str, sizeof free_bytes_str);

        printk("vm: zone %u/%s: %s of memory online.", z->z_info.zd_node, z->z_info.zd_name, free_bytes_str);
}

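/*
 * Ensure the free list for `order` is non-empty, splitting a larger block
 * into halves as needed. Called with the zone lock held. Returns 0 on
 * success, -1 if no block of this or any larger order is available.
 */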
static int replenish_free_page_list(struct vm_zone *z, enum vm_page_order order)
{
        if (!queue_empty(&z->z_free_pages[order])) {
                /* we already have pages available. */
                return 0;
        }

        if (order == VM_PAGE_MAX_ORDER) {
                /* there are no larger pages to split, so just give up. */
                return -1;
        }

        /* the lowest page order that is >= `order` and still has pages available */
        enum vm_page_order first_order_with_free = VM_MAX_PAGE_ORDERS;

        for (enum vm_page_order i = order; i <= VM_PAGE_MAX_ORDER; i++) {
                if (!queue_empty(&z->z_free_pages[i])) {
                        first_order_with_free = i;
                        break;
                }
        }

        if (first_order_with_free == VM_MAX_PAGE_ORDERS) {
                /* there are no pages available to split */
                return -1;
        }

        if (first_order_with_free == order) {
                /* there are free pages of the requested order, so nothing needs to be done */
                return 0;
        }

        /* starting from the first page list with free pages, take a page,
         * split it in half, and add the sub-pages to the next order's free
         * list. */
        for (enum vm_page_order i = first_order_with_free; i > order; i--) {
                struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[i]);
                struct vm_page *pg = QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);

                struct vm_page *a, *b;
                vm_page_split(pg, &a, &b);

                queue_push_back(&z->z_free_pages[i - 1], &a->p_list);
                queue_push_back(&z->z_free_pages[i - 1], &b->p_list);
        }

        return 0;
}

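/*
 * Allocate one block of the requested order from the zone, replenishing
 * the free list from larger orders if necessary. Returns NULL when the
 * zone cannot satisfy the request.
 */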
struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags)
{
        unsigned long irq_flags;
        spin_lock_irqsave(&z->z_lock, &irq_flags);

        int result = replenish_free_page_list(z, order);
        if (result != 0) {
                spin_unlock_irqrestore(&z->z_lock, irq_flags);
                return NULL;
        }

        struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[order]);
        struct vm_page *pg = QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);
        vm_page_foreach (pg, i) {
                i->p_flags |= VM_PAGE_ALLOC;
        }

        spin_unlock_irqrestore(&z->z_lock, irq_flags);
        return pg;
}

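/*
 * Return a block to the zone's free lists, then keep merging it with its
 * buddy for as long as vm_page_merge() succeeds, promoting the result to
 * the next order's free list each time.
 */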
void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg)
{
        unsigned long irq_flags;
        spin_lock_irqsave(&z->z_lock, &irq_flags);

        pg->p_flags &= ~VM_PAGE_ALLOC;
        queue_push_back(&z->z_free_pages[pg->p_order], &pg->p_list);

        while (1) {
                struct vm_page *buddy = vm_page_get_buddy(pg);
                struct vm_page *huge = vm_page_merge(pg, buddy);
                if (!huge) {
                        break;
                }

                queue_delete(&z->z_free_pages[buddy->p_order - 1], &buddy->p_list);
                queue_delete(&z->z_free_pages[buddy->p_order - 1], &pg->p_list);
                queue_push_back(&z->z_free_pages[huge->p_order], &huge->p_list);

                pg = huge;
        }

        spin_unlock_irqrestore(&z->z_lock, irq_flags);
}