2026-02-21 11:01:58 +00:00
|
|
|
#include <kernel/libc/string.h>
|
2026-02-19 18:54:48 +00:00
|
|
|
#include <kernel/locks.h>
|
2026-02-21 11:01:58 +00:00
|
|
|
#include <kernel/machine/cpu.h>
|
2026-02-19 18:54:48 +00:00
|
|
|
#include <kernel/memblock.h>
|
2026-02-21 11:01:58 +00:00
|
|
|
#include <kernel/panic.h>
|
|
|
|
|
#include <kernel/printk.h>
|
|
|
|
|
#include <kernel/queue.h>
|
2026-02-19 18:54:48 +00:00
|
|
|
#include <kernel/types.h>
|
2026-02-21 11:01:58 +00:00
|
|
|
#include <kernel/util.h>
|
2026-02-19 18:54:48 +00:00
|
|
|
#include <kernel/vm.h>
|
2023-01-29 11:03:53 +00:00
|
|
|
|
2026-02-21 11:01:58 +00:00
|
|
|
static struct vm_page *group_pages_into_block(
|
|
|
|
|
struct vm_zone *z,
|
|
|
|
|
phys_addr_t base,
|
|
|
|
|
phys_addr_t limit,
|
|
|
|
|
int order)
|
2023-01-29 20:10:15 +00:00
|
|
|
{
|
2023-04-12 20:17:11 +01:00
|
|
|
struct vm_page *first_page = NULL;
|
2023-01-29 20:10:15 +00:00
|
|
|
for (phys_addr_t i = base; i < limit; i += VM_PAGE_SIZE) {
|
2023-04-12 20:17:11 +01:00
|
|
|
struct vm_page *pg = vm_page_get(i);
|
2023-02-08 20:26:51 +00:00
|
|
|
if (!pg) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2023-02-07 15:58:37 +00:00
|
|
|
|
|
|
|
|
pg->p_order = order;
|
|
|
|
|
pg->p_node = z->z_info.zd_node;
|
|
|
|
|
pg->p_zone = z->z_info.zd_id;
|
2023-01-29 20:10:15 +00:00
|
|
|
|
|
|
|
|
if (order != VM_PAGE_MIN_ORDER) {
|
|
|
|
|
pg->p_flags |= VM_PAGE_HUGE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (i == base) {
|
|
|
|
|
pg->p_flags |= VM_PAGE_HEAD;
|
|
|
|
|
first_page = pg;
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-07 15:58:37 +00:00
|
|
|
pg->p_list = QUEUE_ENTRY_INIT;
|
|
|
|
|
pg->p_slab = NULL;
|
2023-01-29 20:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return first_page;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-21 11:01:58 +00:00
|
|
|
static void convert_region_to_blocks(
|
|
|
|
|
struct vm_zone *zone,
|
|
|
|
|
phys_addr_t base,
|
|
|
|
|
phys_addr_t limit,
|
|
|
|
|
int reserved)
|
2023-01-29 11:03:53 +00:00
|
|
|
{
|
2026-02-21 11:01:58 +00:00
|
|
|
if (base & VM_PAGE_MASK || (limit + 1) & VM_PAGE_MASK) {
|
|
|
|
|
panic("convert_region_to_blocks: region must be page-aligned");
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-29 20:10:15 +00:00
|
|
|
size_t block_frames = vm_bytes_to_pages(limit - base + 1);
|
2023-01-29 11:03:53 +00:00
|
|
|
int reset_order = 0;
|
|
|
|
|
|
2026-02-21 11:01:58 +00:00
|
|
|
for (int order = VM_PAGE_MAX_ORDER; order >= VM_PAGE_MIN_ORDER;) {
|
2023-01-29 11:03:53 +00:00
|
|
|
size_t order_frames = vm_page_order_to_pages(order);
|
2026-02-21 11:01:58 +00:00
|
|
|
vm_alignment_t order_alignment
|
|
|
|
|
= vm_page_order_to_alignment(order);
|
2023-01-29 11:03:53 +00:00
|
|
|
|
|
|
|
|
if (order_frames > block_frames) {
|
|
|
|
|
order--;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!VM_CHECK_ALIGN(base, order_alignment)) {
|
|
|
|
|
reset_order = 1;
|
|
|
|
|
order--;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
2023-04-09 17:14:19 +01:00
|
|
|
|
2026-02-21 11:01:58 +00:00
|
|
|
phys_addr_t block_limit
|
|
|
|
|
= base + (order_frames * VM_PAGE_SIZE) - 1;
|
|
|
|
|
struct vm_page *block_page = group_pages_into_block(
|
|
|
|
|
zone,
|
|
|
|
|
base,
|
|
|
|
|
block_limit,
|
|
|
|
|
order);
|
2023-01-29 20:10:15 +00:00
|
|
|
|
|
|
|
|
if (reserved == 0) {
|
2026-02-21 11:01:58 +00:00
|
|
|
queue_push_back(
|
|
|
|
|
&zone->z_free_pages[order],
|
|
|
|
|
&block_page->p_list);
|
2023-01-29 20:10:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
base = block_limit + 1;
|
2023-01-29 11:03:53 +00:00
|
|
|
block_frames -= order_frames;
|
|
|
|
|
|
|
|
|
|
if (reset_order) {
|
|
|
|
|
order = VM_PAGE_MAX_ORDER;
|
|
|
|
|
reset_order = 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (base == limit) {
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2023-01-28 19:24:28 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
static size_t zone_free_bytes(struct vm_zone *z)
|
2023-02-08 21:29:20 +00:00
|
|
|
{
|
|
|
|
|
size_t free_bytes = 0;
|
2026-02-21 11:01:58 +00:00
|
|
|
for (enum vm_page_order i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER;
|
|
|
|
|
i++) {
|
2023-02-08 21:29:20 +00:00
|
|
|
size_t page_bytes = vm_page_order_to_bytes(i);
|
|
|
|
|
size_t nr_pages = 0;
|
2026-02-21 11:01:58 +00:00
|
|
|
queue_foreach(struct vm_page, pg, &z->z_free_pages[i], p_list)
|
|
|
|
|
{
|
2023-02-08 21:29:20 +00:00
|
|
|
free_bytes += page_bytes;
|
|
|
|
|
nr_pages++;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return free_bytes;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* Initialize zone `z` from `zone_info`: zero the structure, copy the
 * descriptor, then walk the free memblock ranges inside the zone and
 * convert each maximal run of same-state (free vs. reserved) pages into
 * buddy page blocks via convert_region_to_blocks().  Finally logs how
 * much memory came online. */
void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
{
	memset(z, 0x0, sizeof *z);
	memcpy(&z->z_info, zone_info, sizeof *zone_info);
	z->z_lock = SPIN_LOCK_INIT;

	unsigned long flags;
	spin_lock_irqsave(&z->z_lock, &flags);

	struct memblock_iter it;

	/* TODO this only creates page blocks for free memory regions, not
	 * reserved memory regions. this is faster for systems that have huge
	 * amounts of reserved memory, but it means that a call to vm_page_get()
	 * for a reserved memory region will return null rather than a reserved
	 * page.
	 *
	 * vm_page_get() should probably create reserved pages on-demand for
	 * these regions. */

	/* number of frames with a backing page structure seen since the last
	 * region flush; used below to tell a homogeneous range apart from an
	 * entirely empty one. */
	size_t nr_pages_found = 0;
	for_each_free_mem_range(&it, z->z_info.zd_base, z->z_info.zd_limit)
	{
		/* round the range outwards to whole pages: base down,
		 * limit up. */
		it.it_base &= ~VM_PAGE_MASK;
		if (it.it_limit & VM_PAGE_MASK) {
			it.it_limit &= ~VM_PAGE_MASK;
			it.it_limit += VM_PAGE_SIZE;
		}

		/* current run of same-state pages being accumulated. */
		phys_addr_t block_start = it.it_base, block_end = it.it_limit;

		/* last_page_reserved == -1 means "no page examined yet". */
		int this_page_reserved = 0, last_page_reserved = -1;

		for (uintptr_t i = it.it_base; i < it.it_limit;
		     i += VM_PAGE_SIZE) {
			struct vm_page *pg = vm_page_get(i);

			if (pg) {
				nr_pages_found++;
				this_page_reserved
					= (pg->p_flags & VM_PAGE_RESERVED) ? 1
									   : 0;
			} else {
				/* frames with no page structure are treated
				 * as reserved. */
				this_page_reserved = 1;
			}

			if (last_page_reserved == -1) {
				last_page_reserved = this_page_reserved;
			}

			if (this_page_reserved == last_page_reserved) {
				/* still the same state: extend the run. */
				block_end = i;
				continue;
			}

			/* state flipped: flush the finished run.  block_end
			 * is the first byte of the run's last page, hence the
			 * "+ VM_PAGE_SIZE - 1" to form an inclusive limit. */
			convert_region_to_blocks(
				z,
				block_start,
				block_end + VM_PAGE_SIZE - 1,
				last_page_reserved);

			/* start a new run at the current page (rounded up to
			 * a page boundary). */
			block_start = i;
			if (block_start & VM_PAGE_MASK) {
				block_start &= ~VM_PAGE_MASK;
				block_start += VM_PAGE_SIZE;
			}
			last_page_reserved = this_page_reserved;
			nr_pages_found = 0;
		}

		if (block_start != block_end) {
			/* either the entire zone is homogeneous (all free/all
			 * reserved) or the entire zone is empty. */
			if (nr_pages_found > 0) {
				/* the entire zone is homogeneous :) */
				convert_region_to_blocks(
					z,
					block_start,
					block_end + VM_PAGE_SIZE - 1,
					this_page_reserved);
			}
		}
	}

	/* dead code kept from an earlier single-pass implementation that
	 * walked the whole zone rather than per-memblock ranges. */
#if 0
	for_each_mem_range (&it, 0x00, UINTPTR_MAX) {
		if (it.it_limit + 1 > plimit) {
			plimit = it.it_limit + 1;
		}
	}

	if (z->z_info.zd_limit > plimit) {
		z->z_info.zd_limit = plimit;
	}

	for (uintptr_t i = z->z_info.zd_base; i < z->z_info.zd_limit; i += VM_PAGE_SIZE) {
		struct vm_page *pg = vm_page_get(i);

		if (pg) {
			nr_pages_found++;
			this_page_reserved = (pg->p_flags & VM_PAGE_RESERVED) ? 1 : 0;
		} else {
			this_page_reserved = 1;
		}

		if (last_page_reserved == -1) {
			last_page_reserved = this_page_reserved;
		}

		if (this_page_reserved == last_page_reserved) {
			block_end = i;
			continue;
		}

		convert_region_to_blocks(z, block_start, block_end + VM_PAGE_SIZE - 1, last_page_reserved);

		block_start = i;
		last_page_reserved = this_page_reserved;
	}
#endif

	size_t free_bytes = zone_free_bytes(z);
	spin_unlock_irqrestore(&z->z_lock, flags);

	char free_bytes_str[64];
	data_size_to_string(free_bytes, free_bytes_str, sizeof free_bytes_str);

	printk("vm: zone %u/%s: %s of memory online.",
	       z->z_info.zd_node,
	       z->z_info.zd_name,
	       free_bytes_str);
}
|
2023-02-01 15:03:42 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
static int replenish_free_page_list(struct vm_zone *z, enum vm_page_order order)
|
2023-02-01 15:03:42 +00:00
|
|
|
{
|
2023-02-02 21:10:37 +00:00
|
|
|
if (!queue_empty(&z->z_free_pages[order])) {
|
2023-02-01 15:03:42 +00:00
|
|
|
/* we already have pages available. */
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
2023-04-09 17:14:19 +01:00
|
|
|
|
2023-02-01 15:03:42 +00:00
|
|
|
if (order == VM_PAGE_MAX_ORDER) {
|
|
|
|
|
/* there are no larger pages to split, so just give up. */
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
2023-04-09 17:14:19 +01:00
|
|
|
|
2026-02-21 11:01:58 +00:00
|
|
|
/* the lowest page order that is >= `order` and still has pages
|
|
|
|
|
* available */
|
2023-04-12 20:17:11 +01:00
|
|
|
enum vm_page_order first_order_with_free = VM_MAX_PAGE_ORDERS;
|
2023-02-01 15:03:42 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
for (enum vm_page_order i = order; i <= VM_PAGE_MAX_ORDER; i++) {
|
2023-02-02 21:10:37 +00:00
|
|
|
if (!queue_empty(&z->z_free_pages[i])) {
|
2023-02-01 15:03:42 +00:00
|
|
|
first_order_with_free = i;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (first_order_with_free == VM_MAX_PAGE_ORDERS) {
|
|
|
|
|
/* there are no pages available to split */
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (first_order_with_free == order) {
|
2026-02-21 11:01:58 +00:00
|
|
|
/* there are free pages of the requested order, so nothing needs
|
|
|
|
|
* to be done */
|
2023-02-01 15:03:42 +00:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* starting from the first page list with free pages,
|
|
|
|
|
take a page, split it in half, and add the sub-pages
|
|
|
|
|
to the next order's free list. */
|
2023-04-12 20:17:11 +01:00
|
|
|
for (enum vm_page_order i = first_order_with_free; i > order; i--) {
|
2026-02-21 11:01:58 +00:00
|
|
|
struct queue_entry *pg_entry
|
|
|
|
|
= queue_pop_front(&z->z_free_pages[i]);
|
|
|
|
|
struct vm_page *pg
|
|
|
|
|
= QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);
|
2023-02-01 15:03:42 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct vm_page *a, *b;
|
2023-02-01 15:03:42 +00:00
|
|
|
vm_page_split(pg, &a, &b);
|
|
|
|
|
|
2023-02-02 16:58:24 +00:00
|
|
|
queue_push_back(&z->z_free_pages[i - 1], &a->p_list);
|
|
|
|
|
queue_push_back(&z->z_free_pages[i - 1], &b->p_list);
|
2023-02-01 15:03:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-21 11:01:58 +00:00
|
|
|
struct vm_page *vm_zone_alloc_page(
|
|
|
|
|
struct vm_zone *z,
|
|
|
|
|
enum vm_page_order order,
|
|
|
|
|
enum vm_flags flags)
|
2023-02-01 15:03:42 +00:00
|
|
|
{
|
2023-02-02 21:14:02 +00:00
|
|
|
unsigned long irq_flags;
|
|
|
|
|
spin_lock_irqsave(&z->z_lock, &irq_flags);
|
|
|
|
|
|
2023-02-01 15:03:42 +00:00
|
|
|
int result = replenish_free_page_list(z, order);
|
|
|
|
|
if (result != 0) {
|
2023-02-02 21:14:02 +00:00
|
|
|
spin_unlock_irqrestore(&z->z_lock, irq_flags);
|
2023-02-01 15:03:42 +00:00
|
|
|
return NULL;
|
|
|
|
|
}
|
2023-04-09 17:14:19 +01:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[order]);
|
|
|
|
|
struct vm_page *pg = QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);
|
2026-02-21 11:01:58 +00:00
|
|
|
vm_page_foreach(pg, i)
|
|
|
|
|
{
|
2023-02-01 17:05:14 +00:00
|
|
|
i->p_flags |= VM_PAGE_ALLOC;
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-02 21:14:02 +00:00
|
|
|
spin_unlock_irqrestore(&z->z_lock, irq_flags);
|
2023-02-01 17:05:14 +00:00
|
|
|
return pg;
|
2023-02-01 15:03:42 +00:00
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* Return page block `pg` to zone `z`'s free lists, coalescing it with
 * its buddy into progressively larger blocks while merging succeeds. */
void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&z->z_lock, &irq_flags);

	/* clear the allocated flag on the head page and put the block back
	 * on the free list for its order.  NOTE(review): only the head
	 * page's flag is cleared here, whereas vm_zone_alloc_page() sets
	 * VM_PAGE_ALLOC on every constituent frame — confirm whether the
	 * tail frames are handled elsewhere. */
	pg->p_flags &= ~VM_PAGE_ALLOC;
	queue_push_back(&z->z_free_pages[pg->p_order], &pg->p_list);

	/* buddy-coalescing loop: keep merging until vm_page_merge() refuses
	 * (returns NULL). */
	while (1) {
		struct vm_page *buddy = vm_page_get_buddy(pg);
		struct vm_page *huge = vm_page_merge(pg, buddy);
		if (!huge) {
			break;
		}

		/* remove both halves from the smaller order's free list and
		 * push the merged block onto its order's list.  The index
		 * `buddy->p_order - 1` presumably reflects vm_page_merge()
		 * having already bumped the order fields — confirm against
		 * vm_page_merge()'s post-conditions. */
		queue_delete(
			&z->z_free_pages[buddy->p_order - 1],
			&buddy->p_list);
		queue_delete(&z->z_free_pages[buddy->p_order - 1], &pg->p_list);
		queue_push_back(&z->z_free_pages[huge->p_order], &huge->p_list);

		/* continue merging from the new, larger block. */
		pg = huge;
	}

	spin_unlock_irqrestore(&z->z_lock, irq_flags);
}
|