vm: zone: ensure memblock region bounds are page-aligned while creating zone blocks
vm/zone.c (137 changed lines)
@@ -1,14 +1,19 @@
-#include <kernel/locks.h>
-#include <kernel/util.h>
-#include <kernel/queue.h>
-#include <kernel/memblock.h>
-#include <kernel/types.h>
-#include <kernel/vm.h>
-#include <kernel/printk.h>
 #include <kernel/libc/string.h>
+#include <kernel/locks.h>
+#include <kernel/machine/cpu.h>
+#include <kernel/memblock.h>
+#include <kernel/panic.h>
+#include <kernel/printk.h>
+#include <kernel/queue.h>
+#include <kernel/types.h>
+#include <kernel/util.h>
+#include <kernel/vm.h>
 
-static struct vm_page *group_pages_into_block(struct vm_zone *z, phys_addr_t base, phys_addr_t limit, int order)
+static struct vm_page *group_pages_into_block(
+	struct vm_zone *z,
+	phys_addr_t base,
+	phys_addr_t limit,
+	int order)
 {
 	struct vm_page *first_page = NULL;
 	for (phys_addr_t i = base; i < limit; i += VM_PAGE_SIZE) {
@@ -37,16 +42,23 @@ static struct vm_page *group_pages_into_block(struct vm_zone *z, phys_addr_t bas
 	return first_page;
 }
 
-static void convert_region_to_blocks(struct vm_zone *zone,
-	phys_addr_t base, phys_addr_t limit,
-	int reserved)
+static void convert_region_to_blocks(
+	struct vm_zone *zone,
+	phys_addr_t base,
+	phys_addr_t limit,
+	int reserved)
 {
+	if (base & VM_PAGE_MASK || (limit + 1) & VM_PAGE_MASK) {
+		panic("convert_region_to_blocks: region must be page-aligned");
+	}
+
 	size_t block_frames = vm_bytes_to_pages(limit - base + 1);
 	int reset_order = 0;
 
-	for (int order = VM_PAGE_MAX_ORDER; order >= VM_PAGE_MIN_ORDER; ) {
+	for (int order = VM_PAGE_MAX_ORDER; order >= VM_PAGE_MIN_ORDER;) {
 		size_t order_frames = vm_page_order_to_pages(order);
-		vm_alignment_t order_alignment = vm_page_order_to_alignment(order);
+		vm_alignment_t order_alignment
+			= vm_page_order_to_alignment(order);
 
 		if (order_frames > block_frames) {
 			order--;
@@ -59,11 +71,18 @@ static void convert_region_to_blocks(struct vm_zone *zone,
 			continue;
 		}
 
-		phys_addr_t block_limit = base + (order_frames * VM_PAGE_SIZE) - 1;
-		struct vm_page *block_page = group_pages_into_block(zone, base, block_limit, order);
+		phys_addr_t block_limit
+			= base + (order_frames * VM_PAGE_SIZE) - 1;
+		struct vm_page *block_page = group_pages_into_block(
+			zone,
+			base,
+			block_limit,
+			order);
 
 		if (reserved == 0) {
-			queue_push_back(&zone->z_free_pages[order], &block_page->p_list);
+			queue_push_back(
+				&zone->z_free_pages[order],
+				&block_page->p_list);
 		}
 
 		base = block_limit + 1;
@@ -83,10 +102,12 @@ static void convert_region_to_blocks(struct vm_zone *zone,
 static size_t zone_free_bytes(struct vm_zone *z)
 {
 	size_t free_bytes = 0;
-	for (enum vm_page_order i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER; i++) {
+	for (enum vm_page_order i = VM_PAGE_MIN_ORDER; i <= VM_PAGE_MAX_ORDER;
+	     i++) {
 		size_t page_bytes = vm_page_order_to_bytes(i);
 		size_t nr_pages = 0;
-		queue_foreach (struct vm_page, pg, &z->z_free_pages[i], p_list) {
+		queue_foreach(struct vm_page, pg, &z->z_free_pages[i], p_list)
+		{
 			free_bytes += page_bytes;
 			nr_pages++;
 		}
@@ -106,24 +127,37 @@ void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
 
 	struct memblock_iter it;
 
-	/* TODO this only creates page blocks for free memory regions, not reserved memory regions.
-	 * this is faster for systems that have huge amounts of reserved memory, but it means
-	 * that a call to vm_page_get() for a reserved memory region will return null
-	 * rather than a reserved page.
+	/* TODO this only creates page blocks for free memory regions, not
+	 * reserved memory regions. this is faster for systems that have huge
+	 * amounts of reserved memory, but it means that a call to vm_page_get()
+	 * for a reserved memory region will return null rather than a reserved
+	 * page.
 	 *
-	 * vm_page_get() should probably create reserved pages on-demand for these regions. */
+	 * vm_page_get() should probably create reserved pages on-demand for
+	 * these regions. */
 
 	size_t nr_pages_found = 0;
-	for_each_free_mem_range(&it, z->z_info.zd_base, z->z_info.zd_limit) {
+	for_each_free_mem_range(&it, z->z_info.zd_base, z->z_info.zd_limit)
+	{
+		it.it_base &= ~VM_PAGE_MASK;
+		if (it.it_limit & VM_PAGE_MASK) {
+			it.it_limit &= ~VM_PAGE_MASK;
+			it.it_limit += VM_PAGE_SIZE;
+		}
+
 		phys_addr_t block_start = it.it_base, block_end = it.it_limit;
 
 		int this_page_reserved = 0, last_page_reserved = -1;
 
-		for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
+		for (uintptr_t i = it.it_base; i < it.it_limit;
+		     i += VM_PAGE_SIZE) {
 			struct vm_page *pg = vm_page_get(i);
 
 			if (pg) {
 				nr_pages_found++;
-				this_page_reserved = (pg->p_flags & VM_PAGE_RESERVED) ? 1 : 0;
+				this_page_reserved
+					= (pg->p_flags & VM_PAGE_RESERVED) ? 1
+									   : 0;
 			} else {
 				this_page_reserved = 1;
 			}
@@ -138,22 +172,30 @@ void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
 			}
 
 			convert_region_to_blocks(
-				z,
-				block_start, block_end + VM_PAGE_SIZE - 1,
-				last_page_reserved);
+				z,
+				block_start,
+				block_end + VM_PAGE_SIZE - 1,
+				last_page_reserved);
 
 			block_start = i;
+			if (block_start & VM_PAGE_MASK) {
+				block_start &= ~VM_PAGE_MASK;
+				block_start += VM_PAGE_SIZE;
+			}
 			last_page_reserved = this_page_reserved;
 			nr_pages_found = 0;
 		}
 
 		if (block_start != block_end) {
-			/* either the entire zone is homogeneous (all free/all reserved) or the entire zone is empty. */
+			/* either the entire zone is homogeneous (all free/all
+			 * reserved) or the entire zone is empty. */
 			if (nr_pages_found > 0) {
 				/* the entire zone is homogeneous :) */
 				convert_region_to_blocks(
-					z,
-					block_start, block_end + VM_PAGE_SIZE - 1,
-					this_page_reserved);
+					z,
+					block_start,
+					block_end + VM_PAGE_SIZE - 1,
+					this_page_reserved);
 			}
 		}
 	}
@@ -201,7 +243,10 @@ void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
 	char free_bytes_str[64];
 	data_size_to_string(free_bytes, free_bytes_str, sizeof free_bytes_str);
 
-	printk("vm: zone %u/%s: %s of memory online.", z->z_info.zd_node, z->z_info.zd_name, free_bytes_str);
+	printk("vm: zone %u/%s: %s of memory online.",
+		z->z_info.zd_node,
+		z->z_info.zd_name,
+		free_bytes_str);
 }
 
 static int replenish_free_page_list(struct vm_zone *z, enum vm_page_order order)
@@ -216,7 +261,8 @@ static int replenish_free_page_list(struct vm_zone *z, enum vm_page_order order)
 		return -1;
 	}
 
-	/* the lowest page order that is >= `order` and still has pages available */
+	/* the lowest page order that is >= `order` and still has pages
+	 * available */
 	enum vm_page_order first_order_with_free = VM_MAX_PAGE_ORDERS;
 
 	for (enum vm_page_order i = order; i <= VM_PAGE_MAX_ORDER; i++) {
@@ -232,7 +278,8 @@ static int replenish_free_page_list(struct vm_zone *z, enum vm_page_order order)
 	}
 
 	if (first_order_with_free == order) {
-		/* there are free pages of the requested order, so nothing needs to be done */
+		/* there are free pages of the requested order, so nothing needs
+		 * to be done */
 		return 0;
 	}
 
@@ -240,8 +287,10 @@ static int replenish_free_page_list(struct vm_zone *z, enum vm_page_order order)
 	   take a page, split it in half, and add the sub-pages
 	   to the next order's free list. */
 	for (enum vm_page_order i = first_order_with_free; i > order; i--) {
-		struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[i]);
-		struct vm_page *pg = QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);
+		struct queue_entry *pg_entry
+			= queue_pop_front(&z->z_free_pages[i]);
+		struct vm_page *pg
+			= QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);
 
 		struct vm_page *a, *b;
 		vm_page_split(pg, &a, &b);
@@ -253,7 +302,10 @@ static int replenish_free_page_list(struct vm_zone *z, enum vm_page_order order)
 	return 0;
 }
 
-struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order, enum vm_flags flags)
+struct vm_page *vm_zone_alloc_page(
+	struct vm_zone *z,
+	enum vm_page_order order,
+	enum vm_flags flags)
 {
 	unsigned long irq_flags;
 	spin_lock_irqsave(&z->z_lock, &irq_flags);
@@ -266,7 +318,8 @@ struct vm_page *vm_zone_alloc_page(struct vm_zone *z, enum vm_page_order order,
 
 	struct queue_entry *pg_entry = queue_pop_front(&z->z_free_pages[order]);
 	struct vm_page *pg = QUEUE_CONTAINER(struct vm_page, p_list, pg_entry);
-	vm_page_foreach (pg, i) {
+	vm_page_foreach(pg, i)
+	{
 		i->p_flags |= VM_PAGE_ALLOC;
 	}
 
@@ -289,7 +342,9 @@ void vm_zone_free_page(struct vm_zone *z, struct vm_page *pg)
 			break;
 		}
 
-		queue_delete(&z->z_free_pages[buddy->p_order - 1], &buddy->p_list);
+		queue_delete(
+			&z->z_free_pages[buddy->p_order - 1],
+			&buddy->p_list);
 		queue_delete(&z->z_free_pages[buddy->p_order - 1], &pg->p_list);
 		queue_push_back(&z->z_free_pages[huge->p_order], &huge->p_list);
 
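For reference, the rounding this commit applies to each memblock range in vm_zone_init can be exercised in isolation. The sketch below is illustrative, not the kernel's code: PAGE_SIZE, PAGE_MASK, and page_align_region are stand-ins introduced here, and VM_PAGE_MASK is assumed to be the low-bit mask VM_PAGE_SIZE - 1.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for VM_PAGE_SIZE / VM_PAGE_MASK. */
#define PAGE_SIZE 4096u
#define PAGE_MASK (PAGE_SIZE - 1)

/* Round base down and limit up to page boundaries, mirroring what
 * vm_zone_init now does to it.it_base / it.it_limit. */
static void page_align_region(uintptr_t *base, uintptr_t *limit)
{
	*base &= ~(uintptr_t)PAGE_MASK;          /* round base down */
	if (*limit & PAGE_MASK) {
		*limit &= ~(uintptr_t)PAGE_MASK; /* strip sub-page bits... */
		*limit += PAGE_SIZE;             /* ...then bump to the next boundary */
	}
}

int main(void)
{
	uintptr_t base = 0x1100, limit = 0x3f00;
	page_align_region(&base, &limit);
	/* prints base=0x1000 limit=0x4000 */
	printf("base=%#lx limit=%#lx\n", (unsigned long)base, (unsigned long)limit);
	return 0;
}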
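The new panic check in convert_region_to_blocks expresses the same constraint with an inclusive limit: base must sit on a page boundary and limit must be the last byte of a page, hence the (limit + 1) test. A hypothetical predicate under the same stand-in macros as above:

/* Inclusive-limit form of the check convert_region_to_blocks panics on:
 * base must start a page, limit must be a page's final byte. */
static int region_is_page_aligned(uintptr_t base, uintptr_t limit)
{
	return !(base & PAGE_MASK) && !((limit + 1) & PAGE_MASK);
}

/* region_is_page_aligned(0x1000, 0x1fff) -> 1 (exactly one whole page)
 * region_is_page_aligned(0x1000, 0x2000) -> 0 (limit crosses a boundary) */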