vm: optimise vm_zone_init by only making blocks from free regions during boot

This commit is contained in:
2023-12-24 09:35:50 +00:00
parent 67c0b6eba9
commit 1cbab5f2f4

View File

@@ -104,11 +104,61 @@ void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
unsigned long flags;
spin_lock_irqsave(&z->z_lock, &flags);
phys_addr_t block_start = zone_info->zd_base, block_end = zone_info->zd_limit;
int this_page_reserved = 0, last_page_reserved = -1;
phys_addr_t plimit = 0;
struct memblock_iter it;
/* TODO this only creates page blocks for free memory regions, not reserved memory regions.
* this is faster for systems that have huge amounts of reserved memory, but it means
* that a call to vm_page_get() for a reserved memory region will return null
* rather than a reserved page.
*
* vm_page_get() should probably create reserved pages on-demand for these regions. */
size_t nr_pages_found = 0;
for_each_free_mem_range(&it, z->z_info.zd_base, z->z_info.zd_limit) {
phys_addr_t block_start = it.it_base, block_end = it.it_limit;
int this_page_reserved = 0, last_page_reserved = -1;
for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
struct vm_page *pg = vm_page_get(i);
if (pg) {
nr_pages_found++;
this_page_reserved = (pg->p_flags & VM_PAGE_RESERVED) ? 1 : 0;
} else {
this_page_reserved = 1;
}
if (last_page_reserved == -1) {
last_page_reserved = this_page_reserved;
}
if (this_page_reserved == last_page_reserved) {
block_end = i;
continue;
}
convert_region_to_blocks(
z,
block_start, block_end + VM_PAGE_SIZE - 1,
last_page_reserved);
block_start = i;
last_page_reserved = this_page_reserved;
}
if (block_start != block_end) {
/* either the entire zone is homogeneous (all free/all reserved) or the entire zone is empty. */
if (nr_pages_found > 0) {
/* the entire zone is homogeneous :) */
convert_region_to_blocks(
z,
block_start, block_end + VM_PAGE_SIZE - 1,
this_page_reserved);
}
}
}
#if 0
for_each_mem_range (&it, 0x00, UINTPTR_MAX) {
if (it.it_limit + 1 > plimit) {
plimit = it.it_limit + 1;
@@ -119,7 +169,6 @@ void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
z->z_info.zd_limit = plimit;
}
size_t nr_pages_found = 0;
for (uintptr_t i = z->z_info.zd_base; i < z->z_info.zd_limit; i += VM_PAGE_SIZE) {
struct vm_page *pg = vm_page_get(i);
@@ -144,14 +193,7 @@ void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
block_start = i;
last_page_reserved = this_page_reserved;
}
#endif
if (block_start != block_end) {
/* either the entire zone is homogeneous (all free/all reserved) or the entire zone is empty. */
if (nr_pages_found > 0) {
/* the entire zone is homogeneous :) */
convert_region_to_blocks(z, block_start, block_end + VM_PAGE_SIZE - 1, this_page_reserved);
}
}
size_t free_bytes = zone_free_bytes(z);
spin_unlock_irqrestore(&z->z_lock, flags);