From 1cbab5f2f47ebda5802babd2f94b89f0cf4d8eb3 Mon Sep 17 00:00:00 2001
From: Max Wash
Date: Sun, 24 Dec 2023 09:35:50 +0000
Subject: [PATCH] vm: optimise vm_zone_init by only making blocks from free
 regions during boot

---
 vm/zone.c | 68 ++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 55 insertions(+), 13 deletions(-)

diff --git a/vm/zone.c b/vm/zone.c
index 3722bd8..96556be 100644
--- a/vm/zone.c
+++ b/vm/zone.c
@@ -104,11 +104,61 @@ void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
 	unsigned long flags;
 	spin_lock_irqsave(&z->z_lock, &flags);
 
-	phys_addr_t block_start = zone_info->zd_base, block_end = zone_info->zd_limit;
-	int this_page_reserved = 0, last_page_reserved = -1;
-
-	phys_addr_t plimit = 0;
 	struct memblock_iter it;
+
+	/* TODO this only creates page blocks for free memory regions, not reserved memory regions.
+	 * this is faster for systems that have huge amounts of reserved memory, but it means
+	 * that a call to vm_page_get() for a reserved memory region will return null
+	 * rather than a reserved page.
+	 *
+	 * vm_page_get() should probably create reserved pages on-demand for these regions. */
+
+	size_t nr_pages_found = 0;
+	for_each_free_mem_range(&it, z->z_info.zd_base, z->z_info.zd_limit) {
+		phys_addr_t block_start = it.it_base, block_end = it.it_limit;
+		int this_page_reserved = 0, last_page_reserved = -1;
+
+		for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
+			struct vm_page *pg = vm_page_get(i);
+
+			if (pg) {
+				nr_pages_found++;
+				this_page_reserved = (pg->p_flags & VM_PAGE_RESERVED) ? 1 : 0;
+			} else {
+				this_page_reserved = 1;
+			}
+
+			if (last_page_reserved == -1) {
+				last_page_reserved = this_page_reserved;
+			}
+
+			if (this_page_reserved == last_page_reserved) {
+				block_end = i;
+				continue;
+			}
+
+			convert_region_to_blocks(
+				z,
+				block_start, block_end + VM_PAGE_SIZE - 1,
+				last_page_reserved);
+
+			block_start = i;
+			last_page_reserved = this_page_reserved;
+		}
+
+		if (block_start != block_end) {
+			/* either the entire range is homogeneous (all free/all reserved) or the range is empty. */
+			if (nr_pages_found > 0) {
+				/* the entire range is homogeneous :) */
+				convert_region_to_blocks(
+					z,
+					block_start, block_end + VM_PAGE_SIZE - 1,
+					this_page_reserved);
+			}
+		}
+	}
+
+#if 0
 	for_each_mem_range (&it, 0x00, UINTPTR_MAX) {
 		if (it.it_limit + 1 > plimit) {
 			plimit = it.it_limit + 1;
@@ -119,7 +169,6 @@ void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
 		z->z_info.zd_limit = plimit;
 	}
 
-	size_t nr_pages_found = 0;
 	for (uintptr_t i = z->z_info.zd_base; i < z->z_info.zd_limit; i += VM_PAGE_SIZE) {
 		struct vm_page *pg = vm_page_get(i);
 
@@ -144,14 +193,7 @@ void vm_zone_init(struct vm_zone *z, const struct vm_zone_descriptor *zone_info)
 		block_start = i;
 		last_page_reserved = this_page_reserved;
 	}
-
-	if (block_start != block_end) {
-		/* either the entire zone is homogeneous (all free/all reserved) or the entire zone is empty. */
-		if (nr_pages_found > 0) {
-			/* the entire zone is homogeneous :) */
-			convert_region_to_blocks(z, block_start, block_end + VM_PAGE_SIZE - 1, this_page_reserved);
-		}
-	}
+#endif
 
 	size_t free_bytes = zone_free_bytes(z);
 	spin_unlock_irqrestore(&z->z_lock, flags);
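
Note: one possible shape for the TODO above, where vm_page_get() materialises
reserved pages on demand rather than the boot-time scan creating them. This is
a minimal sketch only, not part of the patch; memblock_addr_is_reserved(),
vm_page_alloc_struct() and vm_page_insert() are hypothetical helpers assumed
for illustration and do not exist in this tree.

	/* sketch: resolve a physical address to a page, lazily creating the
	 * struct vm_page for reserved regions that vm_zone_init() skipped.
	 * the three helpers marked "hypothetical" below are assumptions. */
	struct vm_page *vm_page_get_or_reserve(struct vm_zone *z, phys_addr_t addr)
	{
		struct vm_page *pg = vm_page_get(addr);

		if (pg)
			return pg;

		/* a miss may just mean the boot-time scan skipped this
		 * reserved region, so consult memblock before giving up. */
		if (!memblock_addr_is_reserved(addr))	/* hypothetical helper */
			return NULL;			/* genuinely unknown address */

		pg = vm_page_alloc_struct(z);		/* hypothetical page-struct allocator */
		if (!pg)
			return NULL;

		pg->p_flags = VM_PAGE_RESERVED;
		vm_page_insert(z, addr, pg);		/* hypothetical: index pg by addr */
		return pg;
	}

With something like this in place, boot stays proportional to the number of
free ranges while lookups into reserved regions still succeed on first use.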