From 149f49bd28b72486b1df4559eed6f9de1e7af6dc Mon Sep 17 00:00:00 2001
From: Max Wash
Date: Sat, 30 Dec 2023 15:29:48 +0000
Subject: [PATCH] vm: limit sparse page init loop to last free page frame

When the sector coverage mode is set to free, the loop that initialises
the vm_page structs for free and reserved pages is now limited to the
same upper bound that is used to calculate the sector size and count.
---
 vm/sparse.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/vm/sparse.c b/vm/sparse.c
index 7ef0579..bcd0a45 100644
--- a/vm/sparse.c
+++ b/vm/sparse.c
@@ -123,7 +123,7 @@ static enum vm_page_order find_minimum_sector_size(phys_addr_t pmem_end)
    are in need of improvement to ensure that sparse works well on a
    wide range of systems. */
 static void calculate_sector_size_and_count(
-	size_t last_reserved_pfn, size_t last_free_pfn,
+	size_t last_reserved_pfn, size_t last_free_pfn, size_t limit_pfn,
 	size_t reserved_size, size_t free_size,
 	unsigned int *out_sector_count, enum vm_page_order *out_sector_size)
 {
@@ -131,17 +131,8 @@ static void calculate_sector_size_and_count(
 	   the minimum sector size is what ever is required to cover all of
 	   physical memory in the maximum number of sectors */
 
-	enum sector_coverage_mode mode = get_sector_coverage_mode();
-	phys_addr_t pmem_end = 0;
-
-	enum vm_page_order sector_size = find_minimum_sector_size(last_free_pfn);
-	if (mode == SECTOR_COVERAGE_FREE) {
-		pmem_end = last_free_pfn * VM_PAGE_SIZE;
-	} else {
-		pmem_end = MAX(last_free_pfn, last_reserved_pfn) * VM_PAGE_SIZE;
-	}
-
-	sector_size = find_minimum_sector_size(pmem_end);
+	phys_addr_t pmem_end = limit_pfn * VM_PAGE_SIZE;
+	enum vm_page_order sector_size = find_minimum_sector_size(pmem_end);
 
 	if (sector_size <= VM_PAGE_2M) {
 		/* override really small sector sizes with something
@@ -218,11 +209,22 @@ void vm_sparse_init(void)
 		}
 	}
 
-	enum vm_page_order sector_size;
+	enum sector_coverage_mode mode = get_sector_coverage_mode();
+	phys_addr_t pmem_end = 0;
+
+	enum vm_page_order sector_size = find_minimum_sector_size(last_free_pfn);
+	if (mode == SECTOR_COVERAGE_FREE) {
+		pmem_end = last_free_pfn * VM_PAGE_SIZE;
+	} else {
+		pmem_end = MAX(last_free_pfn, last_reserved_pfn) * VM_PAGE_SIZE;
+	}
+
+	printk("vm: last_pfn=0x%lx", pmem_end / VM_PAGE_SIZE);
+
 	size_t sector_bytes = 0;
 	unsigned int nr_sectors = 0;
 	calculate_sector_size_and_count(
-		last_reserved_pfn, last_free_pfn,
+		last_reserved_pfn, last_free_pfn, pmem_end / VM_PAGE_SIZE,
 		reserved_size, free_size,
 		&nr_sectors, &sector_size);
 	sector_bytes = vm_page_order_to_bytes(sector_size);
@@ -241,7 +243,7 @@ void vm_sparse_init(void)
 	size_t s, i;
 	phys_addr_to_sector_and_index(0x3f00000, &s, &i);
 
-	for_each_free_mem_range(&it, 0x0, UINTPTR_MAX) {
+	for_each_free_mem_range(&it, 0x0, pmem_end) {
 		if (it.it_base & VM_PAGE_MASK) {
 			it.it_base &= ~VM_PAGE_MASK;
 			it.it_base += VM_PAGE_SIZE;
@@ -253,7 +255,7 @@ void vm_sparse_init(void)
 		}
 	}
 
-	for_each_reserved_mem_range(&it, 0x0, UINTPTR_MAX) {
+	for_each_reserved_mem_range(&it, 0x0, pmem_end) {
 		if (it.it_base & VM_PAGE_MASK) {
 			it.it_base &= ~VM_PAGE_MASK;
 			it.it_base += VM_PAGE_SIZE;
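
Note: the snippet below is not part of the patch. It is a minimal standalone
sketch of the bound selection that the change hoists into vm_sparse_init(),
kept here only to illustrate how the limit PFN is derived from the coverage
mode. The names SECTOR_COVERAGE_FREE, VM_PAGE_SIZE, MAX, last_free_pfn and
last_reserved_pfn mirror the diff; the SECTOR_COVERAGE_ALL enumerator, the
fixed 4 KiB page size, pick_pmem_end(), the sample PFN values and the main()
harness are illustrative assumptions, not kernel code.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define VM_PAGE_SIZE 4096ULL
	#define MAX(a, b) ((a) > (b) ? (a) : (b))

	typedef uint64_t phys_addr_t;

	enum sector_coverage_mode {
		SECTOR_COVERAGE_FREE,	/* cover free memory only */
		SECTOR_COVERAGE_ALL,	/* assumed name: cover free and reserved memory */
	};

	/* Pick the exclusive upper bound of physical memory (as a byte
	 * address) that sparse init should cover, based on the mode. */
	static phys_addr_t pick_pmem_end(enum sector_coverage_mode mode,
					 size_t last_free_pfn,
					 size_t last_reserved_pfn)
	{
		if (mode == SECTOR_COVERAGE_FREE)
			return (phys_addr_t)last_free_pfn * VM_PAGE_SIZE;
		return (phys_addr_t)MAX(last_free_pfn, last_reserved_pfn) * VM_PAGE_SIZE;
	}

	int main(void)
	{
		/* Sample values: free memory ends at PFN 0x3f000, a reserved
		 * firmware region ends later at PFN 0x40000. */
		size_t last_free_pfn = 0x3f000;
		size_t last_reserved_pfn = 0x40000;

		phys_addr_t end_free = pick_pmem_end(SECTOR_COVERAGE_FREE,
						     last_free_pfn, last_reserved_pfn);
		phys_addr_t end_all = pick_pmem_end(SECTOR_COVERAGE_ALL,
						    last_free_pfn, last_reserved_pfn);

		/* The same bound, converted back to a PFN, is what the patch
		 * now passes to calculate_sector_size_and_count() and uses to
		 * clamp the for_each_*_mem_range() init loops. */
		printf("free-only limit_pfn = 0x%llx\n",
		       (unsigned long long)(end_free / VM_PAGE_SIZE));
		printf("free+reserved limit_pfn = 0x%llx\n",
		       (unsigned long long)(end_all / VM_PAGE_SIZE));
		return 0;
	}

With SECTOR_COVERAGE_FREE the loops stop at the last free PFN, so reserved
pages above it are skipped; in the other mode the bound grows to cover the
highest of the two PFNs, matching the behaviour described in the message.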