vm: limit sparse page init loop to last free page frame
When the sector coverage mode is set to free, the loops that initialise the vm_page structs for free and reserved pages are limited to the same upper bound (pmem_end) that is used to calculate the sector size and count.
This commit is contained in:

Changed file: vm/sparse.c (34 lines changed)
@@ -123,7 +123,7 @@ static enum vm_page_order find_minimum_sector_size(phys_addr_t pmem_end)
    are in need of improvement to ensure that sparse works well on a wide
    range of systems. */
 static void calculate_sector_size_and_count(
-	size_t last_reserved_pfn, size_t last_free_pfn,
+	size_t last_reserved_pfn, size_t last_free_pfn, size_t limit_pfn,
 	size_t reserved_size, size_t free_size,
 	unsigned int *out_sector_count, enum vm_page_order *out_sector_size)
 {
@@ -131,17 +131,8 @@ static void calculate_sector_size_and_count(
    the minimum sector size is what ever is required
    to cover all of physical memory in the maximum number of sectors */

-	enum sector_coverage_mode mode = get_sector_coverage_mode();
-	phys_addr_t pmem_end = 0;
-
-	enum vm_page_order sector_size = find_minimum_sector_size(last_free_pfn);
-	if (mode == SECTOR_COVERAGE_FREE) {
-		pmem_end = last_free_pfn * VM_PAGE_SIZE;
-	} else {
-		pmem_end = MAX(last_free_pfn, last_reserved_pfn) * VM_PAGE_SIZE;
-	}
-
-	sector_size = find_minimum_sector_size(pmem_end);
-
+	phys_addr_t pmem_end = limit_pfn * VM_PAGE_SIZE;
+	enum vm_page_order sector_size = find_minimum_sector_size(pmem_end);
+
 	if (sector_size <= VM_PAGE_2M) {
 		/* override really small sector sizes with something
@@ -218,11 +209,22 @@ void vm_sparse_init(void)
 		}
 	}

-	enum vm_page_order sector_size;
+	enum sector_coverage_mode mode = get_sector_coverage_mode();
+	phys_addr_t pmem_end = 0;
+
+	enum vm_page_order sector_size = find_minimum_sector_size(last_free_pfn);
+	if (mode == SECTOR_COVERAGE_FREE) {
+		pmem_end = last_free_pfn * VM_PAGE_SIZE;
+	} else {
+		pmem_end = MAX(last_free_pfn, last_reserved_pfn) * VM_PAGE_SIZE;
+	}
+
+	printk("vm: last_pfn=0x%lx", pmem_end / VM_PAGE_SIZE);
+
 	size_t sector_bytes = 0;
 	unsigned int nr_sectors = 0;
 	calculate_sector_size_and_count(
-		last_reserved_pfn, last_free_pfn,
+		last_reserved_pfn, last_free_pfn, pmem_end / VM_PAGE_SIZE,
 		reserved_size, free_size,
 		&nr_sectors, &sector_size);
 	sector_bytes = vm_page_order_to_bytes(sector_size);
@@ -241,7 +243,7 @@ void vm_sparse_init(void)
 	size_t s, i;
 	phys_addr_to_sector_and_index(0x3f00000, &s, &i);

-	for_each_free_mem_range(&it, 0x0, UINTPTR_MAX) {
+	for_each_free_mem_range(&it, 0x0, pmem_end) {
 		if (it.it_base & VM_PAGE_MASK) {
 			it.it_base &= ~VM_PAGE_MASK;
 			it.it_base += VM_PAGE_SIZE;
@@ -253,7 +255,7 @@ void vm_sparse_init(void)
 		}
 	}

-	for_each_reserved_mem_range(&it, 0x0, UINTPTR_MAX) {
+	for_each_reserved_mem_range(&it, 0x0, pmem_end) {
 		if (it.it_base & VM_PAGE_MASK) {
 			it.it_base &= ~VM_PAGE_MASK;
 			it.it_base += VM_PAGE_SIZE;
|
|||||||
Reference in New Issue
Block a user