vm: sparse sector map now extends only up to the last free page by default

By default, the sector map created under the sparse model now extends only up to the last non-reserved page frame; any reserved page frames beyond that point are left uncovered. Coverage of trailing reserved frames can be re-enabled by booting with vm.sector-coverage-mode=all.
2023-12-29 19:53:31 +00:00
parent 36c7f3bbeb
commit 1c5c256c89


@@ -23,6 +23,7 @@
 overhead, and the flat memory model may be a better choice.
 */
 #include <socks/vm.h>
+#include <socks/arg.h>
 #include <socks/printk.h>
 #include <socks/panic.h>
 #include <socks/memblock.h>
@@ -32,6 +33,30 @@
 static struct vm_sector *sector_array = NULL;
 static size_t sector_array_count = 0;
 
+enum sector_coverage_mode {
+	SECTOR_COVERAGE_FREE,
+	SECTOR_COVERAGE_ALL,
+};
+
+static enum sector_coverage_mode get_sector_coverage_mode(void)
+{
+	const char *arg = arg_value("vm.sector-coverage-mode");
+
+	if (!arg) {
+		return SECTOR_COVERAGE_FREE;
+	}
+	if (!strcmp(arg, "free")) {
+		return SECTOR_COVERAGE_FREE;
+	}
+	if (!strcmp(arg, "all")) {
+		return SECTOR_COVERAGE_ALL;
+	}
+
+	printk("vm: [sparse] ignoring unknown sector coverage mode '%s', using FREE", arg);
+	return SECTOR_COVERAGE_FREE;
+}
+
 static struct vm_sector *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sector_id, size_t *index)
 {
 	/* all sectors have the same size */
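The lookup above fails safe: a missing or unrecognized vm.sector-coverage-mode value falls back to FREE rather than leaving the map unsized. Below is a minimal userspace sketch of the same decision table; arg_value() and printk() are stubbed out here, and the stubbed return value is purely illustrative, not the socks APIs.

```c
#include <stdio.h>
#include <string.h>

enum sector_coverage_mode {
	SECTOR_COVERAGE_FREE,
	SECTOR_COVERAGE_ALL,
};

/* stand-in for socks' arg_value(): returns the raw value of a boot
 * argument, or NULL when it was not given on the command line */
static const char *arg_value(const char *name)
{
	(void)name;
	return "all"; /* pretend the user booted with vm.sector-coverage-mode=all */
}

static enum sector_coverage_mode get_sector_coverage_mode(void)
{
	const char *arg = arg_value("vm.sector-coverage-mode");

	if (!arg || !strcmp(arg, "free"))
		return SECTOR_COVERAGE_FREE;
	if (!strcmp(arg, "all"))
		return SECTOR_COVERAGE_ALL;

	/* unknown value: warn (printf stands in for printk) and fail safe */
	printf("vm: [sparse] ignoring unknown sector coverage mode '%s', using FREE\n", arg);
	return SECTOR_COVERAGE_FREE;
}

int main(void)
{
	printf("mode = %s\n",
	       get_sector_coverage_mode() == SECTOR_COVERAGE_ALL ? "ALL" : "FREE");
	return 0;
}
```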
@@ -98,13 +123,25 @@ static enum vm_page_order find_minimum_sector_size(phys_addr_t pmem_end)
 are in need of improvement to ensure that sparse works well on a wide
 range of systems. */
 static void calculate_sector_size_and_count(
-	phys_addr_t pmem_end, size_t reserved_size, size_t free_size,
+	size_t last_reserved_pfn, size_t last_free_pfn,
+	size_t reserved_size, size_t free_size,
 	unsigned int *out_sector_count, enum vm_page_order *out_sector_size)
 {
 	/* we can support up to VM_MAX_SECTORS memory sectors.
 	the minimum sector size is whatever is required
 	to cover all of physical memory in the maximum number of sectors */
-	enum vm_page_order sector_size = find_minimum_sector_size(pmem_end);
+	enum sector_coverage_mode mode = get_sector_coverage_mode();
+	phys_addr_t pmem_end = 0;
+	enum vm_page_order sector_size;
+
+	if (mode == SECTOR_COVERAGE_FREE) {
+		pmem_end = last_free_pfn * VM_PAGE_SIZE;
+	} else {
+		pmem_end = MAX(last_free_pfn, last_reserved_pfn) * VM_PAGE_SIZE;
+	}
+	sector_size = find_minimum_sector_size(pmem_end);
+
 	if (sector_size <= VM_PAGE_2M) {
 		/* override really small sector sizes with something
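The practical difference between the two modes shows up when firmware reserves page frames far above the last free one. The sketch below reproduces the pmem_end arithmetic from the hunk above; the PFNs and the 4 KiB VM_PAGE_SIZE are assumed values for illustration, with the real constants coming from socks/vm.h.

```c
#include <stdint.h>
#include <stdio.h>

#define VM_PAGE_SIZE 4096ULL /* assumed 4 KiB pages */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* hypothetical layout: free RAM ends near 2 GiB, but firmware
	 * reserves a page frame just below 4 GiB */
	uint64_t last_free_pfn = 0x7FFFF;     /* last free page frame */
	uint64_t last_reserved_pfn = 0xFFFFF; /* reserved frame near 4 GiB */

	uint64_t pmem_end_free = last_free_pfn * VM_PAGE_SIZE;
	uint64_t pmem_end_all = MAX(last_free_pfn, last_reserved_pfn) * VM_PAGE_SIZE;

	/* FREE sizes the map for ~2 GiB, ALL for ~4 GiB */
	printf("FREE: pmem_end = %#llx\n", (unsigned long long)pmem_end_free);
	printf("ALL:  pmem_end = %#llx\n", (unsigned long long)pmem_end_all);
	return 0;
}
```

In FREE mode the high reserved frame no longer inflates pmem_end, so find_minimum_sector_size() can pick a smaller sector size for the same VM_MAX_SECTORS budget.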
@@ -152,6 +189,7 @@ static void calculate_sector_size_and_count(
 void vm_sparse_init(void)
 {
 	size_t pmem_limit = 0, reserved_size = 0, free_size = 0;
+	size_t last_reserved_pfn = 0, last_free_pfn = 0;
 	struct memblock_iter it;
 
 	for_each_mem_range (&it, 0x0, UINTPTR_MAX) {
@@ -162,17 +200,30 @@ void vm_sparse_init(void)
 	for_each_free_mem_range (&it, 0x0, UINTPTR_MAX) {
 		free_size += it.it_limit - it.it_base + 1;
+
+		size_t last_pfn = it.it_limit / VM_PAGE_SIZE;
+		if (last_pfn > last_free_pfn) {
+			last_free_pfn = last_pfn;
+		}
 	}
 
 	for_each_reserved_mem_range (&it, 0x0, UINTPTR_MAX) {
 		reserved_size += it.it_limit - it.it_base + 1;
+
+		size_t last_pfn = it.it_limit / VM_PAGE_SIZE;
+		if (last_pfn > last_reserved_pfn) {
+			last_reserved_pfn = last_pfn;
+		}
 	}
 
 	enum vm_page_order sector_size;
 	size_t sector_bytes = 0;
 	unsigned int nr_sectors = 0;
 
 	calculate_sector_size_and_count(
-		pmem_limit, reserved_size, free_size,
+		last_reserved_pfn, last_free_pfn,
+		reserved_size, free_size,
 		&nr_sectors, &sector_size);
 	sector_bytes = vm_page_order_to_bytes(sector_size);
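The last-PFN tracking in vm_sparse_init() amounts to a running maximum over the inclusive it_limit of each range, as the "+ 1" in the size sums implies. A self-contained sketch follows, with a hypothetical static range table standing in for the memblock iterators and an assumed 4 KiB page size:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VM_PAGE_SIZE 4096ULL /* assumed; defined by socks in reality */

/* hypothetical stand-in for one memblock range; limit is the last
 * byte of the range (inclusive), matching the "+ 1" size math above */
struct range {
	uint64_t base;
	uint64_t limit;
};

int main(void)
{
	/* illustrative free ranges: a hole below 1 MiB, then RAM up to 2 GiB */
	struct range free_ranges[] = {
		{ 0x00100000, 0x3FFFFFFF },
		{ 0x40000000, 0x7FFFFFFF },
	};
	uint64_t free_size = 0, last_free_pfn = 0;

	for (size_t i = 0; i < sizeof(free_ranges) / sizeof(free_ranges[0]); i++) {
		free_size += free_ranges[i].limit - free_ranges[i].base + 1;

		uint64_t last_pfn = free_ranges[i].limit / VM_PAGE_SIZE;
		if (last_pfn > last_free_pfn)
			last_free_pfn = last_pfn;
	}

	/* prints: free_size = 0x7ff00000, last_free_pfn = 0x7ffff */
	printf("free_size = %#llx, last_free_pfn = %#llx\n",
	       (unsigned long long)free_size, (unsigned long long)last_free_pfn);
	return 0;
}
```

The reserved-range loop is identical in shape, feeding last_reserved_pfn, so after both loops calculate_sector_size_and_count() has everything it needs to pick pmem_end for either coverage mode.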