kernel: don't use typedef for enums or non-opaque structs
This commit is contained in:
36
vm/sparse.c
36
vm/sparse.c
@@ -28,10 +28,10 @@
|
||||
#include <socks/util.h>
|
||||
#include <socks/machine/cpu.h>
|
||||
|
||||
static vm_sector_t *sector_array = NULL;
|
||||
static struct vm_sector *sector_array = NULL;
|
||||
static size_t sector_array_count = 0;
|
||||
|
||||
static vm_sector_t *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sector_id, size_t *index)
|
||||
static struct vm_sector *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sector_id, size_t *index)
|
||||
{
|
||||
/* all sectors have the same size */
|
||||
size_t step = vm_page_order_to_bytes(sector_array[0].s_size);
|
||||
@@ -52,16 +52,16 @@ static vm_sector_t *phys_addr_to_sector_and_index(phys_addr_t addr, size_t *sect
|
||||
return &sector_array[sector];
|
||||
}
|
||||
|
||||
static vm_page_t *get_or_create_page(phys_addr_t addr)
|
||||
static struct vm_page *get_or_create_page(phys_addr_t addr)
|
||||
{
|
||||
size_t sector_number, page_number;
|
||||
phys_addr_to_sector_and_index(addr, &sector_number, &page_number);
|
||||
|
||||
vm_sector_t *sector = &sector_array[sector_number];
|
||||
struct vm_sector *sector = &sector_array[sector_number];
|
||||
|
||||
if (!sector->s_pages) {
|
||||
size_t nr_pages = vm_page_order_to_pages(sector->s_size);
|
||||
sector->s_pages = kzalloc(nr_pages * sizeof(vm_page_t), 0);
|
||||
sector->s_pages = kzalloc(nr_pages * sizeof(struct vm_page), 0);
|
||||
|
||||
for (size_t i = 0; i < nr_pages; i++) {
|
||||
sector->s_pages[i].p_flags = VM_PAGE_RESERVED;
|
||||
@@ -73,9 +73,9 @@ static vm_page_t *get_or_create_page(phys_addr_t addr)
|
||||
return &sector->s_pages[page_number];
|
||||
}
|
||||
|
||||
static vm_page_order_t find_minimum_sector_size(size_t pmem_size)
|
||||
static enum vm_page_order find_minimum_sector_size(size_t pmem_size)
|
||||
{
|
||||
for (vm_page_order_t i = VM_PAGE_4K; i < VM_PAGE_64G; i++) {
|
||||
for (enum vm_page_order i = VM_PAGE_4K; i < VM_PAGE_64G; i++) {
|
||||
size_t order_bytes = vm_page_order_to_bytes(i);
|
||||
if (order_bytes * VM_MAX_SECTORS >= pmem_size) {
|
||||
return i;
|
||||
@@ -93,12 +93,12 @@ static vm_page_order_t find_minimum_sector_size(size_t pmem_size)
|
||||
this function uses some heuristics and thresholds that are untested and
|
||||
are in need of improvement to ensure that sparse works well on a wide
|
||||
range of systems. */
|
||||
static void calculate_sector_size_and_count(size_t pmem_size, size_t reserved_size, unsigned int *out_sector_count, vm_page_order_t *out_sector_size)
|
||||
static void calculate_sector_size_and_count(size_t pmem_size, size_t reserved_size, unsigned int *out_sector_count, enum vm_page_order *out_sector_size)
|
||||
{
|
||||
/* we can support up to VM_MAX_SECTORS memory sectors.
|
||||
the minimum sector size is what ever is required
|
||||
to cover all of physical memory in the maximum number of sectors */
|
||||
vm_page_order_t sector_size = find_minimum_sector_size(pmem_size);
|
||||
enum vm_page_order sector_size = find_minimum_sector_size(pmem_size);
|
||||
|
||||
if (sector_size <= VM_PAGE_2M) {
|
||||
/* override really small sector sizes with something
|
||||
@@ -148,7 +148,7 @@ void vm_sparse_init(void)
|
||||
{
|
||||
size_t pmem_size = 0, reserved_size = 0;
|
||||
|
||||
memblock_iter_t it;
|
||||
struct memblock_iter it;
|
||||
for_each_mem_range (&it, 0x0, UINTPTR_MAX) {
|
||||
if (pmem_size < it.it_limit + 1) {
|
||||
pmem_size = it.it_limit + 1;
|
||||
@@ -159,7 +159,7 @@ void vm_sparse_init(void)
|
||||
reserved_size += it.it_limit - it.it_base + 1;
|
||||
}
|
||||
|
||||
vm_page_order_t sector_size;
|
||||
enum vm_page_order sector_size;
|
||||
size_t sector_bytes = 0;
|
||||
unsigned int nr_sectors = 0;
|
||||
calculate_sector_size_and_count(pmem_size, reserved_size, &nr_sectors, &sector_size);
|
||||
@@ -168,7 +168,7 @@ void vm_sparse_init(void)
|
||||
char sector_size_str[64];
|
||||
data_size_to_string(sector_bytes, sector_size_str, sizeof sector_size_str);
|
||||
|
||||
sector_array = kzalloc(sizeof(vm_sector_t) * nr_sectors, 0);
|
||||
sector_array = kzalloc(sizeof(struct vm_sector) * nr_sectors, 0);
|
||||
sector_array_count = nr_sectors;
|
||||
|
||||
for (unsigned int i = 0; i < nr_sectors; i++) {
|
||||
@@ -186,7 +186,7 @@ void vm_sparse_init(void)
|
||||
}
|
||||
|
||||
for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
|
||||
vm_page_t *pg = get_or_create_page(i);
|
||||
struct vm_page *pg = get_or_create_page(i);
|
||||
pg->p_flags = 0;
|
||||
}
|
||||
}
|
||||
@@ -198,7 +198,7 @@ void vm_sparse_init(void)
|
||||
}
|
||||
|
||||
for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
|
||||
vm_page_t *pg = vm_page_get(i);
|
||||
struct vm_page *pg = vm_page_get(i);
|
||||
|
||||
if (!pg) {
|
||||
/* if the page doesn't exist, it is part of a sector
|
||||
@@ -214,7 +214,7 @@ void vm_sparse_init(void)
|
||||
printk("vm: [sparse] initialised %zu sectors of size %s", nr_sectors, sector_size_str);
|
||||
}
|
||||
|
||||
vm_page_t *vm_page_get_sparse(phys_addr_t addr)
|
||||
struct vm_page *vm_page_get_sparse(phys_addr_t addr)
|
||||
{
|
||||
size_t sector_number, page_number;
|
||||
phys_addr_to_sector_and_index(addr, &sector_number, &page_number);
|
||||
@@ -222,7 +222,7 @@ vm_page_t *vm_page_get_sparse(phys_addr_t addr)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
vm_sector_t *sector = &sector_array[sector_number];
|
||||
struct vm_sector *sector = &sector_array[sector_number];
|
||||
|
||||
if (!sector->s_pages || page_number >= vm_page_order_to_pages(sector->s_size)) {
|
||||
return NULL;
|
||||
@@ -231,8 +231,8 @@ vm_page_t *vm_page_get_sparse(phys_addr_t addr)
|
||||
return &sector->s_pages[page_number];
|
||||
}
|
||||
|
||||
size_t vm_page_get_pfn_sparse(vm_page_t *pg)
|
||||
size_t vm_page_get_pfn_sparse(struct vm_page *pg)
|
||||
{
|
||||
vm_sector_t *sector = &sector_array[pg->p_sector];
|
||||
struct vm_sector *sector = &sector_array[pg->p_sector];
|
||||
return sector->s_first_pfn + (((uintptr_t)pg - (uintptr_t)sector->s_pages) / sizeof *pg);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user