memblock can now re-allocate its own internal buffers

memory allocated by memblock_alloc() can now be limited to a certain region.
This commit is contained in:
2023-01-08 12:13:59 +00:00
parent 02211e6eba
commit 0d77d97561
6 changed files with 125 additions and 12 deletions

View File

@@ -1,19 +1,16 @@
#ifndef SOCKS_VM_H_
#define SOCKS_VM_H_
#include <stddef.h>
#include <socks/types.h>
#include <socks/status.h>
/*******************************************
* Beginning of machine-specific definitions
*******************************************/
typedef enum vm_zone_id {
VM_ZONE_DMA = 0x01u,
VM_ZONE_NORMAL = 0x02u,
VM_ZONE_HIGHMEM = 0x03u,
VM_ZONE_DMA = 1u,
VM_ZONE_NORMAL = 2u,
VM_ZONE_HIGHMEM = 3u,
VM_ZONE_COUNT
} vm_zone_id_t;
/*******************************************
* End of machine-specific definitions.
*******************************************/
typedef enum vm_memory_region_status {
VM_REGION_FREE = 0x01u,
@@ -30,6 +27,10 @@ typedef struct vm_zone {
unsigned z_reserved[32];
} vm_zone_t;
typedef struct vm_pg_data {
vm_zone_t pg_zones[VM_ZONE_COUNT];
} vm_pg_data_t;
typedef struct vm_region {
vm_memory_region_status_t r_status;
phys_addr_t r_base;
@@ -44,7 +45,7 @@ typedef struct vm_page {
uint32_t p_flags; /* vm_page_flags_t bitfield */
} __attribute__((packed)) vm_page_t;
extern void vm_bootstrap(void);
extern kern_status_t vm_bootstrap(const vm_region_t *mem_map, size_t nr_mem_map_entries);
extern void vm_page_init(vm_page_t *pg);

34
sandbox/vm/vm_bootstrap.c Normal file
View File

@@ -0,0 +1,34 @@
#include <limits.h>
#include <socks/vm.h>
#include <socks/memblock.h>
#include <stddef.h>
#include <limits.h>
/* One vm_pg_data_t per NUMA node. Only a single node is handled for now.
 * NOTE(review): not yet referenced by vm_bootstrap() — presumably wired up
 * in a follow-up; confirm before removing. */
static vm_pg_data_t node_data = { 0 }; /* `= {}` is only standard from C23; `{ 0 }` zero-inits portably */
/*
 * vm_bootstrap - hand the firmware physical memory map to the early
 * (memblock) allocator.
 *
 * Pass 1 scans all regions for the lowest base and highest limit and
 * registers that whole span with memblock_add(); pass 2 carves out every
 * region the map marks VM_REGION_RESERVED via memblock_reserve().
 *
 * @mem_map:            array of physical memory regions (may not be NULL
 *                      unless nr_mem_map_entries is 0)
 * @nr_mem_map_entries: number of entries in @mem_map
 *
 * Returns KERN_OK. An empty map is a no-op success.
 *
 * NOTE(review): memblock_add() and memblock_reserve() are both passed
 * (base, limit) address pairs here — confirm the memblock API takes an
 * end address rather than a size, or the reserved spans will be wrong.
 */
kern_status_t vm_bootstrap(const vm_region_t *mem_map, size_t nr_mem_map_entries)
{
	uintptr_t pmap_min = UINTPTR_MAX;
	uintptr_t pmap_max = 0;

	/* Guard: with zero entries the scan below would leave the sentinels
	 * untouched and register the bogus span [UINTPTR_MAX, 0]. */
	if (mem_map == NULL || nr_mem_map_entries == 0) {
		return KERN_OK;
	}

	for (size_t i = 0; i < nr_mem_map_entries; i++) {
		if (mem_map[i].r_base < pmap_min) {
			pmap_min = mem_map[i].r_base;
		}
		if (mem_map[i].r_limit > pmap_max) {
			pmap_max = mem_map[i].r_limit;
		}
	}
	memblock_add(pmap_min, pmap_max);

	for (size_t i = 0; i < nr_mem_map_entries; i++) {
		if (mem_map[i].r_status == VM_REGION_RESERVED) {
			memblock_reserve(mem_map[i].r_base, mem_map[i].r_limit);
		}
	}

	return KERN_OK;
}