2022-12-29 19:22:16 +00:00
|
|
|
#ifndef SOCKS_VM_H_
|
|
|
|
|
#define SOCKS_VM_H_
|
|
|
|
|
|
2023-01-08 12:13:59 +00:00
|
|
|
#include <stddef.h>
|
2022-12-29 19:22:16 +00:00
|
|
|
#include <socks/types.h>
|
2023-01-08 12:13:59 +00:00
|
|
|
#include <socks/status.h>
|
2023-01-19 20:52:56 +00:00
|
|
|
#include <socks/queue.h>
|
2023-02-02 21:14:02 +00:00
|
|
|
#include <socks/locks.h>
|
2023-02-05 10:28:07 +00:00
|
|
|
#include <socks/machine/vm.h>
|
2023-01-19 20:52:56 +00:00
|
|
|
|
2023-02-01 12:26:49 +00:00
|
|
|
/* maximum number of NUMA nodes */
#define VM_MAX_NODES 64

/* maximum number of memory zones per node (one slot per vm_zone_id_t value) */
#define VM_MAX_ZONES (VM_ZONE_MAX + 1)

/* maximum number of supported page orders.
   NOTE(review): VM_PAGE_MAX_ORDER is not defined in this header; it
   presumably comes from <socks/machine/vm.h> -- confirm. */
#define VM_MAX_PAGE_ORDERS (VM_PAGE_MAX_ORDER + 1)
|
|
|
|
|
|
2023-01-29 11:03:53 +00:00
|
|
|
/* Evaluates to 1 when the address p is aligned according to mask, else 0.
   mask is the set of address bits that are allowed to be non-zero, i.e.
   p is aligned iff it has no set bits outside mask.
   (The == comparison already yields 0 or 1, so no ternary is needed.) */
#define VM_CHECK_ALIGN(p, mask) (((p) & (mask)) == (p))
|
|
|
|
|
|
2023-02-02 16:58:48 +00:00
|
|
|
/* A cache counts as initialised once its per-slab object count is non-zero
   (c_obj_count is computed during cache setup -- see vm/cache.c). */
#define VM_CACHE_INITIALISED(c) ((c)->c_obj_count != 0)
|
2023-02-01 17:05:14 +00:00
|
|
|
/* A page is free iff it is neither reserved nor allocated. */
#define VM_PAGE_IS_FREE(pg) (!((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)))
|
2023-02-01 15:03:42 +00:00
|
|
|
|
2023-02-01 17:04:20 +00:00
|
|
|
/* Iterate from page `pg` through the chain returned by
   vm_page_get_next_tail(), binding each page to the iterator variable `i`
   (declared by the macro itself). Iteration stops when the helper returns
   NULL. NOTE(review): presumably this walks the tail pages of a huge-page
   run -- confirm against vm_page_get_next_tail()'s definition. */
#define vm_page_foreach(pg, i) \
	for (vm_page_t *i = (pg); i; i = vm_page_get_next_tail(i))
|
|
|
|
|
|
2023-01-29 11:03:53 +00:00
|
|
|
/* Alignment value expressed in the physical-address domain
   (see vm_page_order_to_alignment()). */
typedef phys_addr_t vm_alignment_t;

/* Identifier of a NUMA node; indexes structures sized by VM_MAX_NODES. */
typedef unsigned int vm_node_id_t;
|
2022-12-29 19:22:16 +00:00
|
|
|
|
2023-01-28 19:24:28 +00:00
|
|
|
/* VM object descriptor.
   NOTE(review): currently only a placeholder field; presumably this will be
   fleshed out into a real object (mapping/backing-store) type later. */
typedef struct vm_object {
	unsigned int reserved;
} vm_object_t;
|
|
|
|
|
|
2023-02-01 12:26:49 +00:00
|
|
|
/* Allocation behaviour flags accepted by the vm_*_alloc()/kmalloc() family. */
typedef enum vm_flags {
	/* request memory suitable for DMA -- presumably served from
	   VM_ZONE_DMA; confirm in vm/zone.c */
	VM_GET_DMA = 0x01u,
} vm_flags_t;
|
|
|
|
|
|
2022-12-29 19:22:16 +00:00
|
|
|
/* Identifiers of the per-node memory zones. */
typedef enum vm_zone_id {
	/* NOTE that these are used as indices into the node_zones array in
	   vm/zone.c: they need to be contiguous, and must start at 0! */
	VM_ZONE_DMA = 0u,
	VM_ZONE_NORMAL = 1u,
	VM_ZONE_HIGHMEM = 2u,

	/* range markers for iterating over all zone ids */
	VM_ZONE_MIN = VM_ZONE_DMA,
	VM_ZONE_MAX = VM_ZONE_HIGHMEM,
} vm_zone_id_t;
|
|
|
|
|
|
2023-01-28 19:24:28 +00:00
|
|
|
/* Supported page-block orders: order n spans 2^n base (4K) pages,
   so each successive enumerator doubles the block size. */
typedef enum vm_page_order {
	VM_PAGE_4K = 0u,
	VM_PAGE_8K,
	VM_PAGE_16K,
	VM_PAGE_32K,
	VM_PAGE_64K,
	VM_PAGE_128K,
	VM_PAGE_256K,
	VM_PAGE_512K,
	VM_PAGE_1M,
	VM_PAGE_2M,
	VM_PAGE_4M,
	VM_PAGE_8M,
	VM_PAGE_16M,
	VM_PAGE_32M,
	VM_PAGE_64M,
	VM_PAGE_128M,
#if 0
	/* vm_page_t only has 4 bits to store the page order with.
	   the maximum order that can be stored in 4 bits is 15 (VM_PAGE_128M).
	   to use any of the page orders listed here, this field
	   will have to be expanded. */
	VM_PAGE_256M,
	VM_PAGE_512M,
	VM_PAGE_1G,
#endif
} vm_page_order_t;
|
|
|
|
|
|
2023-02-02 16:58:48 +00:00
|
|
|
/* Per-page state bits stored in vm_page_t.p_flags. */
typedef enum vm_page_flags {
	/* page is reserved (probably by a call to memblock_reserve()) and cannot
	   be returned by any allocation function */
	VM_PAGE_RESERVED = 0x01u,

	/* page has been allocated by a zone's buddy allocator, and is in-use */
	VM_PAGE_ALLOC = 0x02u,

	/* page is the first page of a huge-page */
	VM_PAGE_HEAD = 0x04u,

	/* page is part of a huge-page */
	VM_PAGE_HUGE = 0x08u,
} vm_page_flags_t;
|
|
|
|
|
|
2022-12-29 19:22:16 +00:00
|
|
|
/* Status of a physical memory region (see vm_region_t). */
typedef enum vm_memory_region_status {
	/* region is free for use */
	VM_REGION_FREE = 0x01u,
	/* region is reserved and must not be handed out */
	VM_REGION_RESERVED = 0x02u,
} vm_memory_region_status_t;
|
|
|
|
|
|
2023-02-02 16:58:48 +00:00
|
|
|
/* Behaviour flags for a vm_cache_t slab cache. */
typedef enum vm_cache_flags {
	/* keep the slab header separate from the object pages -- presumably;
	   confirm against vm/cache.c */
	VM_CACHE_OFFSLAB = 0x01u,
	/* back the cache with DMA-capable memory (see VM_GET_DMA) */
	VM_CACHE_DMA = 0x02u
} vm_cache_flags_t;
|
|
|
|
|
|
2022-12-29 19:22:16 +00:00
|
|
|
/* Static description of one memory zone, as handed to vm_bootstrap(). */
typedef struct vm_zone_descriptor {
	vm_zone_id_t zd_id;     /* which zone this describes */
	vm_node_id_t zd_node;   /* NUMA node the zone belongs to */
	/* NOTE(review): a const array member prevents whole-struct assignment;
	   descriptors can only be filled in via initialisers (or memcpy). */
	const char zd_name[32]; /* human-readable zone name */
	phys_addr_t zd_base;    /* first physical address of the zone */
	phys_addr_t zd_limit;   /* upper physical address bound of the zone */
} vm_zone_descriptor_t;
|
|
|
|
|
|
|
|
|
|
/* Runtime state of a single memory zone. */
typedef struct vm_zone {
	vm_zone_descriptor_t z_info; /* static zone description */
	spin_lock_t z_lock;          /* presumably guards the free lists below -- confirm */

	/* per-order free-page lists for the zone's buddy allocator */
	queue_t z_free_pages[VM_MAX_PAGE_ORDERS];

	/* zone size -- units (bytes or pages) not visible here; confirm in vm/zone.c */
	unsigned long z_size;
} vm_zone_t;
|
|
|
|
|
|
2023-01-08 12:13:59 +00:00
|
|
|
/* Per-NUMA-node page data: one vm_zone_t slot per zone id
   (indexed by vm_zone_id_t, hence VM_MAX_ZONES entries). */
typedef struct vm_pg_data {
	vm_zone_t pg_zones[VM_MAX_ZONES];
} vm_pg_data_t;
|
|
|
|
|
|
2022-12-29 19:22:16 +00:00
|
|
|
/* A contiguous physical memory region and its allocation status. */
typedef struct vm_region {
	vm_memory_region_status_t r_status; /* VM_REGION_FREE or VM_REGION_RESERVED */
	phys_addr_t r_base;                 /* first physical address of the region */
	phys_addr_t r_limit;                /* upper physical address bound of the region */
} vm_region_t;
|
|
|
|
|
|
2023-02-02 16:58:48 +00:00
|
|
|
/* A slab cache: carves fixed-size objects out of page-backed slabs. */
typedef struct vm_cache {
	const char *c_name;       /* human-readable cache name */
	vm_cache_flags_t c_flags; /* vm_cache_flags_t behaviour bits */

	/* list linkage -- presumably the global cache list; confirm in vm/cache.c */
	queue_entry_t c_list;

	/* slabs grouped by occupancy */
	queue_t c_slabs_full;
	queue_t c_slabs_partial;
	queue_t c_slabs_empty;

	spin_lock_t c_lock; /* presumably guards the slab lists above -- confirm */

	/* number of objects that can be stored in a single slab */
	unsigned int c_obj_count;

	/* the size of object kept in the cache */
	unsigned int c_obj_size;

	/* combined size of vm_slab_t and the freelist */
	unsigned int c_hdr_size;

	/* offset from one object to the next in a slab.
	   this may be different from c_obj_size as
	   we enforce a 16-byte alignment on allocated objects */
	unsigned int c_stride;

	/* size of page used for slabs */
	unsigned int c_page_order;
} vm_cache_t;
|
|
|
|
|
|
|
|
|
|
/* Header for one slab: a block of pages holding object slots for a cache.
   The freelist is a flexible array member, so the header's real size is
   vm_cache_t.c_hdr_size, not sizeof(vm_slab_t). */
typedef struct vm_slab {
	vm_cache_t *s_cache; /* owning cache */

	/* queue entry for vm_cache_t.c_slabs_* */
	queue_entry_t s_list;

	/* pointer to the first object slot. */
	void *s_objects;

	/* the number of objects allocated on the slab. */
	unsigned int s_obj_allocated;

	/* the index of the next free object.
	   if s_free is equal to FREELIST_END (defined in vm/cache.c)
	   there are no free slots left in the slab. */
	unsigned int s_free;

	/* list of free object slots.
	   when allocating:
	     - s_free should be set to the value of s_freelist[s_free]
	   when freeing:
	     - s_free should be set to the index of the object being freed.
	     - s_freelist[s_free] should be set to the previous value of s_free. */
	unsigned int s_freelist[];
} vm_slab_t;
|
2022-12-29 19:22:16 +00:00
|
|
|
|
|
|
|
|
/* Metadata for one physical page frame.
   The first four bitfields pack into a single uint16_t; the struct is
   over-aligned via the attribute below. */
typedef struct vm_page {
	/* order of the page block that this page belongs to
	   (4 bits: maximum storable order is 15 == VM_PAGE_128M) */
	uint16_t p_order : 4;

	/* the id of the NUMA node that this page belongs to
	   (6 bits: 0..63, matching VM_MAX_NODES) */
	uint16_t p_node : 6;

	/* the id of the memory zone that this page belongs to */
	uint16_t p_zone : 3;

	/* some unused bits */
	uint16_t p_reserved : 3;

	/* vm_page_flags_t bitfields. */
	uint32_t p_flags;

	/* multi-purpose list.
	   the owner of the page can decide what to do with this.
	   some examples:
	   - the buddy allocator uses this to maintain its per-zone
	     free-page lists. */
	queue_entry_t p_list;

	/* owner-specific data (anonymous union keyed by the page's owner) */
	union {
		struct {
			/* slab-cache owner: the slab this page backs */
			vm_slab_t *p_slab;
		};
	};

} __attribute__((aligned(2 * sizeof(unsigned long)))) vm_page_t;
|
|
|
|
|
|
|
|
|
|
extern kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones);
|
2022-12-29 19:22:16 +00:00
|
|
|
|
2023-02-01 12:26:49 +00:00
|
|
|
extern vm_pg_data_t *vm_pg_data_get(vm_node_id_t node);
|
2023-01-29 20:10:15 +00:00
|
|
|
|
2023-02-02 21:06:04 +00:00
|
|
|
extern phys_addr_t vm_virt_to_phys(void *p);
|
|
|
|
|
|
2023-01-28 19:24:28 +00:00
|
|
|
extern void vm_page_init_array();
|
|
|
|
|
extern vm_page_t *vm_page_get(phys_addr_t addr);
|
2023-01-29 20:09:15 +00:00
|
|
|
extern phys_addr_t vm_page_get_paddr(vm_page_t *pg);
|
|
|
|
|
extern vm_zone_t *vm_page_get_zone(vm_page_t *pg);
|
|
|
|
|
extern void *vm_page_get_vaddr(vm_page_t *pg);
|
|
|
|
|
extern size_t vm_page_get_pfn(vm_page_t *pg);
|
2023-01-28 19:24:28 +00:00
|
|
|
extern size_t vm_page_order_to_bytes(vm_page_order_t order);
|
2023-01-29 11:03:53 +00:00
|
|
|
extern size_t vm_page_order_to_pages(vm_page_order_t order);
|
|
|
|
|
extern vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order);
|
2023-02-01 12:26:49 +00:00
|
|
|
extern vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags);
|
|
|
|
|
extern void vm_page_free(vm_page_t *pg);
|
2023-01-29 11:03:53 +00:00
|
|
|
|
2023-02-01 15:03:42 +00:00
|
|
|
extern int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b);
|
|
|
|
|
extern vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b);
|
|
|
|
|
extern vm_page_t *vm_page_get_buddy(vm_page_t *pg);
|
2023-02-01 17:04:20 +00:00
|
|
|
extern vm_page_t *vm_page_get_next_tail(vm_page_t *pg);
|
2023-02-01 15:03:42 +00:00
|
|
|
|
2023-01-29 11:03:53 +00:00
|
|
|
extern size_t vm_bytes_to_pages(size_t bytes);
|
2022-12-29 19:22:16 +00:00
|
|
|
|
2023-01-29 20:10:15 +00:00
|
|
|
extern void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info);
|
2023-02-01 12:26:49 +00:00
|
|
|
extern vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags);
|
2023-02-01 15:03:42 +00:00
|
|
|
extern void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg);
|
2022-12-29 19:22:16 +00:00
|
|
|
|
2023-02-02 16:58:48 +00:00
|
|
|
extern vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags);
|
|
|
|
|
extern void vm_cache_init(vm_cache_t *cache);
|
|
|
|
|
extern void vm_cache_destroy(vm_cache_t *cache);
|
|
|
|
|
extern void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags);
|
|
|
|
|
extern void vm_cache_free(vm_cache_t *cache, void *p);
|
|
|
|
|
|
|
|
|
|
extern void *kmalloc(size_t count, vm_flags_t flags);
|
|
|
|
|
extern void *kzalloc(size_t count, vm_flags_t flags);
|
|
|
|
|
extern void kfree(void *p);
|
|
|
|
|
|
2022-12-29 19:22:16 +00:00
|
|
|
#endif
|