/* memblock: boot-time physical memory region bookkeeping and first-fit allocator. */
#include "socks/types.h"

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#include <socks/memblock.h>
|
|
/* classic min/max macros.  NOTE: each argument is evaluated more than once,
 * so never pass expressions with side effects (e.g. MIN(i++, n)). */
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* iterator cursor encoding: pack two 32-bit array indices into one 64-bit
 * value (a = low word, b = high word) so a position over two region lists
 * fits in a single iterator field. */
#define ITER(a, b) ((uint64_t)(a) | ((uint64_t)(b) << 32))
/* sentinel cursor value marking the end of an iteration */
#define ITER_END ULLONG_MAX
/* unpack the low (a) and high (b) 32-bit indices from a packed cursor */
#define IDX_A(idx) ((idx) & 0xFFFFFFFF)
#define IDX_B(idx) (((idx) >> 32) & 0xFFFFFFFF)

/* the maximum possible value for a pointer type.
 * Note that any pointers returned by the memblock API will still
 * be bounded by the defined memory regions, and not by this constant. */
#define ADDR_MAX (~(uintptr_t)0)
/* bootstrap region storage: fixed-size arrays that back the region lists
 * until they outgrow them, at which point memblock_double_capacity() moves
 * the lists into dynamically allocated memory. */
static memblock_region_t init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT];
static memblock_region_t init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT];

/* core allocator: finds a free physical range and records it as reserved. */
static phys_addr_t do_alloc(size_t size);
|
/* global allocator state: two sorted region lists -- "memory" describes the
 * ranges of usable RAM, "reserved" describes ranges that must not be handed
 * out -- initially backed by the static bootstrap arrays above.
 * NOTE(review): m_alloc_start / m_alloc_end / m_voffset stay zero until
 * memblock_init() runs; allocating before that is not meaningful. */
memblock_t memblock = {
    .memory.regions = init_memory_regions,
    .memory.count = 0,
    .memory.max = MEMBLOCK_INIT_MEMORY_REGION_COUNT,
    .memory.name = "memory",

    .reserved.regions = init_reserved_regions,
    .reserved.count = 0,
    .reserved.max = MEMBLOCK_INIT_RESERVED_REGION_COUNT,
    .reserved.name = "reserved",
};
|
/*
 * Double the capacity of a region list.
 *
 * Allocates a larger backing array via do_alloc(), copies the existing
 * entries over, and retargets the list at the new array.  On allocation
 * failure the list is left untouched -- the old array (possibly one of the
 * static bootstrap arrays) remains valid.
 *
 * NOTE(review): the previous backing array is never released; since
 * memblock_free() is currently a no-op this only wastes bookkeeping space.
 */
static void memblock_double_capacity(memblock_type_t *type)
{
    size_t new_max = type->max * 2;

    phys_addr_t new_regions_p = do_alloc(new_max * sizeof(memblock_region_t));
    if (new_regions_p == 0) {
        /* BUGFIX: do_alloc() returns 0 on failure; previously this was
         * unchecked and we would memcpy through a pointer derived from
         * physical address 0. Keep the current (smaller) array instead. */
        return;
    }

    /* do_alloc() returns a physical address; convert to a virtual pointer. */
    void *new_regions = (void *)(new_regions_p + memblock.m_voffset);
    memcpy(new_regions, type->regions, type->count * sizeof(memblock_region_t));

    type->regions = new_regions;
    type->max = new_max;
}
|
/*
 * Insert a region into a list, keeping the list sorted by base address.
 *
 * The caller guarantees there is spare capacity and that the new region
 * does not conflict with existing entries.  Always returns 0.
 */
static int memblock_insert_region(memblock_type_t *type, memblock_region_t *to_add)
{
    /* locate the first entry whose base lies at or beyond the new region's
     * limit -- that slot is where the new region belongs. */
    unsigned int pos = 0;
    while (pos < type->count && type->regions[pos].base < to_add->limit) {
        pos++;
    }

    /* open a gap at `pos` by shifting the tail one slot to the right */
    memblock_region_t *slot = &type->regions[pos];
    memmove(slot + 1, slot, (type->count - pos) * sizeof *slot);

    *slot = *to_add;
    type->count++;

    return 0;
}
|
/*
 * Remove the region at index i, shifting the tail of the list down one slot.
 *
 * Returns 0 on success, -1 if i is out of range.
 */
static int memblock_remove_region(memblock_type_t *type, unsigned int i)
{
    if (i >= type->count) {
        return -1;
    }

    memblock_region_t *src = &type->regions[i + 1];
    memblock_region_t *dst = &type->regions[i];
    /* BUGFIX: number of entries AFTER index i is count - i - 1.  The
     * original used count - i, which made memmove read one element past
     * the last valid entry (out of the array's bounds when the list is
     * full). */
    unsigned int count = type->count - i - 1;

    memmove(dst, src, count * sizeof *src);
    type->count--;
    return 0;
}
|
int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset)
|
|
{
|
|
memblock.m_alloc_start = alloc_start;
|
|
memblock.m_alloc_end =alloc_end;
|
|
memblock.m_voffset = voffset;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Insert the range [base, base + size - 1] into the given region list,
 * merging it with any same-status regions it overlaps or abuts.
 *
 * Returns 0 on success (including size == 0, which is a no-op), or -1 when
 * the range intersects an existing region of a DIFFERENT status -- regions
 * of different statuses must never overlap.
 */
int memblock_add_range(memblock_type_t *type, uintptr_t base, size_t size, memblock_region_status_t status)
{
    if (size == 0) {
        return 0;
    }

    uintptr_t limit = base + size - 1;   /* inclusive end of the new range */

    if (type->count == 0) {
        /* empty list: record the region directly. */
        type->regions[0].base = base;
        type->regions[0].limit = limit;
        /* BUGFIX: status was previously never assigned on this path, so the
         * first region silently kept whatever value its slot already held. */
        type->regions[0].status = status;
        type->count++;
        return 0;
    }

    memblock_region_t new_region = { .base = base, .limit = limit, .status = status };

    /* two regions with different statuses CANNOT intersect. we first need to check
     * to make sure the region being added doesn't violate this rule. */
    for (unsigned int i = 0; i < type->count; i++) {
        memblock_region_t *cur_region = &type->regions[i];

        if (new_region.base > cur_region->limit || new_region.limit < cur_region->base) {
            continue;   /* fully disjoint: no conflict possible */
        }

        if (cur_region->status == new_region.status) {
            continue;   /* overlap with the same status is merged below */
        }

        return -1;
    }

    bool add_new = true;

    for (unsigned int i = 0; i < type->count; i++) {
        memblock_region_t *cur_region = &type->regions[i];

        /* case 1: the region being added and the current region have no
         * connection what-so-ever (no overlap, not even adjacent) */
        if (cur_region->limit + 1 < new_region.base || cur_region->base > new_region.limit) {
            continue;
        }

        /* case 2: the region being added matches a region already in the list. */
        if (cur_region->base == new_region.base && cur_region->limit == new_region.limit) {
            /* nothing needs to be done */
            add_new = false;
            break;
        }

        /* case 3: the region being added completely contains a region already in the list. */
        if (cur_region->base > new_region.base && cur_region->limit <= new_region.limit) {
            memblock_remove_region(type, i);

            /* after memblock_remove_region(), a different region will have moved
             * into the array slot referenced by i.  decrementing i means we'll
             * stay at the current index and process this region.  (unsigned i
             * may wrap to UINT_MAX when i == 0; the loop's i++ wraps it back.) */
            i--;
            continue;
        }

        /* case 4: the region being added meets or partially overlaps a region
         * already in the list.  an overlap can occur at the beginning or the
         * end of the new region; anything else is either full containment
         * (case 3) or disjoint (case 1).  remove the existing region and
         * extend the new region to cover it -- incompatible-status overlaps
         * were already rejected above.
         *
         * NOTE(review): the `new_region.base == cur_region->limit - 1` arm
         * looks suspicious -- adjacency on this side would be limit + 1, and
         * case 1 already filtered anything beyond limit + 1.  Left as-is
         * pending confirmation. */
        if ((new_region.base > cur_region->base || new_region.base == cur_region->limit - 1) && new_region.status == cur_region->status) {
            /* the new region overlaps the END of the current region: adopt
             * the current region's base. */
            new_region.base = cur_region->base;
        } else if ((new_region.base < cur_region->base || new_region.limit + 1 == cur_region->base) && new_region.status == cur_region->status) {
            /* the new region overlaps the BEGINNING of the current region:
             * adopt the current region's limit. */
            new_region.limit = cur_region->limit;
        } else {
            continue;
        }

        /* the new region now covers the current one; drop the current one. */
        memblock_remove_region(type, i);
        i--;
    }

    if (add_new) {
        memblock_insert_region(type, &new_region);
    }

    return 0;
}
|
int memblock_add(uintptr_t base, size_t size)
|
|
{
|
|
if (memblock.memory.count >= memblock.memory.max - 2) {
|
|
if (memblock.reserved.count >= memblock.reserved.max - 2) {
|
|
memblock_double_capacity(&memblock.reserved);
|
|
}
|
|
|
|
memblock_double_capacity(&memblock.memory);
|
|
}
|
|
|
|
return memblock_add_range(&memblock.memory, base, size, MEMBLOCK_MEMORY);
|
|
}
|
|
|
|
int memblock_reserve(uintptr_t base, size_t size)
|
|
{
|
|
if (memblock.reserved.count >= memblock.reserved.max - 2) {
|
|
memblock_double_capacity(&memblock.reserved);
|
|
}
|
|
|
|
return memblock_add_range(&memblock.reserved, base, size, MEMBLOCK_RESERVED);
|
|
}
|
|
|
|
/*
 * First-fit allocator core: find the first free physical range inside the
 * configured allocation window that can hold `size` bytes, and record it in
 * the reserved list with status MEMBLOCK_ALLOC.
 *
 * Returns the physical base address of the allocation, or 0 on failure
 * (no sufficiently large free range, or the reservation was rejected).
 * NOTE(review): 0 doubles as the failure sentinel, which assumes physical
 * address 0 is never a valid allocation base -- confirm.
 */
static phys_addr_t do_alloc(size_t size)
{
phys_addr_t allocated_base = ADDR_MAX;

/* the window set by memblock_init() is virtual; translate it back to
 * physical addresses, since the iterator works in physical space. */
phys_addr_t region_start = memblock.m_alloc_start - memblock.m_voffset;
phys_addr_t region_end = memblock.m_alloc_end - memblock.m_voffset;

/* scan the free ranges (memory minus reserved) and take the first fit */
memblock_iter_t it;
for_each_free_mem_range (&it, region_start, region_end) {
size_t region_size = it.it_limit - it.it_base + 1;   /* limits are inclusive */
if (region_size >= size) {
allocated_base = it.it_base;
break;
}
}

if (allocated_base == ADDR_MAX) {
/* no free range was large enough */
return 0;
}

/* record the allocation so later searches skip this range */
int status = memblock_add_range(&memblock.reserved, allocated_base, size, MEMBLOCK_ALLOC);
if (status != 0) {
return 0;
}

return allocated_base;
}
|
void *memblock_alloc(size_t size)
|
|
{
|
|
if (memblock.reserved.count >= memblock.reserved.max - 2) {
|
|
memblock_double_capacity(&memblock.reserved);
|
|
}
|
|
|
|
return (void *)(do_alloc(size) + memblock.m_voffset);
|
|
}
|
|
|
|
phys_addr_t memblock_alloc_phys(size_t size)
|
|
{
|
|
if (memblock.reserved.count >= memblock.reserved.max - 2) {
|
|
memblock_double_capacity(&memblock.reserved);
|
|
}
|
|
|
|
return do_alloc(size);
|
|
}
|
|
|
|
/*
 * Free a block previously returned by memblock_alloc().
 *
 * Not implemented yet: the call is accepted and ignored, so freed memory is
 * never returned to the allocator.  Returns 0 unconditionally.
 */
int memblock_free(void *p, size_t size)
{
    (void)p;     /* TODO: implement; silence unused-parameter warnings */
    (void)size;
    return 0;
}
|
/*
 * Free a block previously returned by memblock_alloc_phys().
 *
 * Not implemented yet: the call is accepted and ignored, so freed memory is
 * never returned to the allocator.  Returns 0 unconditionally.
 */
int memblock_free_phys(phys_addr_t addr, size_t size)
{
    (void)addr;  /* TODO: implement; silence unused-parameter warnings */
    (void)size;
    return 0;
}
|
/*
 * Iterator step function: advance `it` to the next range of type_a that is
 * (a) inside [start, end] and (b) NOT covered by any region of type_b.
 * When type_b is NULL, iterate the raw regions of type_a instead, ignoring
 * the [start, end] bounds.
 *
 * The position across both lists is packed into it->__idx via ITER();
 * it->__idx == ITER_END marks exhaustion.  All limits are INCLUSIVE.
 *
 * NOTE(review): several comparisons here (`r_start >= r_end`,
 * `r_start >= m_end`, `it_limit <= it_base`) use >=/<= on inclusive limits
 * and therefore skip 1-byte ranges (where limit == base) -- confirm whether
 * that is intentional.
 */
void __next_memory_region(memblock_iter_t *it, memblock_type_t *type_a, memblock_type_t *type_b, uintptr_t start, uintptr_t end)
{
/* unpack the saved position: idx_a indexes type_a, idx_b indexes type_b */
unsigned int idx_a = IDX_A(it->__idx);
unsigned int idx_b = IDX_B(it->__idx);

for (; idx_a < type_a->count; idx_a++) {
memblock_region_t *m = &type_a->regions[idx_a];

uintptr_t m_start = m->base;
uintptr_t m_end = m->limit;

if (!type_b) {
/* no exclusion list: yield the raw region as-is */
it->it_base = m->base;
it->it_limit = m->limit;
it->it_status = m->status;

it->__idx = ITER(idx_a + 1, idx_b);
return;
}

if (m_end < start) {
/* we haven't reached the requested memory range yet */
continue;
}

if (m_start > end) {
/* we have gone past the requested memory range and can now stop */
break;
}

/* walk the GAPS between type_b regions; the extra +1 iteration covers
 * the gap after the last type_b region. */
for (; idx_b < type_b->count + 1; idx_b++) {
memblock_region_t *r = &type_b->regions[idx_b];

/* r_start and r_end delimit the region of memory between the current and previous reserved regions.
 * if we have gone past the last reserved region, these variables delimit the range between the end
 * of the last reserved region and the end of memory. */
uintptr_t r_start = idx_b > 0 ? r[-1].limit + 1 : 0;
uintptr_t r_end;

if (idx_b < type_b->count) {
r_end = r->base;

/* we decrement r_end to get the address of the last byte of the free region.
if r_end is already zero, there is a reserved region starting at address 0x0.
as long as r_end == r_start == 0x00000, we will skip this region. */
if (r_end) {
r_end--;
}
} else {
/* this maximum value will be clamped to the bounds of memblock.memory
before being returned to the caller */
r_end = ADDR_MAX;
}

if (r_start >= r_end) {
/* this free region has a length of zero, move to the next one */
continue;
}

if (r_start >= m_end) {
/* we've gone past the end of the current memory region, and need to go to the next one */
break;
}

/* we've already gone past this free memory region. move to the next one */
if (m_start >= r_end) {
continue;
}

/* we want the area that is overlapped by both
 * region M (m_start - m_end) : The region defined as system memory.
 * region R (r_start - r_end) : The region defined as free / outside of any reserved regions.
 */
it->it_base = MAX(m_start, r_start);
it->it_limit = MIN(m_end, r_end);

/* further limit the region to the intersection between the region itself and the
 * specified iteration bounds */
it->it_base = MAX(it->it_base, start);
it->it_limit = MIN(it->it_limit, end);

if (it->it_limit <= it->it_base) {
/* this region is not part of the specified bounds, skip it. */
continue;
}

/* free ranges are, by construction, usable memory */
it->it_status = MEMBLOCK_MEMORY;

/* whichever region is smaller, increment the pointer for that type, so we can
 * compare the larger region with the next region of the incremented type. */
if (m_end <= r_end) {
idx_a++;
} else {
idx_b++;
}

/* store the position for the next iteration */
it->__idx = ITER(idx_a, idx_b);
return;
}
}

/* ULLONG_MAX signals the end of the iteration */
it->__idx = ITER_END;
}