/* mango/vm/memblock.c */
/*
The Clear BSD License
Copyright (c) 2023 Max Wash
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted (subject to the limitations in the disclaimer
below) provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
*/
#include <stdbool.h>
#include <limits.h>
/* (scrape artifact removed: commit timestamp 2023-02-03 20:51:23 +00:00) */
#include <socks/types.h>
#include <socks/libc/string.h>
#include <socks/memblock.h>
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define ITER(a, b) ((uint64_t)(a) | ((uint64_t)(b) << 32))
#define ITER_END ULLONG_MAX
#define IDX_A(idx) ((idx) & 0xFFFFFFFF)
#define IDX_B(idx) (((idx) >> 32) & 0xFFFFFFFF)
/* the maximum possible value for a pointer type.
Note that any pointers returned by the memblock API will still
be bounded by the defined memory regions, and not by this constant. */
#define ADDR_MAX (~(uintptr_t)0)
/* Static backing storage for the two region lists. Used until the allocator
   can grow the arrays out of its own managed memory (memblock_double_capacity). */
static struct memblock_region init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT];
static struct memblock_region init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT];
/* Forward declaration: the physical-address allocator used internally. */
static phys_addr_t do_alloc(size_t size, phys_addr_t align);
/* Global allocator state: "memory" tracks ranges registered as system memory,
   "reserved" tracks ranges that are in use (reserved or allocated). */
struct memblock memblock = {
.memory.regions = init_memory_regions,
.memory.count = 0,
.memory.max = MEMBLOCK_INIT_MEMORY_REGION_COUNT,
.memory.name = "memory",
.reserved.regions = init_reserved_regions,
.reserved.count = 0,
.reserved.max = MEMBLOCK_INIT_RESERVED_REGION_COUNT,
.reserved.name = "reserved",
};
/* Double the capacity of @type's region array.
 *
 * The new array is carved out of the allocator's own managed memory via
 * do_alloc(); the old array (static initial storage or a previous
 * allocation) is abandoned, since memblock has no general free path.
 */
static void memblock_double_capacity(struct memblock_type *type)
{
	size_t new_max = type->max * 2;
	phys_addr_t new_regions_p = do_alloc(new_max * sizeof(struct memblock_region), 8);

	/* do_alloc() returns 0 on failure; without this check we would
	   memcpy() through the voffset-biased null address. Keep the old,
	   still-valid array instead. */
	if (!new_regions_p) {
		return;
	}

	void *new_regions = (void *)(new_regions_p + memblock.m_voffset);
	memcpy(new_regions, type->regions, type->count * sizeof(struct memblock_region));
	type->regions = new_regions;
	type->max = new_max;
}
/* Insert @to_add into @type's array, keeping the array sorted by base
   address. Callers guarantee at least one spare slot. Always returns 0. */
static int memblock_insert_region(struct memblock_type *type, struct memblock_region *to_add)
{
	/* default: append at the end if no later region is found */
	unsigned int slot = type->count;

	/* find the first region that starts at or beyond the new region's end */
	for (unsigned int j = 0; j < type->count; j++) {
		if (type->regions[j].base >= to_add->limit) {
			slot = j;
			break;
		}
	}

	/* shift the tail up one position and drop the new region in */
	unsigned int tail = type->count - slot;
	memmove(&type->regions[slot + 1], &type->regions[slot], tail * sizeof type->regions[0]);
	type->regions[slot] = *to_add;
	type->count++;
	return 0;
}
/* Remove region @i from @type by shifting the tail of the array down one
   slot. Returns 0 on success, -1 if @i is out of range. */
static int memblock_remove_region(struct memblock_type *type, unsigned int i)
{
	if (i >= type->count) {
		return -1;
	}
	/* number of regions after slot i. The original used (count - i),
	   which moved one element too many and read one slot past the last
	   valid region. */
	unsigned int count = type->count - i - 1;
	memmove(&type->regions[i], &type->regions[i + 1], count * sizeof type->regions[0]);
	type->count--;
	return 0;
}
int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset)
{
memblock.m_alloc_start = alloc_start;
memblock.m_alloc_end =alloc_end;
memblock.m_voffset = voffset;
return 0;
}
/* Merge the range [base, base + size) into @type with the given @status.
 *
 * Overlapping or adjacent regions of the same status are coalesced into one.
 * An overlap between regions of different statuses is rejected.
 *
 * Returns 0 on success, -1 if the range intersects an existing region of a
 * different status.
 */
int memblock_add_range(struct memblock_type *type, uintptr_t base, size_t size, enum memblock_region_status status)
{
	if (size == 0) {
		return 0;
	}
	uintptr_t limit = base + size - 1;

	/* fast path: first region ever added to this type */
	if (type->count == 0) {
		type->regions[0].base = base;
		type->regions[0].limit = limit;
		/* BUG FIX: the status was never recorded on this path */
		type->regions[0].status = status;
		type->count++;
		return 0;
	}

	struct memblock_region new_region = { .base = base, .limit = limit, .status = status };

	/* two regions with different statuses CANNOT intersect. we first need to
	   check that the region being added doesn't violate this rule. */
	for (unsigned int i = 0; i < type->count; i++) {
		struct memblock_region *cur_region = &type->regions[i];

		if (new_region.base > cur_region->limit || new_region.limit < cur_region->base) {
			continue;
		}
		if (cur_region->status == new_region.status) {
			continue;
		}
		return -1;
	}

	bool add_new = true;
	for (unsigned int i = 0; i < type->count; i++) {
		struct memblock_region *cur_region = &type->regions[i];

		/* case 1: no overlap and not even adjacent — nothing to merge */
		if (cur_region->limit + 1 < new_region.base || cur_region->base > new_region.limit) {
			continue;
		}
		/* case 2: an existing region already covers the whole new region.
		   BUG FIX: the original only handled an exact match; a strictly
		   larger existing region fell through and a shrunken or duplicate
		   overlapping region ended up in the list. (Different-status
		   overlap was already rejected above, so status is compatible.) */
		if (cur_region->base <= new_region.base && cur_region->limit >= new_region.limit) {
			add_new = false;
			break;
		}
		/* case 3: the new region completely contains the current one.
		   BUG FIX: ">=" rather than ">" on base — a region sharing its base
		   with the new region but ending earlier is still fully contained
		   (the both-equal case was caught by case 2 above). */
		if (cur_region->base >= new_region.base && cur_region->limit <= new_region.limit) {
			memblock_remove_region(type, i);
			/* after removal a different region occupies slot i; step back so
			   the loop's i++ revisits this slot. (i is unsigned: at i == 0
			   this wraps and i++ wraps it back to 0 — intentional.) */
			i--;
			continue;
		}
		/* case 4: partial overlap or adjacency with a same-status region.
		   Extend the new region to cover the current one, then remove the
		   current one from the list. */
		if ((new_region.base > cur_region->base || new_region.base == cur_region->limit + 1) && new_region.status == cur_region->status) {
			/* the new region overlaps/abuts the END of the current region;
			   BUG FIX: the adjacency test was "== cur->limit - 1". */
			new_region.base = cur_region->base;
		} else if ((new_region.base < cur_region->base || new_region.limit + 1 == cur_region->base) && new_region.status == cur_region->status) {
			/* the new region overlaps/abuts the BEGINNING of the current region */
			new_region.limit = cur_region->limit;
		} else {
			continue;
		}
		/* the new region now covers the current one; drop the current one */
		memblock_remove_region(type, i);
		i--;
	}
	if (add_new) {
		memblock_insert_region(type, &new_region);
	}
	return 0;
}
int memblock_add(uintptr_t base, size_t size)
{
if (memblock.memory.count >= memblock.memory.max - 2) {
if (memblock.reserved.count >= memblock.reserved.max - 2) {
memblock_double_capacity(&memblock.reserved);
}
memblock_double_capacity(&memblock.memory);
}
2022-12-29 20:53:39 +00:00
return memblock_add_range(&memblock.memory, base, size, MEMBLOCK_MEMORY);
}
int memblock_reserve(uintptr_t base, size_t size)
{
if (memblock.reserved.count >= memblock.reserved.max - 2) {
memblock_double_capacity(&memblock.reserved);
}
2022-12-29 20:53:39 +00:00
return memblock_add_range(&memblock.reserved, base, size, MEMBLOCK_RESERVED);
}
/* Find a free range of at least @size bytes aligned to @align inside the
 * configured allocation window and reserve it.
 *
 * @align must be a power of two (the mask below assumes it); 0 selects the
 * default 8-byte alignment.
 *
 * Returns the aligned PHYSICAL address, or 0 on failure. The reservation
 * starts at the free region's own base, so alignment padding is reserved
 * too and cannot be handed out twice.
 */
static phys_addr_t do_alloc(size_t size, phys_addr_t align)
{
	if (!align) {
		/* align to 8-byte boundary by default */
		align = 0x8;
	}
	/* the bounds of the memory region to reserve */
	phys_addr_t allocated_base = ADDR_MAX, allocated_limit = 0;
	/* the address to return to the caller. may be different from
	   allocated_base depending on alignment requirements. */
	phys_addr_t returned_base = ADDR_MAX;

	phys_addr_t region_start = memblock.m_alloc_start - memblock.m_voffset;
	phys_addr_t region_end = memblock.m_alloc_end - memblock.m_voffset;

	struct memblock_iter it;
	for_each_free_mem_range (&it, region_start, region_end) {
		/* round the candidate base up to the requested alignment */
		phys_addr_t base = it.it_base;
		if (base & (align - 1)) {
			base &= ~(align - 1);
			base += align;
		}
		size_t region_size = 0;
		/* ">=" rather than ">": a one-byte region (limit == base) is
		   still usable. The original excluded it. */
		if (it.it_limit >= base) {
			region_size = it.it_limit - base + 1;
		}
		if (region_size >= size) {
			allocated_base = it.it_base;
			allocated_limit = base + size;
			returned_base = base;
			break;
		}
	}
	if (allocated_base == ADDR_MAX) {
		/* no free range large enough */
		return 0;
	}
	int status = memblock_add_range(&memblock.reserved, allocated_base, allocated_limit - allocated_base, MEMBLOCK_ALLOC);
	if (status != 0) {
		return 0;
	}
	return returned_base;
}
/* Allocate @size bytes aligned to @align and return a VIRTUAL address,
   or NULL if no memory is available. */
void *memblock_alloc(size_t size, phys_addr_t align)
{
	/* keep headroom in the reserved array before allocating */
	if (memblock.reserved.count >= memblock.reserved.max - 2) {
		memblock_double_capacity(&memblock.reserved);
	}

	phys_addr_t phys = do_alloc(size, align);
	return phys ? (void *)(phys + memblock.m_voffset) : (void *)0;
}
/* Allocate @size bytes aligned to @align and return the PHYSICAL address,
   or 0 on failure. */
phys_addr_t memblock_alloc_phys(size_t size, phys_addr_t align)
{
	/* keep headroom in the reserved array before allocating */
	bool need_grow = memblock.reserved.count >= memblock.reserved.max - 2;
	if (need_grow) {
		memblock_double_capacity(&memblock.reserved);
	}
	return do_alloc(size, align);
}
/* Free a virtual allocation. Not implemented: allocations are permanent
   in this allocator. Always returns 0 (success) so callers need no
   special-casing. */
int memblock_free(void *p, size_t size)
{
return 0;
}
/* Free a physical allocation. Not implemented: allocations are permanent
   in this allocator. Always returns 0 (success). */
int memblock_free_phys(phys_addr_t addr, size_t size)
{
return 0;
}
void __next_memory_region(struct memblock_iter *it, struct memblock_type *type_a, struct memblock_type *type_b, uintptr_t start, uintptr_t end)
{
2022-12-29 20:53:39 +00:00
unsigned int idx_a = IDX_A(it->__idx);
unsigned int idx_b = IDX_B(it->__idx);
for (; idx_a < type_a->count; idx_a++) {
struct memblock_region *m = &type_a->regions[idx_a];
uintptr_t m_start = m->base;
uintptr_t m_end = m->limit;
if (!type_b) {
2022-12-29 20:53:39 +00:00
it->it_base = m->base;
it->it_limit = m->limit;
it->it_status = m->status;
2022-12-29 20:53:39 +00:00
it->__idx = ITER(idx_a + 1, idx_b);
return;
}
if (m_end < start) {
/* we haven't reached the requested memory range yet */
continue;
}
if (m_start > end) {
/* we have gone past the requested memory range and can now stop */
break;
}
for (; idx_b < type_b->count + 1; idx_b++) {
struct memblock_region *r = &type_b->regions[idx_b];
/* r_start and r_end delimit the region of memory between the current and previous reserved regions.
if we have gone past the last reserved region, these variables delimit the range between the end
of the last reserved region and the end of memory. */
uintptr_t r_start = idx_b > 0 ? r[-1].limit + 1 : 0;
uintptr_t r_end;
if (idx_b < type_b->count) {
r_end = r->base;
/* we decrement r_end to get the address of the last byte of the free region.
if r_end is already zero, there is a reserved region starting at address 0x0.
as long as r_end == r_start == 0x00000, we will skip this region. */
if (r_end) {
r_end--;
}
} else {
/* this maximum value will be clamped to the bounds of memblock.memory
before being returned to the caller */
r_end = ADDR_MAX;
}
if (r_start >= r_end) {
/* this free region has a length of zero, move to the next one */
continue;
}
if (r_start >= m_end) {
/* we've gone past the end of the current memory region, and need to go to the next one */
break;
}
/* we've already gone past this free memory region. move to the next one */
if (m_start >= r_end) {
continue;
}
/* we want the area that is overlapped by both
region M (m_start - m_end) : The region defined as system memory.
region R (r_start - r_end) : The region defined as free / outside of any reserved regions.
*/
2022-12-29 20:53:39 +00:00
it->it_base = MAX(m_start, r_start);
it->it_limit = MIN(m_end, r_end);
/* further limit the region to the intersection between the region itself and the
specified iteration bounds */
2022-12-29 20:53:39 +00:00
it->it_base = MAX(it->it_base, start);
it->it_limit = MIN(it->it_limit, end);
if (it->it_limit <= it->it_base) {
/* this region is not part of the specified bounds, skip it. */
continue;
}
2022-12-29 20:53:39 +00:00
it->it_status = MEMBLOCK_MEMORY;
/* whichever region is smaller, increment the pointer for that type, so we can
compare the larger region with the next region of the incremented type. */
if (m_end <= r_end) {
idx_a++;
} else {
idx_b++;
}
/* store the position for the next iteration */
2022-12-29 20:53:39 +00:00
it->__idx = ITER(idx_a, idx_b);
return;
}
}
/* ULLONG_MAX signals the end of the iteration */
2022-12-29 20:53:39 +00:00
it->__idx = ITER_END;
}
phys_addr_t memblock_virt_to_phys(void *p)
{
return (phys_addr_t)p - memblock.m_voffset;
}
/* Translate a physical address to its virtual mapping by applying the
   configured phys->virt offset. */
void *memblock_phys_to_virt(phys_addr_t p)
{
	phys_addr_t virt = p + memblock.m_voffset;
	return (void *)virt;
}