2023-01-31 13:11:35 +00:00
|
|
|
/*
|
|
|
|
|
The Clear BSD License
|
|
|
|
|
|
|
|
|
|
Copyright (c) 2023 Max Wash
|
|
|
|
|
All rights reserved.
|
|
|
|
|
|
|
|
|
|
Redistribution and use in source and binary forms, with or without
|
|
|
|
|
modification, are permitted (subject to the limitations in the disclaimer
|
|
|
|
|
below) provided that the following conditions are met:
|
|
|
|
|
|
|
|
|
|
- Redistributions of source code must retain the above copyright notice,
|
|
|
|
|
this list of conditions and the following disclaimer.
|
|
|
|
|
|
|
|
|
|
- Redistributions in binary form must reproduce the above copyright
|
|
|
|
|
notice, this list of conditions and the following disclaimer in the
|
|
|
|
|
documentation and/or other materials provided with the distribution.
|
2023-02-02 21:01:22 +00:00
|
|
|
|
2023-01-31 13:11:35 +00:00
|
|
|
- Neither the name of the copyright holder nor the names of its
|
|
|
|
|
contributors may be used to endorse or promote products derived from this
|
|
|
|
|
software without specific prior written permission.
|
|
|
|
|
*/
|
2022-12-28 23:03:30 +00:00
|
|
|
#include <limits.h>
|
2026-02-19 18:54:48 +00:00
|
|
|
#include <kernel/libc/string.h>
|
|
|
|
|
#include <kernel/memblock.h>
|
|
|
|
|
#include <kernel/types.h>
|
2026-02-08 12:17:27 +00:00
|
|
|
#include <stdbool.h>
|
2022-12-28 18:41:04 +00:00
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
/* NOTE(review): MIN/MAX evaluate each argument twice -- never pass
   expressions with side effects (e.g. i++). */
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Pack two 32-bit array indices (cursor into type_a, cursor into type_b)
   into a single 64-bit iterator cookie stored in memblock_iter.__idx. */
#define ITER(a, b) ((uint64_t)(a) | ((uint64_t)(b) << 32))
/* Sentinel cookie meaning "iteration finished". */
#define ITER_END ULLONG_MAX
/* Unpack the low (type_a) index from the cookie. */
#define IDX_A(idx) ((idx) & 0xFFFFFFFF)
/* Unpack the high (type_b) index from the cookie. */
#define IDX_B(idx) (((idx) >> 32) & 0xFFFFFFFF)

/* the maximum possible value for a pointer type.

   Note that any pointers returned by the memblock API will still
   be bounded by the defined memory regions, and not by this constant. */
#define ADDR_MAX (~(uintptr_t)0)
|
2022-12-28 23:03:30 +00:00
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
/* Initial static backing storage for the two region lists.  These are
   swapped out for dynamically allocated (larger) arrays the first time
   memblock_double_capacity() runs. */
static struct memblock_region
	init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT];
static struct memblock_region
	init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT];

/* Forward declaration: the core first-fit physical allocator used by
   both the public alloc entry points and memblock_double_capacity(). */
static phys_addr_t do_alloc(size_t size, phys_addr_t align);
|
2023-01-08 12:13:59 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* The single global memblock instance.  Tracks two region lists:
   "memory" (all known system RAM) and "reserved" (ranges that must not
   be handed out by the allocator).  Both start out backed by the static
   init_* arrays above. */
struct memblock memblock = {
	.memory.regions = init_memory_regions,
	.memory.count = 0,
	.memory.max = MEMBLOCK_INIT_MEMORY_REGION_COUNT,
	.memory.name = "memory",

	.reserved.regions = init_reserved_regions,
	.reserved.count = 0,
	.reserved.max = MEMBLOCK_INIT_RESERVED_REGION_COUNT,
	.reserved.name = "reserved",
};
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
static void memblock_double_capacity(struct memblock_type *type)
|
2023-01-08 12:13:59 +00:00
|
|
|
{
|
|
|
|
|
size_t new_max = type->max * 2;
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
phys_addr_t new_regions_p
|
|
|
|
|
= do_alloc(new_max * sizeof(struct memblock_region), 8);
|
2023-01-08 12:13:59 +00:00
|
|
|
|
|
|
|
|
void *new_regions = (void *)(new_regions_p + memblock.m_voffset);
|
2026-02-08 12:17:27 +00:00
|
|
|
memcpy(new_regions,
|
|
|
|
|
type->regions,
|
|
|
|
|
type->count * sizeof(struct memblock_region));
|
2023-01-08 12:13:59 +00:00
|
|
|
|
|
|
|
|
type->regions = new_regions;
|
|
|
|
|
type->max = new_max;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
static int memblock_insert_region(
|
|
|
|
|
struct memblock_type *type,
|
|
|
|
|
struct memblock_region *to_add)
|
2022-12-28 18:41:04 +00:00
|
|
|
{
|
|
|
|
|
unsigned int i = 0;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < type->count; i++) {
|
2023-04-12 20:17:11 +01:00
|
|
|
const struct memblock_region *cur = &type->regions[i];
|
2022-12-28 18:41:04 +00:00
|
|
|
|
|
|
|
|
if (cur->base >= to_add->limit) {
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct memblock_region *src = &type->regions[i];
|
|
|
|
|
struct memblock_region *dst = &type->regions[i + 1];
|
2022-12-28 18:41:04 +00:00
|
|
|
unsigned int count = type->count - i;
|
|
|
|
|
|
|
|
|
|
memmove(dst, src, count * sizeof *src);
|
|
|
|
|
|
|
|
|
|
*src = *to_add;
|
|
|
|
|
type->count++;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
static int memblock_remove_region(struct memblock_type *type, unsigned int i)
|
2022-12-28 18:41:04 +00:00
|
|
|
{
|
|
|
|
|
if (i >= type->count) {
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct memblock_region *src = &type->regions[i + 1];
|
|
|
|
|
struct memblock_region *dst = &type->regions[i];
|
2022-12-28 18:41:04 +00:00
|
|
|
unsigned int count = type->count - i;
|
|
|
|
|
|
|
|
|
|
memmove(dst, src, count * sizeof *src);
|
|
|
|
|
type->count--;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-08 12:13:59 +00:00
|
|
|
int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset)
|
|
|
|
|
{
|
|
|
|
|
memblock.m_alloc_start = alloc_start;
|
2026-02-08 12:17:27 +00:00
|
|
|
memblock.m_alloc_end = alloc_end;
|
2023-01-08 12:13:59 +00:00
|
|
|
memblock.m_voffset = voffset;
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
int memblock_add_range(
|
|
|
|
|
struct memblock_type *type,
|
|
|
|
|
uintptr_t base,
|
|
|
|
|
size_t size,
|
|
|
|
|
enum memblock_region_status status)
|
2022-12-28 18:41:04 +00:00
|
|
|
{
|
|
|
|
|
if (size == 0) {
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
uintptr_t limit = base + size - 1;
|
|
|
|
|
|
|
|
|
|
if (type->count == 0) {
|
|
|
|
|
type->regions[0].base = base;
|
|
|
|
|
type->regions[0].limit = limit;
|
|
|
|
|
type->count++;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
struct memblock_region new_region
|
|
|
|
|
= {.base = base, .limit = limit, .status = status};
|
2022-12-29 20:53:39 +00:00
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
/* two regions with different statuses CANNOT intersect. we first need
|
|
|
|
|
to check to make sure the region being added doesn't violate this
|
|
|
|
|
rule. */
|
2022-12-29 20:53:39 +00:00
|
|
|
for (unsigned int i = 0; i < type->count; i++) {
|
2023-04-12 20:17:11 +01:00
|
|
|
struct memblock_region *cur_region = &type->regions[i];
|
2022-12-29 20:53:39 +00:00
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
if (new_region.base > cur_region->limit
|
|
|
|
|
|| new_region.limit < cur_region->base) {
|
2022-12-29 20:53:39 +00:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (cur_region->status == new_region.status) {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
2022-12-28 18:41:04 +00:00
|
|
|
bool add_new = true;
|
|
|
|
|
|
|
|
|
|
for (unsigned int i = 0; i < type->count; i++) {
|
2023-04-12 20:17:11 +01:00
|
|
|
struct memblock_region *cur_region = &type->regions[i];
|
2022-12-28 18:41:04 +00:00
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
/* case 1: the region being added and the current region have no
|
|
|
|
|
* connection what-so-ever (no overlaps) */
|
|
|
|
|
if (cur_region->limit + 1 < new_region.base
|
|
|
|
|
|| cur_region->base > new_region.limit) {
|
2022-12-28 18:41:04 +00:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
/* case 2: the region being added matches a region already in
|
|
|
|
|
* the list. */
|
|
|
|
|
if (cur_region->base == new_region.base
|
|
|
|
|
&& cur_region->limit == new_region.limit) {
|
2022-12-28 18:41:04 +00:00
|
|
|
/* nothing needs to be done */
|
|
|
|
|
add_new = false;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
/* case 3: the region being added completely contains a region
|
|
|
|
|
* already in the list. */
|
|
|
|
|
if (cur_region->base > new_region.base
|
|
|
|
|
&& cur_region->limit <= new_region.limit) {
|
2022-12-28 18:41:04 +00:00
|
|
|
memblock_remove_region(type, i);
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
/* after memblock_remove_region(), a different region
|
|
|
|
|
will have moved into the array slot referenced by i.
|
|
|
|
|
decrementing i means we'll stay at the current index
|
|
|
|
|
and process this region. */
|
2022-12-28 18:41:04 +00:00
|
|
|
i--;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
/* case 4: the region being added meets or partially overlaps a
|
|
|
|
|
* region already in the list. */
|
|
|
|
|
|
|
|
|
|
/* there can be an overlap at the beginning and the end of the
|
|
|
|
|
region being added, anything else is either a full overlap
|
|
|
|
|
(case 3) or not within the region being added at all. to
|
|
|
|
|
handle this, remove the region that's already in the list and
|
|
|
|
|
extend the region being added to cover it. the two regions
|
|
|
|
|
may overlap and have incompatible statuses, but this case was
|
|
|
|
|
handled earlier in this function. */
|
|
|
|
|
if ((new_region.base > cur_region->base
|
|
|
|
|
|| new_region.base == cur_region->limit - 1)
|
|
|
|
|
&& new_region.status == cur_region->status) {
|
|
|
|
|
/* the new region overlaps the END of the current
|
|
|
|
|
* region, change the base of the new region to match
|
|
|
|
|
* that of the current region. */
|
2022-12-28 18:41:04 +00:00
|
|
|
new_region.base = cur_region->base;
|
2026-02-08 12:17:27 +00:00
|
|
|
} else if (
|
|
|
|
|
(new_region.base < cur_region->base
|
|
|
|
|
|| new_region.limit + 1 == cur_region->base)
|
|
|
|
|
&& new_region.status == cur_region->status) {
|
|
|
|
|
/* the new region overlaps the BEGINNING of the current
|
|
|
|
|
* region, change the limit of the new region to match
|
|
|
|
|
* that of the current region. */
|
2022-12-28 18:41:04 +00:00
|
|
|
new_region.limit = cur_region->limit;
|
|
|
|
|
} else {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
/* with the new region updated to include the current region, we
|
|
|
|
|
* can remove the current region from the list */
|
2022-12-28 18:41:04 +00:00
|
|
|
memblock_remove_region(type, i);
|
|
|
|
|
i--;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (add_new) {
|
|
|
|
|
memblock_insert_region(type, &new_region);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int memblock_add(uintptr_t base, size_t size)
|
|
|
|
|
{
|
2023-01-08 12:13:59 +00:00
|
|
|
if (memblock.memory.count >= memblock.memory.max - 2) {
|
|
|
|
|
if (memblock.reserved.count >= memblock.reserved.max - 2) {
|
|
|
|
|
memblock_double_capacity(&memblock.reserved);
|
|
|
|
|
}
|
2023-02-02 21:01:22 +00:00
|
|
|
|
2023-01-08 12:13:59 +00:00
|
|
|
memblock_double_capacity(&memblock.memory);
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
return memblock_add_range(
|
|
|
|
|
&memblock.memory,
|
|
|
|
|
base,
|
|
|
|
|
size,
|
|
|
|
|
MEMBLOCK_MEMORY);
|
2022-12-28 18:41:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
int memblock_reserve(uintptr_t base, size_t size)
|
|
|
|
|
{
|
2023-01-08 12:13:59 +00:00
|
|
|
if (memblock.reserved.count >= memblock.reserved.max - 2) {
|
|
|
|
|
memblock_double_capacity(&memblock.reserved);
|
|
|
|
|
}
|
2023-02-02 21:01:22 +00:00
|
|
|
|
2026-02-08 12:17:27 +00:00
|
|
|
return memblock_add_range(
|
|
|
|
|
&memblock.reserved,
|
|
|
|
|
base,
|
|
|
|
|
size,
|
|
|
|
|
MEMBLOCK_RESERVED);
|
2022-12-29 20:53:39 +00:00
|
|
|
}
|
|
|
|
|
|
2023-02-06 20:38:32 +00:00
|
|
|
/* First-fit physical allocator: walk the free ranges (memory minus
 * reserved) within the configured arena, find the first one that can
 * hold `size` bytes at the requested alignment, reserve it, and return
 * the aligned PHYSICAL base address.  Returns 0 on failure.
 *
 * NOTE(review): the alignment arithmetic (`base & (align - 1)`,
 * `base &= ~(align - 1)`) assumes align is a power of two -- this is
 * not validated; confirm callers never pass anything else. */
static phys_addr_t do_alloc(size_t size, phys_addr_t align)
{
	if (!align) {
		/* align to 8-byte boundary by default */
		align = 0x8;
	}

	/* the bounds of the memory region to reserve */
	phys_addr_t allocated_base = ADDR_MAX, allocated_limit = 0;
	/* the address to return to the caller. may be different from
	   allocated_base depending on alignment requirements. */
	phys_addr_t returned_base = ADDR_MAX;

	/* translate the virtual arena bounds to physical addresses, since
	   the iterator and region lists work in physical space */
	phys_addr_t region_start = memblock.m_alloc_start - memblock.m_voffset;
	phys_addr_t region_end = memblock.m_alloc_end - memblock.m_voffset;

	struct memblock_iter it;
	for_each_free_mem_range(&it, region_start, region_end)
	{
		/* round the candidate base up to the next aligned address */
		phys_addr_t base = it.it_base;
		if (base & (align - 1)) {
			base &= ~(align - 1);
			base += align;
		}

		size_t region_size = 0;

		/* it_limit is inclusive, so the usable size is limit-base+1.
		   NOTE(review): when it_limit == base the region holds one
		   byte but region_size stays 0 -- harmless off-by-one for
		   any real allocation, but worth confirming. */
		if (it.it_limit > base) {
			region_size = it.it_limit - base + 1;
		}

		if (region_size >= size) {
			/* reserve from the UNALIGNED start of the free range
			   so the alignment gap cannot be handed out later */
			allocated_base = it.it_base;
			allocated_limit = base + size;
			returned_base = base;
			break;
		}
	}

	/* allocated_base still at its sentinel: no free range was big enough */
	if (allocated_base == ADDR_MAX) {
		return 0;
	}

	/* record the allocation in the reserved list */
	int status = memblock_add_range(
		&memblock.reserved,
		allocated_base,
		allocated_limit - allocated_base,
		MEMBLOCK_ALLOC);
	if (status != 0) {
		return 0;
	}

	return returned_base;
}
|
|
|
|
|
|
2023-02-06 20:38:32 +00:00
|
|
|
/* Allocate `size` bytes at the given alignment and return a VIRTUAL
 * pointer to the block, or NULL on failure.  Ensures the reserved list
 * has room for the new entry before allocating. */
void *memblock_alloc(size_t size, phys_addr_t align)
{
	if (memblock.reserved.count >= memblock.reserved.max - 2) {
		memblock_double_capacity(&memblock.reserved);
	}

	phys_addr_t phys = do_alloc(size, align);
	if (!phys) {
		/* allocation failed; 0 maps to a NULL pointer */
		return (void *)0;
	}

	/* translate the physical address into the virtual window */
	return (void *)(phys + memblock.m_voffset);
}
|
|
|
|
|
|
2023-02-06 20:38:32 +00:00
|
|
|
/* Allocate `size` bytes at the given alignment and return the PHYSICAL
 * base address of the block, or 0 on failure.  Ensures the reserved
 * list has room for the new entry before allocating. */
phys_addr_t memblock_alloc_phys(size_t size, phys_addr_t align)
{
	struct memblock_type *rsv = &memblock.reserved;

	if (rsv->count >= rsv->max - 2) {
		memblock_double_capacity(rsv);
	}

	return do_alloc(size, align);
}
|
|
|
|
|
|
2023-01-08 12:21:13 +00:00
|
|
|
/* Free a block previously returned by memblock_alloc().
 *
 * Currently a no-op stub: memblock is an early-boot bump-style
 * allocator and nothing is ever released.  Both parameters are
 * ignored; always returns 0. */
int memblock_free(void *p, size_t size)
{
	return 0;
}
|
|
|
|
|
|
|
|
|
|
/* Free a block previously returned by memblock_alloc_phys().
 *
 * Currently a no-op stub, matching memblock_free(): nothing is ever
 * released by this allocator.  Both parameters are ignored; always
 * returns 0. */
int memblock_free_phys(phys_addr_t addr, size_t size)
{
	return 0;
}
|
2022-12-28 23:03:30 +00:00
|
|
|
|
2023-12-30 15:29:13 +00:00
|
|
|
/* Advance a memblock iterator to the next region and store its bounds
 * (inclusive) in it->it_base / it->it_limit, clipped to [start, end].
 *
 * Two modes:
 *   - type_b == NULL: iterate the regions of type_a directly.
 *   - type_b != NULL: iterate the FREE ranges, i.e. the parts of each
 *     type_a (memory) region not covered by any type_b (reserved)
 *     region.  Two cursors packed into it->__idx (see ITER/IDX_A/IDX_B)
 *     track the position in both lists across calls.
 *
 * On exhaustion it->__idx is set to ITER_END.
 */
void __next_memory_region(
	struct memblock_iter *it,
	struct memblock_type *type_a,
	struct memblock_type *type_b,
	uintptr_t start,
	uintptr_t end)
{
	/* unpack the two list cursors saved by the previous call */
	unsigned int idx_a = IDX_A(it->__idx);
	unsigned int idx_b = IDX_B(it->__idx);

	for (; idx_a < type_a->count; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		uintptr_t m_start = m->base;
		uintptr_t m_end = m->limit;

		/* simple mode: no subtraction list, yield type_a regions */
		if (!type_b) {
			it->it_base = MAX(m->base, start);
			it->it_limit = MIN(m->limit, end);
			it->it_status = m->status;

			/* NOTE(review): limits are inclusive elsewhere in this
			   file, so `>=` here drops single-byte regions -- confirm
			   this is intended. */
			if (it->it_base >= it->it_limit) {
				continue;
			}

			it->__idx = ITER(idx_a + 1, idx_b);
			return;
		}

		if (m_end < start) {
			/* we haven't reached the requested memory range yet */
			continue;
		}

		if (m_start > end) {
			/* we have gone past the requested memory range and can
			 * now stop */
			break;
		}

		/* walk the gaps between consecutive type_b regions; the extra
		   iteration (count + 1) covers the gap after the last one.
		   r itself is only dereferenced when idx_b < count; at
		   idx_b == count only r[-1] (the last valid entry) is read. */
		for (; idx_b < type_b->count + 1; idx_b++) {
			struct memblock_region *r = &type_b->regions[idx_b];

			/* r_start and r_end delimit the region of memory
			   between the current and previous reserved regions. if
			   we have gone past the last reserved region, these
			   variables delimit the range between the end of the
			   last reserved region and the end of memory. */
			uintptr_t r_start = idx_b > 0 ? r[-1].limit + 1 : 0;
			uintptr_t r_end;

			if (idx_b < type_b->count) {
				r_end = r->base;

				/* we decrement r_end to get the address of the
				   last byte of the free region. if r_end is
				   already zero, there is a reserved region
				   starting at address 0x0. as long as r_end ==
				   r_start == 0x00000, we will skip this region.
				*/
				if (r_end) {
					r_end--;
				}
			} else {
				/* this maximum value will be clamped to the
				   bounds of memblock.memory before being
				   returned to the caller */
				r_end = ADDR_MAX;
			}

			if (r_start >= r_end) {
				/* this free region has a length of zero, move
				 * to the next one */
				continue;
			}

			/* NOTE(review): with inclusive limits, `>=` means a
			   gap starting exactly at m_end (1-byte overlap) is
			   treated as past the region -- confirm intended. */
			if (r_start >= m_end) {
				/* we've gone past the end of the current memory
				 * region, and need to go to the next one */
				break;
			}

			/* we've already gone past this free memory region. move
			 * to the next one */
			if (m_start >= r_end) {
				continue;
			}

			/* we want the area that is overlapped by both
			   region M (m_start - m_end) : The region defined
			   as system memory. region R (r_start - r_end) : The
			   region defined as free / outside of any reserved
			   regions.
			*/
			it->it_base = MAX(m_start, r_start);
			it->it_limit = MIN(m_end, r_end);

			/* further limit the region to the intersection between
			   the region itself and the specified iteration bounds
			*/
			it->it_base = MAX(it->it_base, start);
			it->it_limit = MIN(it->it_limit, end);

			if (it->it_limit <= it->it_base) {
				/* this region is not part of the specified
				 * bounds, skip it. */
				continue;
			}

			it->it_status = MEMBLOCK_MEMORY;

			/* whichever region is smaller, increment the pointer
			   for that type, so we can compare the larger region
			   with the next region of the incremented type. */
			if (m_end <= r_end) {
				idx_a++;
			} else {
				idx_b++;
			}

			/* store the position for the next iteration */
			it->__idx = ITER(idx_a, idx_b);
			return;
		}
	}

	/* ULLONG_MAX signals the end of the iteration */
	it->__idx = ITER_END;
}
|
2023-02-06 20:39:33 +00:00
|
|
|
|
2026-02-08 12:33:03 +00:00
|
|
|
phys_addr_t memblock_virt_to_phys(const void *p)
|
2023-02-06 20:39:33 +00:00
|
|
|
{
|
|
|
|
|
return (phys_addr_t)p - memblock.m_voffset;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Translate a physical address into the corresponding virtual pointer
 * by adding the global virtual offset. */
void *memblock_phys_to_virt(phys_addr_t p)
{
	phys_addr_t virt = p + memblock.m_voffset;

	return (void *)virt;
}
|