memblock: mark bytes preceding an aligned allocation as reserved.

This prevents tiny holes of free memory from appearing between
blocks of aligned allocations.
This commit is contained in:
2023-02-07 15:57:18 +00:00
parent e9d4b60181
commit 51ad3d48fd

View File

@@ -235,21 +235,27 @@ static phys_addr_t do_alloc(size_t size, phys_addr_t align)
 		align = 0x8;
 	}
 
 	/* the base address of the memory region to be reserved */
 	phys_addr_t allocated_base = ADDR_MAX;
+	/* the address to return to the caller. may be different from
+	   allocated_base depending on alignment requirements. */
+	phys_addr_t returned_base = ADDR_MAX;
 
 	phys_addr_t region_start = memblock.m_alloc_start - memblock.m_voffset;
 	phys_addr_t region_end = memblock.m_alloc_end - memblock.m_voffset;
 	memblock_iter_t it;
 	for_each_free_mem_range (&it, region_start, region_end) {
-		if (it.it_base & (align - 1)) {
-			it.it_base &= ~(align - 1);
-			it.it_base += align;
+		phys_addr_t base = it.it_base;
+		if (base & (align - 1)) {
+			base &= ~(align - 1);
+			base += align;
 		}
-		size_t region_size = it.it_limit - it.it_base + 1;
+		size_t region_size = it.it_limit - base + 1;
 		if (region_size >= size) {
 			allocated_base = it.it_base;
+			returned_base = base;
 			break;
 		}
 	}
@@ -263,7 +269,7 @@ static phys_addr_t do_alloc(size_t size, phys_addr_t align)
 		return 0;
 	}
 
-	return allocated_base;
+	return returned_base;
 }
 
 void *memblock_alloc(size_t size, phys_addr_t align)