memblock: mark bytes preceding an aligned allocation as reserved.
This prevents tiny holes of free memory appearing between blocks of aligned allocations.
This commit is contained in:
@@ -235,21 +235,27 @@ static phys_addr_t do_alloc(size_t size, phys_addr_t align)
 		align = 0x8;
 	}
 
+	/* the base address of the memory region to reserved */
 	phys_addr_t allocated_base = ADDR_MAX;
+	/* the address to return to the caller. may be different from
+	   allocated_base depending on alignment requirements. */
+	phys_addr_t returned_base = ADDR_MAX;
 
 	phys_addr_t region_start = memblock.m_alloc_start - memblock.m_voffset;
 	phys_addr_t region_end = memblock.m_alloc_end - memblock.m_voffset;
 
 	memblock_iter_t it;
 	for_each_free_mem_range (&it, region_start, region_end) {
-		if (it.it_base & (align - 1)) {
-			it.it_base &= ~(align - 1);
-			it.it_base += align;
+		phys_addr_t base = it.it_base;
+		if (base & (align - 1)) {
+			base &= ~(align - 1);
+			base += align;
 		}
 
-		size_t region_size = it.it_limit - it.it_base + 1;
+		size_t region_size = it.it_limit - base + 1;
 		if (region_size >= size) {
 			allocated_base = it.it_base;
+			returned_base = base;
 			break;
 		}
 	}
@@ -263,7 +269,7 @@ static phys_addr_t do_alloc(size_t size, phys_addr_t align)
 		return 0;
 	}
 
-	return allocated_base;
+	return returned_base;
 }
 
 void *memblock_alloc(size_t size, phys_addr_t align)
Reference in New Issue
Block a user