All kernel headers have been moved from include/mango to include/kernel and now contain only definitions that are relevant to kernel-space. Any definitions relevant to both kernel- and user-space (i.e. type definitions, syscall IDs) have been moved to include/mango within libmango.
279 lines
5.8 KiB
C
279 lines
5.8 KiB
C
#include <kernel/libc/string.h>
|
|
#include <kernel/memblock.h>
|
|
#include <kernel/printk.h>
|
|
#include <kernel/types.h>
|
|
#include <kernel/vm.h>
|
|
|
|
/* Pre-calculated page order -> size conversion table.
 * Indexed by enum vm_page_order; each entry is the block size in
 * bytes for that order (4 KiB, doubling per order up to 64 GiB). */
static size_t page_order_bytes[] = {
	[VM_PAGE_4K] = 0x1000,
	[VM_PAGE_8K] = 0x2000,
	[VM_PAGE_16K] = 0x4000,
	[VM_PAGE_32K] = 0x8000,
	[VM_PAGE_64K] = 0x10000,
	[VM_PAGE_128K] = 0x20000,
	[VM_PAGE_256K] = 0x40000,
	[VM_PAGE_512K] = 0x80000,
	[VM_PAGE_1M] = 0x100000,
	[VM_PAGE_2M] = 0x200000,
	[VM_PAGE_4M] = 0x400000,
	[VM_PAGE_8M] = 0x800000,
	[VM_PAGE_16M] = 0x1000000,
	[VM_PAGE_32M] = 0x2000000,
	[VM_PAGE_64M] = 0x4000000,
	[VM_PAGE_128M] = 0x8000000,

	/* vm can support pages of this size, but
	   struct vm_page only has 4 bits with which to store
	   the page order, which cannot accommodate these
	   larger order numbers.
	   NOTE(review): entries from VM_PAGE_4G up require size_t to be
	   64-bit (values exceed 32 bits) — confirm for all targets. */
	[VM_PAGE_256M] = 0x10000000,
	[VM_PAGE_512M] = 0x20000000,
	[VM_PAGE_1G] = 0x40000000,
	[VM_PAGE_2G] = 0x80000000,
	[VM_PAGE_4G] = 0x100000000,
	[VM_PAGE_8G] = 0x200000000,
	[VM_PAGE_16G] = 0x400000000,
	[VM_PAGE_32G] = 0x800000000,
	[VM_PAGE_64G] = 0x1000000000,
};
|
|
|
|
phys_addr_t vm_virt_to_phys(const void *p)
|
|
{
|
|
uintptr_t pv = (uintptr_t)p;
|
|
|
|
if (pv >= memblock.m_alloc_start && pv < memblock.m_alloc_end) {
|
|
return memblock_virt_to_phys(p);
|
|
}
|
|
|
|
if (pv >= VM_PAGEMAP_BASE && pv <= VM_PAGEMAP_LIMIT) {
|
|
return pv - VM_PAGEMAP_BASE;
|
|
}
|
|
|
|
/* TODO use pmap to find the physical address */
|
|
return 0;
|
|
}
|
|
|
|
/* Translate a physical address to its kernel virtual address.
 * Addresses inside the memblock allocation window map through
 * memblock; everything else maps through the linear pagemap. */
void *vm_phys_to_virt(phys_addr_t p)
{
	const phys_addr_t mb_lo = memblock.m_alloc_start - memblock.m_voffset;
	const phys_addr_t mb_hi = memblock.m_alloc_end - memblock.m_voffset;

	if (p >= mb_lo && p < mb_hi)
		return memblock_phys_to_virt(p);

	return (void *)(VM_PAGEMAP_BASE + p);
}
|
|
|
|
/* Look up the struct vm_page describing the given physical address,
 * dispatching on the active memory model; NULL for unknown models. */
struct vm_page *vm_page_get(phys_addr_t addr)
{
	const int model = vm_memory_model();

	if (model == VM_MODEL_FLAT)
		return vm_page_get_flat(addr);

	if (model == VM_MODEL_SPARSE)
		return vm_page_get_sparse(addr);

	return NULL;
}
|
|
|
|
phys_addr_t vm_page_get_paddr(struct vm_page *pg)
|
|
{
|
|
return vm_page_get_pfn(pg) * VM_PAGE_SIZE;
|
|
}
|
|
|
|
/* Return the kernel virtual address of the page @pg describes. */
void *vm_page_get_vaddr(struct vm_page *pg)
{
	/* Reuse vm_page_get_paddr() rather than recomputing
	 * pfn * VM_PAGE_SIZE here: one definition of the pfn->paddr
	 * conversion (including its widening) instead of two. */
	return vm_phys_to_virt(vm_page_get_paddr(pg));
}
|
|
|
|
size_t vm_page_get_pfn(struct vm_page *pg)
|
|
{
|
|
switch (vm_memory_model()) {
|
|
case VM_MODEL_FLAT:
|
|
return vm_page_get_pfn_flat(pg);
|
|
case VM_MODEL_SPARSE:
|
|
return vm_page_get_pfn_sparse(pg);
|
|
default:
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
size_t vm_page_order_to_bytes(enum vm_page_order order)
|
|
{
|
|
if (order < VM_PAGE_4K || order > VM_PAGE_64G) {
|
|
return 0;
|
|
}
|
|
|
|
return page_order_bytes[order];
|
|
}
|
|
|
|
phys_addr_t vm_page_order_to_pages(enum vm_page_order order)
|
|
{
|
|
if (order < VM_PAGE_4K || order > VM_PAGE_64G) {
|
|
return 0;
|
|
}
|
|
|
|
return page_order_bytes[order] >> VM_PAGE_SHIFT;
|
|
}
|
|
|
|
/* Return the alignment mask for a block of the given order:
 * all bits above the block size are set, so `addr & mask` rounds
 * an address down to the block boundary.
 * NOTE(review): the siblings vm_page_order_to_bytes/_to_pages bound
 * order by VM_PAGE_4K..VM_PAGE_64G, but this uses 0..VM_PAGE_MAX_ORDER.
 * If VM_PAGE_MAX_ORDER exceeds the last page_order_bytes index this
 * reads out of bounds — confirm the enum values and unify the checks. */
vm_alignment_t vm_page_order_to_alignment(enum vm_page_order order)
{
	if (order < 0 || order > VM_PAGE_MAX_ORDER) {
		return 0;
	}

	return ~(page_order_bytes[order] - 1);
}
|
|
|
|
size_t vm_bytes_to_pages(size_t bytes)
|
|
{
|
|
if (bytes & (VM_PAGE_SIZE - 1)) {
|
|
bytes &= ~(VM_PAGE_SIZE - 1);
|
|
bytes += VM_PAGE_SIZE;
|
|
}
|
|
|
|
bytes >>= VM_PAGE_SHIFT;
|
|
return bytes;
|
|
}
|
|
|
|
struct vm_zone *vm_page_get_zone(struct vm_page *pg)
|
|
{
|
|
struct vm_pg_data *node = vm_pg_data_get(pg->p_node);
|
|
if (!node) {
|
|
return 0;
|
|
}
|
|
|
|
if (pg->p_zone >= VM_MAX_ZONES) {
|
|
return NULL;
|
|
}
|
|
|
|
return &node->pg_zones[pg->p_zone];
|
|
}
|
|
|
|
struct vm_page *vm_page_alloc(enum vm_page_order order, enum vm_flags flags)
|
|
{
|
|
/* TODO prefer nodes closer to us */
|
|
struct vm_pg_data *node = vm_pg_data_get(0);
|
|
enum vm_zone_id zone_id = VM_ZONE_MAX;
|
|
if (flags & VM_GET_DMA) {
|
|
zone_id = VM_ZONE_DMA;
|
|
}
|
|
|
|
while (1) {
|
|
struct vm_zone *z = &node->pg_zones[zone_id];
|
|
|
|
struct vm_page *pg = vm_zone_alloc_page(z, order, flags);
|
|
if (pg) {
|
|
return pg;
|
|
}
|
|
|
|
if (zone_id <= VM_ZONE_MIN) {
|
|
break;
|
|
}
|
|
|
|
zone_id--;
|
|
}
|
|
|
|
return NULL;
|
|
}
|
|
|
|
/* Return @pg to its owning zone's free lists.
 * A page whose zone cannot be resolved is silently ignored. */
void vm_page_free(struct vm_page *pg)
{
	struct vm_zone *zone = vm_page_get_zone(pg);

	if (zone)
		vm_zone_free_page(zone, pg);
}
|
|
|
|
/* Split a page block into its two half-sized buddies.
 * On success, *a receives the lower half (pg itself), *b the upper
 * half, and 0 is returned. Returns -1 if pg is already at the
 * minimum order and cannot be split.
 * NOTE(review): assumes pg is the head of its block, and that
 * vm_page_get_buddy() cannot return NULL after the order decrement
 * (buddy is dereferenced unconditionally below) — confirm. */
int vm_page_split(struct vm_page *pg, struct vm_page **a, struct vm_page **b)
{
	if (pg->p_order == VM_PAGE_MIN_ORDER) {
		return -1;
	}

	/* NOTE that we cannot use vm_page_foreach here,
	   as we are modifying the flags that vm_page_foreach
	   uses to determine where a given page block ends */
	/* nr_frames is computed from the ORIGINAL order, before the
	   loop below decrements it, so every frame of the original
	   block is visited. */
	size_t nr_frames = vm_page_order_to_pages(pg->p_order);
	for (size_t i = 0; i < nr_frames; i++) {
		pg[i].p_order--;
	}

	/* With the order halved, the buddy (XOR of the new block size
	   into the address) is the upper half of the original block. */
	struct vm_page *buddy = vm_page_get_buddy(pg);

	if (pg->p_order == VM_PAGE_MIN_ORDER) {
		/* Halves are now single frames: clear huge/head markers. */
		pg->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
		buddy->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
	} else {
		/* Halves are still multi-frame blocks: the first frame of
		   each half becomes the head of a huge block. */
		pg->p_flags |= VM_PAGE_HEAD | VM_PAGE_HUGE;
		buddy->p_flags |= VM_PAGE_HEAD | VM_PAGE_HUGE;
	}

	*a = pg;
	*b = buddy;

	return 0;
}
|
|
|
|
/* Merge two buddy blocks of equal order into one block of the next
 * order up. Returns the head of the merged block, or NULL if the
 * pages cannot be merged (different orders, already at max order,
 * not buddies of each other, or mismatched ALLOC/RESERVED state). */
struct vm_page *vm_page_merge(struct vm_page *a, struct vm_page *b)
{
	if (a->p_order != b->p_order) {
		return NULL;
	}

	if (a->p_order == VM_PAGE_MAX_ORDER) {
		return NULL;
	}

	/* The buddy relation is symmetric (XOR of the block size), so
	   checking a's buddy also establishes b's. */
	if (vm_page_get_buddy(a) != b) {
		return NULL;
	}

	/* Refuse to merge a free block with an allocated/reserved one. */
	if ((a->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED))
	    != (b->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED))) {
		return NULL;
	}

	/* make sure that a comes before b */
	if (a > b) {
		struct vm_page *tmp = a;
		a = b;
		b = tmp;
	}

	/* Bump the head's order first; the loop below propagates the
	   new order to every frame of the merged block. */
	a->p_order++;

	/* NOTE that we cannot use vm_page_foreach here,
	   as we are modifying the flags that vm_page_foreach
	   uses to determine where a given page block ends */
	size_t nr_frames = vm_page_order_to_pages(a->p_order);
	for (size_t i = 0; i < nr_frames; i++) {
		a[i].p_flags &= ~VM_PAGE_HEAD;
		a[i].p_flags |= VM_PAGE_HUGE;
		a[i].p_order = a->p_order;
	}

	/* Only the first frame of the merged block is the head
	   (the loop above cleared HEAD on everything, including a). */
	a->p_flags |= VM_PAGE_HEAD;

	return a;
}
|
|
|
|
struct vm_page *vm_page_get_buddy(struct vm_page *pg)
|
|
{
|
|
phys_addr_t paddr = vm_page_get_paddr(pg);
|
|
paddr = paddr ^ vm_page_order_to_bytes(pg->p_order);
|
|
return vm_page_get(paddr);
|
|
}
|
|
|
|
struct vm_page *vm_page_get_next_tail(struct vm_page *pg)
|
|
{
|
|
struct vm_page *next = pg + 1;
|
|
if (next->p_flags & VM_PAGE_HEAD || !(next->p_flags & VM_PAGE_HUGE)) {
|
|
return NULL;
|
|
}
|
|
|
|
return next;
|
|
}
|