sandbox: moved all sources to main kernel tree
This commit is contained in:
373
include/socks/btree.h
Normal file
373
include/socks/btree.h
Normal file
@@ -0,0 +1,373 @@
|
||||
/*
|
||||
The Clear BSD License
|
||||
|
||||
Copyright (c) 2023 Max Wash
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted (subject to the limitations in the disclaimer
|
||||
below) provided that the following conditions are met:
|
||||
|
||||
- Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
- Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
- Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
*/
|
||||
|
||||
#ifndef SOCKS_BTREE_H_
|
||||
#define SOCKS_BTREE_H_
|
||||
|
||||
#include <stddef.h> /* offsetof() — required by BTREE_CONTAINER below */
#include <stdint.h>

/* if your custom structure contains a btree_node_t (i.e. it can be part of a btree),
   you can use this macro to convert a btree_node_t* back to a your_type*.

   @param t the name of your custom type (something that can be passed to offsetof)
   @param m the name of the btree_node_t member variable within your custom type.
   @param v the btree_node_t pointer that you wish to convert. if this is NULL, NULL will be returned.
*/
#define BTREE_CONTAINER(t, m, v) ((void *)((v) ? (uintptr_t)(v) - (offsetof(t, m)) : 0))
|
||||
|
||||
/* defines a simple node insertion function.
   this function assumes that your nodes have simple integer keys that can be compared with the usual operators.

   NOTE: duplicate keys are rejected — if a node with an equal key is already present,
   the function returns without inserting (and without re-balancing).

   EXAMPLE:
   if you have a tree node type like this:

       struct my_tree_node {
           int key;
           btree_node_t base;
       }

   You would use the following call to generate an insert function for a tree with this node type:

       BTREE_DEFINE_SIMPLE_INSERT(struct my_tree_node, base, key, my_tree_node_insert);

   Which would emit a function defined like:

       static void my_tree_node_insert(btree_t *tree, struct my_tree_node *node);

   @param node_type your custom tree node type. usually a structure that contains a btree_node_t member.
   @param container_node_member the name of the btree_node_t member variable within your custom type.
   @param container_key_member the name of the key member variable within your custom type.
   @param function_name the name of the function to generate.
*/
#define BTREE_DEFINE_SIMPLE_INSERT(node_type, container_node_member, container_key_member, function_name) \
    static void function_name(btree_t *tree, node_type *node) \
    { \
        if (!tree->b_root) { \
            tree->b_root = &node->container_node_member; \
            btree_insert_fixup(tree, &node->container_node_member); \
            return; \
        } \
        \
        btree_node_t *cur = tree->b_root; \
        while (1) { \
            node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
            btree_node_t *next = NULL; \
            \
            if (node->container_key_member > cur_node->container_key_member) { \
                next = btree_right(cur); \
                \
                if (!next) { \
                    btree_put_right(cur, &node->container_node_member); \
                    break; \
                } \
            } else if (node->container_key_member < cur_node->container_key_member) { \
                next = btree_left(cur); \
                \
                if (!next) { \
                    btree_put_left(cur, &node->container_node_member); \
                    break; \
                } \
            } else { \
                return; \
            } \
            \
            cur = next; \
        } \
        \
        btree_insert_fixup(tree, &node->container_node_member); \
    }
|
||||
|
||||
/* defines a node insertion function.
   this function should be used for trees with complex node keys that cannot be directly compared.
   a comparator for your keys must be supplied.

   NOTE: duplicate keys are rejected — if the comparator reports equality against an
   existing node, the function returns without inserting (and without re-balancing).

   EXAMPLE:
   if you have a tree node type like this:

       struct my_tree_node {
           complex_key_t key;
           btree_node_t base;
       }

   You would need to define a comparator function or macro with the following signature:

       int my_comparator(struct my_tree_node *a, struct my_tree_node *b);

   Which implements the following (strcmp()-style — any negative/positive value works;
   the historical strict -1/0/1 contract remains valid):

       return a negative value if a < b
       return 0 if a == b
       return a positive value if a > b

   You would use the following call to generate an insert function for a tree with this node type:

       BTREE_DEFINE_INSERT(struct my_tree_node, base, key, my_tree_node_insert, my_comparator);

   Which would emit a function defined like:

       static void my_tree_node_insert(btree_t *tree, struct my_tree_node *node);

   @param node_type your custom tree node type. usually a structure that contains a btree_node_t member.
   @param container_node_member the name of the btree_node_t member variable within your custom type.
   @param container_key_member the name of the key member variable within your custom type.
   @param function_name the name of the function to generate.
   @param comparator the name of a comparator function or functional-macro that conforms to the
          requirements listed above.
*/
#define BTREE_DEFINE_INSERT(node_type, container_node_member, container_key_member, function_name, comparator) \
    static void function_name(btree_t *tree, node_type *node) \
    { \
        if (!tree->b_root) { \
            tree->b_root = &node->container_node_member; \
            btree_insert_fixup(tree, &node->container_node_member); \
            return; \
        } \
        \
        btree_node_t *cur = tree->b_root; \
        while (1) { \
            node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
            btree_node_t *next = NULL; \
            int cmp = comparator(node, cur_node); \
            \
            if (cmp > 0) { \
                next = btree_right(cur); \
                \
                if (!next) { \
                    btree_put_right(cur, &node->container_node_member); \
                    break; \
                } \
            } else if (cmp < 0) { \
                next = btree_left(cur); \
                \
                if (!next) { \
                    btree_put_left(cur, &node->container_node_member); \
                    break; \
                } \
            } else { \
                return; \
            } \
            \
            cur = next; \
        } \
        \
        btree_insert_fixup(tree, &node->container_node_member); \
    }
|
||||
|
||||
/* defines a simple tree search function.
   this function assumes that your nodes have simple integer keys that can be compared with the usual operators.

   the generated function returns the node whose key equals `key`, or NULL if no such node exists.

   EXAMPLE:
   if you have a tree node type like this:

       struct my_tree_node {
           int key;
           btree_node_t base;
       }

   You would use the following call to generate a search function for a tree with this node type:

       BTREE_DEFINE_SIMPLE_GET(struct my_tree_node, int, base, key, my_tree_node_get);

   Which would emit a function defined like:

       static struct my_tree_node *my_tree_node_get(btree_t *tree, int key);

   @param node_type your custom tree node type. usually a structure that contains a btree_node_t member.
   @param key_type the type name of the key embedded in your custom tree node type. this type must be
          compatible with the builtin comparison operators.
   @param container_node_member the name of the btree_node_t member variable within your custom type.
   @param container_key_member the name of the key member variable within your custom type.
   @param function_name the name of the function to generate.
*/
#define BTREE_DEFINE_SIMPLE_GET(node_type, key_type, container_node_member, container_key_member, function_name) \
    static node_type *function_name(btree_t *tree, key_type key) \
    { \
        btree_node_t *cur = tree->b_root; \
        while (cur) { \
            node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
            if (key > cur_node->container_key_member) { \
                cur = btree_right(cur); \
            } else if (key < cur_node->container_key_member) { \
                cur = btree_left(cur); \
            } else { \
                return cur_node; \
            } \
        } \
        \
        return NULL; \
    }
|
||||
|
||||
/* perform an in-order traversal of a binary tree.

   If you have a tree defined like:

       btree_t my_tree;

   with nodes defined like:

       struct my_tree_node {
           int key;
           btree_node_t base;
       }

   and you want to do something like:

       foreach (struct my_tree_node *node : my_tree) { ... }

   you should use this:

       btree_foreach (struct my_tree_node, node, &my_tree, base) { ... }

   NOTE: the iterator variable is declared inside the for statement, so it is scoped
   to the loop body. the loop terminates because BTREE_CONTAINER maps a NULL
   btree_node_t* (returned by btree_first()/btree_next() at the end) to NULL.

   @param iter_type the type name of the iterator variable. this should be the tree's node type, and shouldn't be a pointer.
   @param iter_name the name of the iterator variable.
   @param tree_name a pointer to the tree to traverse.
   @param node_member the name of the btree_node_t member variable within the tree node type.
*/
#define btree_foreach(iter_type, iter_name, tree_name, node_member) \
    for (iter_type *iter_name = BTREE_CONTAINER(iter_type, node_member, btree_first(tree_name)); \
         iter_name; \
         iter_name = BTREE_CONTAINER(iter_type, node_member, btree_next(&((iter_name)->node_member))))
|
||||
|
||||
/* perform a reverse in-order traversal of a binary tree.

   If you have a tree defined like:

       btree_t my_tree;

   with nodes defined like:

       struct my_tree_node {
           int key;
           btree_node_t base;
       }

   and you want to do something like:

       foreach (struct my_tree_node *node : reverse(my_tree)) { ... }

   you should use this:

       btree_foreach_r (struct my_tree_node, node, &my_tree, base) { ... }

   NOTE: the iterator variable is declared inside the for statement, so it is scoped
   to the loop body. the loop terminates because BTREE_CONTAINER maps a NULL
   btree_node_t* (returned by btree_last()/btree_prev() at the end) to NULL.

   @param iter_type the type name of the iterator variable. this should be the tree's node type, and shouldn't be a pointer.
   @param iter_name the name of the iterator variable.
   @param tree_name a pointer to the tree to traverse.
   @param node_member the name of the btree_node_t member variable within the tree node type.
*/
#define btree_foreach_r(iter_type, iter_name, tree_name, node_member) \
    for (iter_type *iter_name = BTREE_CONTAINER(iter_type, node_member, btree_last(tree_name)); \
         iter_name; \
         iter_name = BTREE_CONTAINER(iter_type, node_member, btree_prev(&((iter_name)->node_member))))
|
||||
|
||||
/* binary tree node. this *cannot* be used directly: you need to define a custom node type
   that contains (embeds) a member variable of type btree_node_t, then use the supplied
   macros to define functions that manipulate your custom binary tree.
*/
typedef struct btree_node {
    /* tree links; NULL where the corresponding relative does not exist */
    struct btree_node *b_parent, *b_left, *b_right;
    /* height of the subtree rooted at this node; a leaf has height 1 (see btree_height()) */
    unsigned short b_height;
} btree_node_t;

/* binary tree. unlike btree_node_t, you can define variables of type btree_t. */
typedef struct btree {
    struct btree_node *b_root; /* root node; NULL when the tree is empty */
} btree_t;
|
||||
|
||||
/* re-balance a binary tree after an insertion operation.
|
||||
|
||||
NOTE that, if you define an insertion function using BTREE_DEFINE_INSERT or similar,
|
||||
this function will automatically be called for you.
|
||||
|
||||
@param tree the tree to re-balance.
|
||||
@param node the node that was just inserted into the tree.
|
||||
*/
|
||||
extern void btree_insert_fixup(btree_t *tree, btree_node_t *node);
|
||||
|
||||
/* delete a node from a binary tree and re-balance the tree afterwards.
|
||||
|
||||
@param tree the tree to delete from
|
||||
@param node the node to delete.
|
||||
*/
|
||||
extern void btree_delete(btree_t *tree, btree_node_t *node);
|
||||
|
||||
/* get the first node in a binary tree.
|
||||
|
||||
this will be the node with the smallest key (i.e. the node that is furthest-left from the root)
|
||||
*/
|
||||
extern btree_node_t *btree_first(btree_t *tree);
|
||||
|
||||
/* get the last node in a binary tree.
|
||||
|
||||
this will be the node with the largest key (i.e. the node that is furthest-right from the root)
|
||||
*/
|
||||
extern btree_node_t *btree_last(btree_t *tree);
|
||||
/* for any binary tree node, this function returns the node with the next-largest key value */
|
||||
extern btree_node_t *btree_next(btree_node_t *node);
|
||||
/* for any binary tree node, this function returns the node with the next-smallest key value */
|
||||
extern btree_node_t *btree_prev(btree_node_t *node);
|
||||
|
||||
/* sets `child` as the immediate left-child of `parent` */
|
||||
static inline void btree_put_left(btree_node_t *parent, btree_node_t *child)
|
||||
{
|
||||
parent->b_left = child;
|
||||
child->b_parent = parent;
|
||||
}
|
||||
|
||||
/* sets `child` as the immediate right-child of `parent` */
|
||||
static inline void btree_put_right(btree_node_t *parent, btree_node_t *child)
|
||||
{
|
||||
parent->b_right = child;
|
||||
child->b_parent = parent;
|
||||
}
|
||||
|
||||
/* get the immediate left-child of `node` (NULL if none). */
static inline btree_node_t *btree_left(btree_node_t *node)
{
    return node->b_left;
}

/* get the immediate right-child of `node` (NULL if none). */
static inline btree_node_t *btree_right(btree_node_t *node)
{
    return node->b_right;
}

/* get the immediate parent of `node` (NULL for the root). */
static inline btree_node_t *btree_parent(btree_node_t *node)
{
    return node->b_parent;
}

/* get the height of `node`.

   the height of a node is defined as the length of the longest path
   between the node and a leaf node.

   this count includes the node itself, so the height of a leaf node will be 1.
*/
static inline unsigned short btree_height(btree_node_t *node)
{
    return node->b_height;
}
|
||||
|
||||
#endif
|
||||
11
include/socks/locks.h
Normal file
11
include/socks/locks.h
Normal file
@@ -0,0 +1,11 @@
|
||||
#ifndef SOCKS_LOCKS_H_
#define SOCKS_LOCKS_H_

/* spin lock state word, 8-byte aligned. 0 means unlocked (see SPIN_LOCK_INIT). */
typedef int __attribute__((aligned(8))) spin_lock_t;

/* static initialiser for an unlocked spin_lock_t */
#define SPIN_LOCK_INIT ((spin_lock_t)0)

/* acquire/release `lck`.
   NOTE(review): implementations are not visible here — based on the irqsave/irqrestore
   naming, spin_lock_irqsave presumably disables interrupts and stores the previous
   interrupt state in *flags, which spin_unlock_irqrestore restores; confirm against
   the implementing .c file. */
extern void spin_lock_irqsave(spin_lock_t *lck, unsigned long *flags);
extern void spin_unlock_irqrestore(spin_lock_t *lck, unsigned long flags);

#endif
|
||||
302
include/socks/memblock.h
Normal file
302
include/socks/memblock.h
Normal file
@@ -0,0 +1,302 @@
|
||||
/*
|
||||
The Clear BSD License
|
||||
|
||||
Copyright (c) 2023 Max Wash
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted (subject to the limitations in the disclaimer
|
||||
below) provided that the following conditions are met:
|
||||
|
||||
- Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
- Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
- Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
*/
|
||||
#ifndef SOCKS_MEMBLOCK_H_
#define SOCKS_MEMBLOCK_H_

#include <stddef.h>
#include <limits.h>
#include <socks/types.h>

/* initial capacity of the memblock.memory / memblock.reserved region arrays */
#define MEMBLOCK_INIT_MEMORY_REGION_COUNT 128
#define MEMBLOCK_INIT_RESERVED_REGION_COUNT 128

/* iteration backend shared by the for_each_*_mem_range() wrappers below.
   walks the regions of `type_a`; when `type_b` is non-NULL, the regions of
   `type_b` are excluded from the walk (see for_each_free_mem_range()).
   iteration terminates when __next_memory_region() sets (i)->__idx to ULLONG_MAX. */
#define __for_each_mem_range(i, type_a, type_b, p_start, p_end) \
    for ((i)->__idx = 0, __next_memory_region(i, type_a, type_b, p_start, p_end); \
         (i)->__idx != ULLONG_MAX; \
         __next_memory_region(i, type_a, type_b, p_start, p_end))
|
||||
|
||||
/* iterate through all memory regions known to memblock.
|
||||
|
||||
this consists of all regions that have been registered
|
||||
with memblock using memblock_add().
|
||||
|
||||
this iteration can be optionally constrained to a given region.
|
||||
|
||||
@param i the iterator. this should be a pointer of type memblock_iter_t.
|
||||
for each iteration, this structure will be filled with details about
|
||||
the current memory region.
|
||||
@param p_start the lower bound of the memory region to iterate through.
|
||||
if you don't want to use a lower bound, pass 0.
|
||||
@param p_end the upper bound of the memory region to iterate through.
|
||||
if you don't want to use an upper bound, pass UINTPTR_MAX.
|
||||
|
||||
EXAMPLE: to iterate through all memory regions (with no bounds):
|
||||
|
||||
memblock_iter_t it;
|
||||
for_each_mem_range (&it, 0x0, UINTPTR_MAX) { ... }
|
||||
|
||||
|
||||
EXAMPLE: to iterate through all memory regions between physical
|
||||
addresses 0x40000 and 0x80000:
|
||||
|
||||
memblock_iter_t it;
|
||||
for_each_mem_range (&it, 0x40000, 0x80000) { ... }
|
||||
*/
|
||||
#define for_each_mem_range(i, p_start, p_end) \
|
||||
__for_each_mem_range(i, &memblock.memory, NULL, p_start, p_end)
|
||||
|
||||
/* iterate through all memory regions reserved using memblock.
|
||||
|
||||
this consists of all regions that have been registered
|
||||
with memblock using memblock_reserve().
|
||||
|
||||
this iteration can be optionally constrained to a given region.
|
||||
|
||||
@param i the iterator. this should be a pointer of type memblock_iter_t.
|
||||
for each iteration, this structure will be filled with details about
|
||||
the current memory region.
|
||||
@param p_start the lower bound of the memory region to iterate through.
|
||||
if you don't want to use a lower bound, pass 0.
|
||||
@param p_end the upper bound of the memory region to iterate through.
|
||||
if you don't want to use an upper bound, pass UINTPTR_MAX.
|
||||
|
||||
EXAMPLE: to iterate through all reserved memory regions (with no bounds):
|
||||
|
||||
memblock_iter_t it;
|
||||
for_each_reserved_mem_range (&it, 0x0, UINTPTR_MAX) { ... }
|
||||
|
||||
|
||||
EXAMPLE: to iterate through all reserved memory regions between physical
|
||||
addresses 0x40000 and 0x80000:
|
||||
|
||||
memblock_iter_t it;
|
||||
for_each_reserved_mem_range (&it, 0x40000, 0x80000) { ... }
|
||||
*/
|
||||
#define for_each_reserved_mem_range(i, p_start, p_end) \
|
||||
__for_each_mem_range(i, &memblock.reserved, NULL, p_start, p_end)
|
||||
|
||||
/* iterate through all memory regions known by memblock to be free.
|
||||
|
||||
this consists of all regions BETWEEN those regions that have been
|
||||
registered using memblock_reserve(), bounded within the memory
|
||||
regions added using memblock_add().
|
||||
|
||||
this iteration can be optionally constrained to a given region.
|
||||
|
||||
@param i the iterator. this should be a pointer of type memblock_iter_t.
|
||||
for each iteration, this structure will be filled with details about
|
||||
the current memory region.
|
||||
@param p_start the lower bound of the memory region to iterate through.
|
||||
if you don't want to use a lower bound, pass 0.
|
||||
@param p_end the upper bound of the memory region to iterate through.
|
||||
if you don't want to use an upper bound, pass UINTPTR_MAX.
|
||||
|
||||
EXAMPLE: if you have added the following memory regions to
|
||||
memblock using memblock_add():
|
||||
|
||||
- 0x00000 -> 0x05fff
|
||||
- 0x08000 -> 0x1ffff
|
||||
|
||||
...and you have reserved the following memory regions using
|
||||
memblock_reserve():
|
||||
|
||||
- 0x01000 -> 0x04fff
|
||||
- 0x09000 -> 0x0ffff
|
||||
|
||||
the following call:
|
||||
|
||||
memblock_iter_t it;
|
||||
for_each_free_mem_range (&it, 0x0, UINTPTR_MAX) { ... }
|
||||
|
||||
would iterate through the following sequence of free memory ranges:
|
||||
|
||||
- 0x00000 -> 0x00fff
|
||||
- 0x05000 -> 0x05fff
|
||||
- 0x08000 -> 0x08fff
|
||||
- 0x10000 -> 0x1ffff
|
||||
*/
|
||||
#define for_each_free_mem_range(i, p_start, p_end) \
|
||||
__for_each_mem_range(i, &memblock.memory, &memblock.reserved, p_start, p_end)
|
||||
|
||||
typedef uint64_t memblock_index_t;
|
||||
|
||||
typedef enum memblock_region_status {
|
||||
/* Used in memblock.memory regions, indicates that the memory region exists */
|
||||
MEMBLOCK_MEMORY = 0,
|
||||
/* Used in memblock.reserved regions, indicates that the memory region was reserved
|
||||
* by a call to memblock_alloc() */
|
||||
MEMBLOCK_ALLOC,
|
||||
/* Used in memblock.reserved regions, indicates that the memory region was reserved
|
||||
* by a call to memblock_reserve() */
|
||||
MEMBLOCK_RESERVED,
|
||||
} memblock_region_status_t;
|
||||
|
||||
typedef struct memblock_region {
|
||||
/* the status of the memory region (free, reserved, allocated, etc) */
|
||||
memblock_region_status_t status;
|
||||
/* the address of the first byte that makes up the region */
|
||||
phys_addr_t base;
|
||||
/* the address of the last byte that makes up the region */
|
||||
phys_addr_t limit;
|
||||
} memblock_region_t;
|
||||
|
||||
/* buffer of memblock regions, all of which are the same type
   (memory, reserved, etc) */
typedef struct memblock_type {
    struct memblock_region *regions; /* region array */
    unsigned int count;              /* number of entries currently in use */
    unsigned int max;                /* capacity of `regions` */
    const char *name;                /* human-readable name for this region list */
} memblock_type_t;
|
||||
|
||||
typedef struct memblock {
|
||||
/* bounds of the memory region that can be used by memblock_alloc()
|
||||
both of these are virtual addresses */
|
||||
uintptr_t m_alloc_start, m_alloc_end;
|
||||
/* memblock assumes that all memory in the alloc zone is contiguously mapped
|
||||
(if paging is enabled). m_voffset is the offset that needs to be added to
|
||||
a given physical address to get the corresponding virtual address */
|
||||
uintptr_t m_voffset;
|
||||
|
||||
struct memblock_type memory;
|
||||
struct memblock_type reserved;
|
||||
} memblock_t;
|
||||
|
||||
/* iterator state for __for_each_mem_range() and friends.
   filled in by __next_memory_region() on each step. */
typedef struct memblock_iter {
    memblock_index_t __idx;             /* internal cursor; ULLONG_MAX marks end of iteration */
    phys_addr_t it_base;                /* physical address of the first byte of the current region */
    phys_addr_t it_limit;               /* physical address of the last byte of the current region */
    memblock_region_status_t it_status; /* status of the current region */
} memblock_iter_t;
|
||||
|
||||
/* global memblock state. */
|
||||
extern memblock_t memblock;
|
||||
|
||||
extern int __next_mem_range(memblock_iter_t *it);
|
||||
|
||||
/* initialise the global memblock state.
|
||||
this function must be called before any other memblock functions can be used.
|
||||
|
||||
this function sets the bounds of the heap area. memory allocation requests
|
||||
using memblock_alloc() will be constrained to this zone.
|
||||
|
||||
memblock assumes that all physical memory in the system is mapped to
|
||||
an area in virtual memory, such that converting a physical address to
|
||||
a valid virtual address can be done by simply applying an offset.
|
||||
|
||||
@param alloc_start the virtual address of the start of the heap area.
|
||||
@param alloc_end the virtual address of the end of the heap area.
|
||||
@param voffset the offset between the physical address of a given page and
|
||||
its corresponding virtual address.
|
||||
*/
|
||||
extern int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset);
|
||||
|
||||
/* add a region of memory to memblock.
|
||||
|
||||
this function is used to define regions of memory that are accessible, but
|
||||
says nothing about the STATE of the given memory.
|
||||
|
||||
all memory is free by default. once a region of memory is added,
|
||||
memblock_reserve() can be used to mark the memory as reserved.
|
||||
|
||||
@param base the physical address of the start of the memory region to add.
|
||||
@param size the size of the memory region to add in bytes.
|
||||
*/
|
||||
extern int memblock_add(phys_addr_t base, size_t size);
|
||||
/* mark a region of memory as reserved.
|
||||
|
||||
this function can only operate on regions of memory that have been previously
|
||||
registered with memblock using memblock_add().
|
||||
|
||||
reserved memory will not be used by memblock_alloc(), and will remain
|
||||
reserved when the vm_page memory map is initialised.
|
||||
|
||||
@param base the physical address of the start of the memory region to reserve.
|
||||
@param size the size of the memory region to reserve in bytes.
|
||||
*/
|
||||
extern int memblock_reserve(phys_addr_t base, size_t size);
|
||||
|
||||
/* allocate a block of memory, returning a virtual address.
|
||||
|
||||
this function selects the first available region of memory that satisfies
|
||||
the requested allocation size, marks `size` bytes of this region as reserved,
|
||||
and returns the virtual address of the region.
|
||||
|
||||
when looking for a suitable region of memory, this function searches the
|
||||
intersection of the following memory zones:
|
||||
- the regions of memory added with memblock_add().
|
||||
- the region of memory specified as the heap bounds during the call
|
||||
to memblock_init().
|
||||
and excludes the following regions:
|
||||
- the regions of memory marked as reserved by memblock_reserve() and
|
||||
previous calls to memblock_alloc()
|
||||
|
||||
@param size the size of the buffer to allocate in bytes.
|
||||
*/
|
||||
extern void *memblock_alloc(size_t size);
|
||||
|
||||
/* allocate a block of memory, returning a physical address.
|
||||
|
||||
this function selects the first available region of memory that satisfies
|
||||
the requested allocation size, marks `size` bytes of this region as reserved,
|
||||
and returns the physical address of the region.
|
||||
|
||||
when looking for a suitable region of memory, this function searches the
|
||||
intersection of the following memory zones:
|
||||
- the regions of memory added with memblock_add().
|
||||
- the region of memory specified as the heap bounds during the call
|
||||
to memblock_init().
|
||||
and excludes the following regions:
|
||||
- the regions of memory marked as reserved by memblock_reserve() and
|
||||
previous calls to memblock_alloc()
|
||||
|
||||
@param size the size of the buffer to allocate in bytes.
|
||||
*/
|
||||
extern phys_addr_t memblock_alloc_phys(size_t size);
|
||||
|
||||
/* free a block of memory using its virtual address.
|
||||
|
||||
due to limitations in memblock (as it is meant to be a simple,
|
||||
early-boot allocator), you must specify the size of the memory
|
||||
region you intend to free.
|
||||
|
||||
@param addr the virtual address of the region to free.
|
||||
@param size the size of the region to free in bytes.
|
||||
*/
|
||||
extern int memblock_free(void *addr, size_t size);
|
||||
|
||||
/* free a block of memory using its physical address.
|
||||
|
||||
due to limitations in memblock (as it is meant to be a simple,
|
||||
early-boot allocator), you must specify the size of the memory
|
||||
region you intend to free.
|
||||
|
||||
@param addr the physical address of the region to free.
|
||||
@param size the size of the region to free in bytes.
|
||||
*/
|
||||
extern int memblock_free_phys(phys_addr_t addr, size_t size);
|
||||
|
||||
/* advance `it` to the next region of `type_a` within [start, end], excluding the
   regions of `type_b` when it is non-NULL. sets it->__idx to ULLONG_MAX when the
   iteration is exhausted. used by the __for_each_mem_range() macro; not normally
   called directly.
   (removed a stray '\' line-continuation left over from a macro copy-paste —
   harmless after line splicing, but misleading in a plain declaration.) */
extern void __next_memory_region(memblock_iter_t *it,
                                 memblock_type_t *type_a, memblock_type_t *type_b,
                                 phys_addr_t start, phys_addr_t end);
|
||||
|
||||
#endif
|
||||
54
include/socks/queue.h
Normal file
54
include/socks/queue.h
Normal file
@@ -0,0 +1,54 @@
|
||||
#ifndef SOCKS_QUEUE_H_
|
||||
#define SOCKS_QUEUE_H_
|
||||
|
||||
#include <stddef.h> /* offsetof(), NULL — required by QUEUE_CONTAINER */
#include <stdint.h> /* uintptr_t — required by QUEUE_CONTAINER */
#include <string.h>
#include <stdbool.h>

/* convert a pointer to an embedded queue_entry_t member back to a pointer to its
   containing structure. NULL maps to NULL.

   @param t the name of the container type (something that can be passed to offsetof)
   @param m the name of the queue_entry_t member variable within the container type.
   @param v the queue_entry_t pointer to convert.
*/
#define QUEUE_CONTAINER(t, m, v) ((void *)((v) ? (uintptr_t)(v) - (offsetof(t, m)) : 0))

/* static initialisers for an empty queue / an unlinked entry */
#define QUEUE_INIT ((queue_t){ .q_first = NULL, .q_last = NULL })
#define QUEUE_ENTRY_INIT ((queue_entry_t){ .qe_next = NULL, .qe_prev = NULL })
|
||||
|
||||
/* iterate a queue front-to-back. the iterator variable is declared inside the
   for statement and is scoped to the loop body; the loop terminates because
   QUEUE_CONTAINER maps the NULL returned by queue_first()/queue_next() at the
   end of the list to NULL.

   @param iter_type the container type of the queue entries (not a pointer).
   @param iter_name the name of the iterator variable.
   @param queue_name a pointer to the queue to traverse.
   @param node_member the name of the queue_entry_t member within `iter_type`.
*/
#define queue_foreach(iter_type, iter_name, queue_name, node_member) \
    for (iter_type *iter_name = QUEUE_CONTAINER(iter_type, node_member, queue_first(queue_name)); \
         iter_name; \
         iter_name = QUEUE_CONTAINER(iter_type, node_member, queue_next(&((iter_name)->node_member))))

/* iterate a queue back-to-front. see queue_foreach() for parameter details. */
#define queue_foreach_r(iter_type, iter_name, queue_name, node_member) \
    for (iter_type *iter_name = QUEUE_CONTAINER(iter_type, node_member, queue_last(queue_name)); \
         iter_name; \
         iter_name = QUEUE_CONTAINER(iter_type, node_member, queue_prev(&((iter_name)->node_member))))
|
||||
|
||||
/* intrusive doubly-linked list link. embed one of these in your own structure
   and use QUEUE_CONTAINER to recover the containing object from a link pointer. */
typedef struct queue_entry {
    struct queue_entry *qe_next; /* next entry towards the back of the queue */
    struct queue_entry *qe_prev; /* previous entry towards the front of the queue */
} queue_entry_t;

/* queue head. can be declared directly; initialise with QUEUE_INIT or queue_init(). */
typedef struct queue {
    queue_entry_t *q_first; /* front entry; NULL when the queue is empty (see queue_empty()) */
    queue_entry_t *q_last;  /* back entry */
} queue_t;
|
||||
|
||||
/* zero-initialise `q` in place (equivalent to assigning QUEUE_INIT). */
static inline void queue_init(queue_t *q) { memset(q, 0x00, sizeof *q); }
/* true when `q` holds no entries (q_first is NULL). */
static inline bool queue_empty(queue_t *q) { return q->q_first == NULL; }

/* plain accessors; each returns NULL when no such entry exists. */
static inline queue_entry_t *queue_first(queue_t *q) { return q->q_first; }
static inline queue_entry_t *queue_last(queue_t *q) { return q->q_last; }
static inline queue_entry_t *queue_next(queue_entry_t *entry) { return entry->qe_next; }
static inline queue_entry_t *queue_prev(queue_entry_t *entry) { return entry->qe_prev; }
|
||||
|
||||
/* number of entries currently in `q`. */
extern size_t queue_length(queue_t *q);

/* insert `entry` immediately before / after an existing entry of `q`. */
extern void queue_insert_before(queue_t *q, queue_entry_t *entry, queue_entry_t *before);
extern void queue_insert_after(queue_t *q, queue_entry_t *entry, queue_entry_t *after);

/* add `entry` at the front / back of `q`. */
extern void queue_push_front(queue_t *q, queue_entry_t *entry);
extern void queue_push_back(queue_t *q, queue_entry_t *entry);

/* detach and return the front / back entry.
   NOTE(review): behaviour on an empty queue is not visible here — presumably
   returns NULL; confirm in the implementing .c file. */
extern queue_entry_t *queue_pop_front(queue_t *q);
extern queue_entry_t *queue_pop_back(queue_t *q);

/* unlink `entry` (resp. every entry) from `q`. entries are intrusive, so
   NOTE(review): presumably no memory is freed — confirm in the implementing .c file. */
extern void queue_delete(queue_t *q, queue_entry_t *entry);
extern void queue_delete_all(queue_t *q);

#endif
|
||||
|
||||
#endif
|
||||
8
include/socks/types.h
Normal file
8
include/socks/types.h
Normal file
@@ -0,0 +1,8 @@
|
||||
#ifndef SOCKS_TYPES_H_
#define SOCKS_TYPES_H_

#include <stdint.h>

/* a physical memory address. defined as uintptr_t so it is wide enough to
   round-trip through pointer arithmetic (see memblock's voffset handling). */
typedef uintptr_t phys_addr_t;

#endif
|
||||
8
include/socks/util.h
Normal file
8
include/socks/util.h
Normal file
@@ -0,0 +1,8 @@
|
||||
#ifndef SOCKS_UTIL_H_
#define SOCKS_UTIL_H_

#include <stddef.h>

/* render the byte count `value` as a human-readable string into out[0..outsz).
   NOTE(review): implementation not visible here — exact formatting (units,
   truncation behaviour) should be confirmed in the implementing .c file. */
extern void data_size_to_string(size_t value, char *out, size_t outsz);

#endif
|
||||
241
include/socks/vm.h
Normal file
241
include/socks/vm.h
Normal file
@@ -0,0 +1,241 @@
|
||||
#ifndef SOCKS_VM_H_
#define SOCKS_VM_H_

#include <stddef.h>
#include <socks/types.h>
#include <socks/status.h>
#include <socks/queue.h>
#include <socks/locks.h>

/* maximum number of NUMA nodes */
#define VM_MAX_NODES 64
/* maximum number of memory zones per node */
#define VM_MAX_ZONES (VM_ZONE_MAX + 1)
/* maximum number of supported page orders */
#define VM_MAX_PAGE_ORDERS (VM_PAGE_MAX_ORDER + 1)

/* evaluates to 1 when (p & mask) == p, i.e. `p` has no bits set outside `mask`.
   NOTE(review): for this to act as an alignment check, callers must pass an
   inverted alignment mask (e.g. ~(align - 1)) — confirm at the call sites. */
#define VM_CHECK_ALIGN(p, mask) ((((p) & (mask)) == (p)) ? 1 : 0)
/* base page size: 4 KiB == (1 << VM_PAGE_SHIFT) */
#define VM_PAGE_SIZE 0x1000
#define VM_PAGE_SHIFT 12

/* a cache counts as initialised once its object count is non-zero */
#define VM_CACHE_INITIALISED(c) ((c)->c_obj_count != 0)
/* a page is free when it is neither reserved nor allocated (see vm_page_flags) */
#define VM_PAGE_IS_FREE(pg) (((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0)

/* iterate `pg` and each page returned by vm_page_get_next_tail() until NULL.
   NOTE(review): vm_page_get_next_tail() is declared elsewhere — presumably this
   walks the tail pages of a huge page (see VM_PAGE_HEAD/VM_PAGE_HUGE); confirm. */
#define vm_page_foreach(pg, i) \
    for (vm_page_t *i = (pg); i; i = vm_page_get_next_tail(i))
|
||||
|
||||
typedef phys_addr_t vm_alignment_t;
|
||||
typedef unsigned int vm_node_id_t;
|
||||
|
||||
typedef struct vm_object {
|
||||
unsigned int reserved;
|
||||
} vm_object_t;
|
||||
|
||||
typedef enum vm_flags {
|
||||
VM_GET_DMA = 0x01u,
|
||||
} vm_flags_t;
|
||||
|
||||
typedef enum vm_zone_id {
|
||||
/* NOTE that these are used as indices into the node_zones array in vm/zone.c
|
||||
they need to be continuous, and must start at 0! */
|
||||
VM_ZONE_DMA = 0u,
|
||||
VM_ZONE_NORMAL = 1u,
|
||||
VM_ZONE_HIGHMEM = 2u,
|
||||
VM_ZONE_MIN = VM_ZONE_DMA,
|
||||
VM_ZONE_MAX = VM_ZONE_HIGHMEM,
|
||||
} vm_zone_id_t;
|
||||
|
||||
typedef enum vm_page_order {
|
||||
VM_PAGE_4K = 0u,
|
||||
VM_PAGE_8K,
|
||||
VM_PAGE_16K,
|
||||
VM_PAGE_32K,
|
||||
VM_PAGE_64K,
|
||||
VM_PAGE_128K,
|
||||
VM_PAGE_256K,
|
||||
VM_PAGE_512K,
|
||||
VM_PAGE_1M,
|
||||
VM_PAGE_2M,
|
||||
VM_PAGE_4M,
|
||||
VM_PAGE_8M,
|
||||
VM_PAGE_16M,
|
||||
VM_PAGE_32M,
|
||||
VM_PAGE_64M,
|
||||
VM_PAGE_128M,
|
||||
#if 0
|
||||
/* vm_page_t only has 4 bits to store the page order with.
|
||||
the maximum order that can be stored in 4 bits is 15 (VM_PAGE_128M)
|
||||
to use any of the page orders listed here, this field
|
||||
will have to be expanded. */
|
||||
VM_PAGE_256M,
|
||||
VM_PAGE_512M,
|
||||
VM_PAGE_1G,
|
||||
#endif
|
||||
VM_PAGE_MIN_ORDER = VM_PAGE_4K,
|
||||
VM_PAGE_MAX_ORDER = VM_PAGE_8M,
|
||||
} vm_page_order_t;
|
||||
|
||||
typedef enum vm_page_flags {
|
||||
/* page is reserved (probably by a call to memblock_reserve()) and cannot be
|
||||
returned by any allocation function */
|
||||
VM_PAGE_RESERVED = 0x01u,
|
||||
/* page has been allocated by a zone's buddy allocator, and is in-use */
|
||||
VM_PAGE_ALLOC = 0x02u,
|
||||
/* page is the first page of a huge-page */
|
||||
VM_PAGE_HEAD = 0x04u,
|
||||
/* page is part of a huge-page */
|
||||
VM_PAGE_HUGE = 0x08u,
|
||||
} vm_page_flags_t;
|
||||
|
||||
typedef enum vm_memory_region_status {
|
||||
VM_REGION_FREE = 0x01u,
|
||||
VM_REGION_RESERVED = 0x02u,
|
||||
} vm_memory_region_status_t;
|
||||
|
||||
typedef enum vm_cache_flags {
|
||||
VM_CACHE_OFFSLAB = 0x01u,
|
||||
VM_CACHE_DMA = 0x02u
|
||||
} vm_cache_flags_t;
|
||||
|
||||
typedef struct vm_zone_descriptor {
|
||||
vm_zone_id_t zd_id;
|
||||
vm_node_id_t zd_node;
|
||||
const char zd_name[32];
|
||||
phys_addr_t zd_base;
|
||||
phys_addr_t zd_limit;
|
||||
} vm_zone_descriptor_t;
|
||||
|
||||
typedef struct vm_zone {
|
||||
vm_zone_descriptor_t z_info;
|
||||
spin_lock_t z_lock;
|
||||
|
||||
queue_t z_free_pages[VM_MAX_PAGE_ORDERS];
|
||||
unsigned long z_size;
|
||||
} vm_zone_t;
|
||||
|
||||
typedef struct vm_pg_data {
|
||||
vm_zone_t pg_zones[VM_MAX_ZONES];
|
||||
} vm_pg_data_t;
|
||||
|
||||
typedef struct vm_region {
|
||||
vm_memory_region_status_t r_status;
|
||||
phys_addr_t r_base;
|
||||
phys_addr_t r_limit;
|
||||
} vm_region_t;
|
||||
|
||||
typedef struct vm_cache {
|
||||
const char *c_name;
|
||||
vm_cache_flags_t c_flags;
|
||||
queue_entry_t c_list;
|
||||
|
||||
queue_t c_slabs_full;
|
||||
queue_t c_slabs_partial;
|
||||
queue_t c_slabs_empty;
|
||||
|
||||
spin_lock_t c_lock;
|
||||
|
||||
/* number of objects that can be stored in a single slab */
|
||||
unsigned int c_obj_count;
|
||||
/* the size of object kept in the cache */
|
||||
unsigned int c_obj_size;
|
||||
/* combined size of vm_slab_t and the freelist */
|
||||
unsigned int c_hdr_size;
|
||||
/* offset from one object to the next in a slab.
|
||||
this may be different from c_obj_size as
|
||||
we enforce a 16-byte alignment on allocated objects */
|
||||
unsigned int c_stride;
|
||||
/* size of page used for slabs */
|
||||
unsigned int c_page_order;
|
||||
} vm_cache_t;
|
||||
|
||||
typedef struct vm_slab {
|
||||
vm_cache_t *s_cache;
|
||||
/* queue entry for vm_cache_t.c_slabs_* */
|
||||
queue_entry_t s_list;
|
||||
/* pointer to the first object slot. */
|
||||
void *s_objects;
|
||||
/* the number of objects allocated on the slab. */
|
||||
unsigned int s_obj_allocated;
|
||||
/* the index of the next free object.
|
||||
if s_free is equal to FREELIST_END (defined in vm/cache.c)
|
||||
there are no free slots left in the slab. */
|
||||
unsigned int s_free;
|
||||
/* list of free object slots.
|
||||
when allocating:
|
||||
- s_free should be set to the value of s_freelist[s_free]
|
||||
when freeing:
|
||||
- s_free should be set to the index of the object being freed.
|
||||
- s_freelist[s_free] should be set to the previous value of s_free.
|
||||
*/
|
||||
unsigned int s_freelist[];
|
||||
} vm_slab_t;
|
||||
|
||||
typedef struct vm_page {
|
||||
/* order of the page block that this page belongs too */
|
||||
uint16_t p_order : 4;
|
||||
/* the id of the NUMA node that this page belongs to */
|
||||
uint16_t p_node : 6;
|
||||
/* the id of the memory zone that this page belongs to */
|
||||
uint16_t p_zone : 3;
|
||||
/* some unused bits */
|
||||
uint16_t p_reserved : 3;
|
||||
|
||||
/* vm_page_flags_t bitfields. */
|
||||
uint32_t p_flags;
|
||||
|
||||
/* multi-purpose list.
|
||||
the owner of the page can decide what to do with this.
|
||||
some examples:
|
||||
- the buddy allocator uses this to maintain its per-zone free-page lists.
|
||||
*/
|
||||
queue_entry_t p_list;
|
||||
|
||||
/* owner-specific data */
|
||||
union {
|
||||
struct {
|
||||
vm_slab_t *p_slab;
|
||||
};
|
||||
};
|
||||
|
||||
} __attribute__((aligned(2 * sizeof(unsigned long)))) vm_page_t;
|
||||
|
||||
extern kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones);
|
||||
|
||||
extern vm_pg_data_t *vm_pg_data_get(vm_node_id_t node);
|
||||
|
||||
extern phys_addr_t vm_virt_to_phys(void *p);
|
||||
|
||||
extern void vm_page_init_array();
|
||||
extern vm_page_t *vm_page_get(phys_addr_t addr);
|
||||
extern phys_addr_t vm_page_get_paddr(vm_page_t *pg);
|
||||
extern vm_zone_t *vm_page_get_zone(vm_page_t *pg);
|
||||
extern void *vm_page_get_vaddr(vm_page_t *pg);
|
||||
extern size_t vm_page_get_pfn(vm_page_t *pg);
|
||||
extern size_t vm_page_order_to_bytes(vm_page_order_t order);
|
||||
extern size_t vm_page_order_to_pages(vm_page_order_t order);
|
||||
extern vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order);
|
||||
extern vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags);
|
||||
extern void vm_page_free(vm_page_t *pg);
|
||||
|
||||
extern int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b);
|
||||
extern vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b);
|
||||
extern vm_page_t *vm_page_get_buddy(vm_page_t *pg);
|
||||
extern vm_page_t *vm_page_get_next_tail(vm_page_t *pg);
|
||||
|
||||
extern size_t vm_bytes_to_pages(size_t bytes);
|
||||
|
||||
extern void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info);
|
||||
extern vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags);
|
||||
extern void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg);
|
||||
|
||||
extern vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags);
|
||||
extern void vm_cache_init(vm_cache_t *cache);
|
||||
extern void vm_cache_destroy(vm_cache_t *cache);
|
||||
extern void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags);
|
||||
extern void vm_cache_free(vm_cache_t *cache, void *p);
|
||||
|
||||
extern void *kmalloc(size_t count, vm_flags_t flags);
|
||||
extern void *kzalloc(size_t count, vm_flags_t flags);
|
||||
extern void kfree(void *p);
|
||||
|
||||
#endif
|
||||
Reference in New Issue
Block a user