sandbox: moved all sources to main kernel tree
This commit is contained in:
@@ -1,9 +0,0 @@
|
||||
#ifndef SOCKS_STATUS_H_
#define SOCKS_STATUS_H_

/* generic status code returned by kernel interfaces.
   0 means success; non-zero values are error codes. */
typedef unsigned int kern_status_t;

/* the operation completed successfully */
#define KERN_OK (0)
/* the requested operation is not implemented */
#define KERN_ERR_UNIMPLEMENTED (1)

#endif
|
||||
@@ -1,8 +0,0 @@
|
||||
#ifndef SOCKS_TYPES_H_
#define SOCKS_TYPES_H_

#include <stdint.h>

/* integer type for physical memory addresses.
   uintptr_t guarantees it is wide enough to round-trip a pointer value. */
typedef uintptr_t phys_addr_t;

#endif
|
||||
@@ -1,704 +0,0 @@
|
||||
/*
|
||||
The Clear BSD License
|
||||
|
||||
Copyright (c) 2023 Max Wash
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted (subject to the limitations in the disclaimer
|
||||
below) provided that the following conditions are met:
|
||||
|
||||
- Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
- Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
- Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
*/
|
||||
|
||||
/* templated AVL binary tree implementation
|
||||
|
||||
this file implements an extensible AVL binary tree data structure.
|
||||
|
||||
the primary rule of an AVL binary tree is that for a given node N,
|
||||
the heights of N's left and right subtrees can differ by at most 1.
|
||||
|
||||
the height of a subtree is the length of the longest path between
|
||||
the root of the subtree and a leaf node, including the root node itself.
|
||||
|
||||
the height of a leaf node is 1.
|
||||
|
||||
when a node is inserted into or deleted from the tree, this rule may
|
||||
be broken, in which case the tree must be rotated to restore the balance.
|
||||
|
||||
no more than one rotation is required for any insert operations,
|
||||
while multiple rotations may be required for a delete operation.
|
||||
|
||||
there are four types of rotations that can be applied to a tree:
|
||||
- left rotation
|
||||
- right rotation
|
||||
- double left rotations
|
||||
- double right rotations
|
||||
|
||||
by enforcing the balance rule, for a tree with n nodes, the worst-case
|
||||
performance for insert, delete, and search operations is guaranteed
|
||||
to be O(log n).
|
||||
|
||||
this file intentionally excludes any kind of search function implementation.
|
||||
it is up to the programmer to implement their own tree node type
|
||||
using btree_node_t, and their own search function using btree_t.
|
||||
this allows the programmer to define their own node types with complex
|
||||
non-integer key types. btree.h contains a number of macros to help
|
||||
define these functions. the macros do all the work, you just have to
|
||||
provide a comparator function.
|
||||
*/
|
||||
|
||||
#include <socks/btree.h>
|
||||
#include <stddef.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <assert.h>
|
||||
|
||||
/* NOTE: these macros evaluate their arguments more than once; only use them
   with side-effect-free expressions. (MIN is currently unused in this file.) */
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* true when c is the left/right child of p (false if either pointer is NULL) */
#define IS_LEFT_CHILD(p, c) ((p) && (c) && ((p)->b_left == (c)))
#define IS_RIGHT_CHILD(p, c) ((p) && (c) && ((p)->b_right == (c)))

#define HAS_LEFT_CHILD(x) ((x) && ((x)->b_left))
#define HAS_RIGHT_CHILD(x) ((x) && ((x)->b_right))

/* child-count predicates used by btree_delete to pick a removal strategy */
#define HAS_NO_CHILDREN(x) ((x) && (!(x)->b_left) && (!(x)->b_right))
#define HAS_ONE_CHILD(x) ((HAS_LEFT_CHILD(x) && !HAS_RIGHT_CHILD(x)) || (!HAS_LEFT_CHILD(x) && HAS_RIGHT_CHILD(x)))
#define HAS_TWO_CHILDREN(x) (HAS_LEFT_CHILD(x) && HAS_RIGHT_CHILD(x))

/* cached height of a subtree; an empty (NULL) subtree has height 0 */
#define HEIGHT(x) ((x) ? (x)->b_height : 0)
|
||||
|
||||
/* recompute the cached height of a single node from its children.
   a leaf ends up with height 1; a missing child contributes 0. */
static inline void update_height(btree_node_t *x)
{
	unsigned short lh = HEIGHT(x->b_left);
	unsigned short rh = HEIGHT(x->b_right);

	x->b_height = (lh > rh ? lh : rh) + 1;
}
|
||||
|
||||
/* balance factor of a node: height(right subtree) - height(left subtree).
   a NULL node has a balance factor of 0. the tree is AVL-balanced at x
   when the result is in [-1, 1]. */
static inline int bf(btree_node_t *x)
{
	if (!x) {
		return 0;
	}

	return (int)HEIGHT(x->b_right) - (int)HEIGHT(x->b_left);
}
|
||||
|
||||
/* perform a left rotation on a subtree
|
||||
|
||||
if you have a tree like this:
|
||||
|
||||
Z
|
||||
/ \
|
||||
X .
|
||||
/ \
|
||||
. Y
|
||||
/ \
|
||||
. .
|
||||
|
||||
and you perform a left rotation on node X,
|
||||
you will get the following tree:
|
||||
|
||||
Z
|
||||
/ \
|
||||
Y .
|
||||
/ \
|
||||
X .
|
||||
/ \
|
||||
. .
|
||||
|
||||
note that this function does NOT update b_height for the rotated
|
||||
nodes. it is up to you to call update_height_to_root().
|
||||
*/
|
||||
static void rotate_left(btree_t *tree, btree_node_t *x)
{
	assert(x != NULL);

	/* y is x's right child; it becomes the new root of this subtree */
	btree_node_t *y = x->b_right;
	assert(y != NULL);

	/* sanity checks on the links we are about to rewire.
	   (the first assert is trivially true since y == x->b_right.) */
	assert(y == x->b_left || y == x->b_right);
	if (x->b_parent) {
		assert(x == x->b_parent->b_left || x == x->b_parent->b_right);
	}

	btree_node_t *p = x->b_parent;

	/* y's left subtree moves over to become x's right subtree */
	if (y->b_left) {
		y->b_left->b_parent = x;
	}

	x->b_right = y->b_left;

	/* attach y to x's old parent (or make it the new tree root) */
	if (!p) {
		tree->b_root = y;
	} else if (x == p->b_left) {
		p->b_left = y;
	} else {
		p->b_right = y;
	}

	/* finally, x becomes y's left child */
	x->b_parent = y;
	y->b_left = x;
	y->b_parent = p;
}
|
||||
|
||||
/* recompute the cached heights along the path from x up to the tree root.
   a NULL x is a no-op. */
static void update_height_to_root(btree_node_t *x)
{
	for (btree_node_t *cur = x; cur; cur = cur->b_parent) {
		update_height(cur);
	}
}
|
||||
|
||||
/* perform a right rotation on a subtree
|
||||
|
||||
if you have a tree like this:
|
||||
|
||||
Z
|
||||
/ \
|
||||
. X
|
||||
/ \
|
||||
Y .
|
||||
/ \
|
||||
. .
|
||||
|
||||
and you perform a right rotation on node X,
|
||||
you will get the following tree:
|
||||
|
||||
Z
|
||||
/ \
|
||||
. Y
|
||||
/ \
|
||||
. X
|
||||
/ \
|
||||
. .
|
||||
|
||||
note that this function does NOT update b_height for the rotated
|
||||
nodes. it is up to you to call update_height_to_root().
|
||||
*/
|
||||
static void rotate_right(btree_t *tree, btree_node_t *y)
{
	assert(y);

	/* x is y's left child; it becomes the new root of this subtree */
	btree_node_t *x = y->b_left;
	assert(x);

	/* sanity checks on the links we are about to rewire.
	   (the first assert is trivially true since x == y->b_left.) */
	assert(x == y->b_left || x == y->b_right);
	if (y->b_parent) {
		assert(y == y->b_parent->b_left || y == y->b_parent->b_right);
	}

	btree_node_t *p = y->b_parent;

	/* x's right subtree moves over to become y's left subtree */
	if (x->b_right) {
		x->b_right->b_parent = y;
	}

	y->b_left = x->b_right;

	/* attach x to y's old parent (or make it the new tree root) */
	if (!p) {
		tree->b_root = x;
	} else if (y == p->b_left) {
		p->b_left = x;
	} else {
		p->b_right = x;
	}

	/* finally, y becomes x's right child */
	y->b_parent = x;
	x->b_right = y;
	x->b_parent = p;
}
|
||||
|
||||
/* for a given node Z, perform a right rotation on Z's right child,
|
||||
followed by a left rotation on Z itself.
|
||||
|
||||
if you have a tree like this:
|
||||
|
||||
Z
|
||||
/ \
|
||||
. X
|
||||
/ \
|
||||
Y .
|
||||
/ \
|
||||
. .
|
||||
|
||||
and you perform a double-left rotation on node Z,
|
||||
you will get the following tree:
|
||||
|
||||
Y
|
||||
/ \
|
||||
/ \
|
||||
Z X
|
||||
/ \ / \
|
||||
. . . .
|
||||
|
||||
note that, unlike rotate_left and rotate_right, this function
|
||||
DOES update b_height for the rotated nodes (since it needs to be
|
||||
done in a certain order).
|
||||
*/
|
||||
static void rotate_double_left(btree_t *tree, btree_node_t *z)
{
	/* x is z's right child, y is x's left child.
	   after the two rotations, y is the root of this subtree with
	   z and x as its children (see the diagram above). */
	btree_node_t *x = z->b_right;
	btree_node_t *y = x->b_left;

	rotate_right(tree, x);
	rotate_left(tree, z);

	/* heights must be refreshed bottom-up: first the two demoted
	   nodes z and x, then everything on the path from y to the root */
	update_height(z);
	update_height(x);

	while (y) {
		update_height(y);
		y = y->b_parent;
	}
}
|
||||
|
||||
/* for a given node Z, perform a left rotation on Z's left child,
|
||||
followed by a right rotation on Z itself.
|
||||
|
||||
if you have a tree like this:
|
||||
|
||||
Z
|
||||
/ \
|
||||
X .
|
||||
/ \
|
||||
. Y
|
||||
/ \
|
||||
. .
|
||||
|
||||
and you perform a double-right rotation on node Z,
|
||||
you will get the following tree:
|
||||
|
||||
Y
|
||||
/ \
|
||||
/ \
|
||||
X Z
|
||||
/ \ / \
|
||||
. . . .
|
||||
|
||||
note that, unlike rotate_left and rotate_right, this function
|
||||
DOES update b_height for the rotated nodes (since it needs to be
|
||||
done in a certain order).
|
||||
*/
|
||||
static void rotate_double_right(btree_t *tree, btree_node_t *z)
{
	/* x is z's left child, y is x's right child.
	   after the two rotations, y is the root of this subtree with
	   x and z as its children (see the diagram above). */
	btree_node_t *x = z->b_left;
	btree_node_t *y = x->b_right;

	rotate_left(tree, x);
	rotate_right(tree, z);

	/* heights must be refreshed bottom-up: first the two demoted
	   nodes z and x, then everything on the path from y to the root */
	update_height(z);
	update_height(x);

	while (y) {
		update_height(y);
		y = y->b_parent;
	}
}
|
||||
|
||||
/* run after an insert operation. checks that the balance factor
|
||||
of the local subtree is within the range -1 <= BF <= 1. if it
|
||||
is not, rotate the subtree to restore balance.
|
||||
|
||||
note that at most one rotation should be required after a node
|
||||
is inserted into the tree.
|
||||
|
||||
this function depends on all nodes in the tree having
|
||||
correct b_height values.
|
||||
|
||||
@param w the node that was just inserted into the tree
|
||||
*/
|
||||
static void insert_fixup(btree_t *tree, btree_node_t *w)
{
	/* walk from the inserted node w towards the root. z is the ancestor
	   currently being checked; y and x trail one and two steps behind,
	   so they record the path the insertion took down the tree. */
	btree_node_t *z = NULL, *y = NULL, *x = NULL;

	z = w;
	while (z) {
		/* subtree rooted at z is balanced: nothing to do at this level */
		if (bf(z) >= -1 && bf(z) <= 1) {
			goto next_ancestor;
		}

		/* an imbalance can only appear at least two levels above the
		   inserted node, so x and y must both be set by now */
		assert(x && y && z);
		assert(x == y->b_left || x == y->b_right);
		assert(y == z->b_left || y == z->b_right);

		/* choose the rotation from the shape of the insertion path
		   (left-left, left-right, right-left, right-right).
		   the single rotations don't maintain heights themselves, so
		   they are followed by update_height_to_root; the double
		   rotations update heights internally. */
		if (IS_LEFT_CHILD(z, y)) {
			if (IS_LEFT_CHILD(y, x)) {
				/* left-left: single right rotation */
				rotate_right(tree, z);
				update_height_to_root(z);
			} else {
				/* left-right: double right rotation */
				rotate_double_right(tree, z);
			}
		} else {
			if (IS_LEFT_CHILD(y, x)) {
				/* right-left: double left rotation */
				rotate_double_left(tree, z);
			} else {
				/* right-right: single left rotation */
				rotate_left(tree, z);
				update_height_to_root(z);
			}
		}

next_ancestor:
		x = y;
		y = z;
		z = z->b_parent;
	}
}
|
||||
|
||||
/* run after a delete operation. checks that the balance factor
|
||||
of the local subtree is within the range -1 <= BF <= 1. if it
|
||||
is not, rotate the subtree to restore balance.
|
||||
|
||||
note that, unlike insert_fixup, multiple rotations may be required
|
||||
to restore balance after a node is deleted.
|
||||
|
||||
this function depends on all nodes in the tree having
|
||||
correct b_height values.
|
||||
|
||||
@param w one of the following:
|
||||
- the parent of the node that was deleted if the node
|
||||
had no children.
|
||||
- the parent of the node that replaced the deleted node
|
||||
if the deleted node had two children.
|
||||
- the node that replaced the node that was deleted, if
|
||||
the node that was deleted had one child.
|
||||
*/
|
||||
static void delete_fixup(btree_t *tree, btree_node_t *w)
{
	btree_node_t *z = w;

	/* unlike insertion, a deletion can require a rotation at more than
	   one ancestor, so every node on the path to the root is checked */
	while (z) {
		if (bf(z) > 1) {
			/* z is right-heavy. if the right child is not
			   left-heavy a single left rotation suffices,
			   otherwise a double left rotation is needed. */
			if (bf(z->b_right) >= 0) {
				rotate_left(tree, z);
				update_height_to_root(z);
			} else {
				rotate_double_left(tree, z);
			}
		} else if (bf(z) < -1) {
			/* mirror case: z is left-heavy */
			if (bf(z->b_left) <= 0) {
				rotate_right(tree, z);
				update_height_to_root(z);
			} else {
				rotate_double_right(tree, z);
			}
		}

		z = z->b_parent;
	}
}
|
||||
|
||||
/* updates b_height for all nodes between the inserted node and the root
|
||||
of the tree, and calls insert_fixup.
|
||||
|
||||
@param node the node that was just inserted into the tree.
|
||||
*/
|
||||
void btree_insert_fixup(btree_t *tree, btree_node_t *node)
{
	/* a freshly linked-in node is a leaf; the update_height() pass
	   below sets its cached height to 1 */
	node->b_height = 0;

	/* refresh cached heights on the whole path from the new node to
	   the root before re-balancing */
	for (btree_node_t *cur = node; cur; cur = cur->b_parent) {
		update_height(cur);
	}

	/* restore the AVL balance rule (at most one rotation is needed
	   after an insertion) */
	insert_fixup(tree, node);
}
|
||||
|
||||
/* remove a node from a tree.
|
||||
|
||||
this function assumes that `node` has no children, and therefore
|
||||
doesn't need to be replaced.
|
||||
|
||||
updates b_height for all nodes between `node` and the tree root.
|
||||
|
||||
@param node the node to delete.
|
||||
*/
|
||||
/* unlink a leaf `node` from the tree.

   assumes `node` has no children, so nothing needs to replace it.
   refreshes cached heights from the former parent up to the root.

   @param node the node to delete.
   @return the former parent of `node` — the point where re-balancing
           must begin — or NULL if `node` was the root. */
static btree_node_t *remove_node_with_no_children(btree_t *tree, btree_node_t *node)
{
	btree_node_t *parent = node->b_parent;

	node->b_parent = NULL;

	if (!parent) {
		tree->b_root = NULL;
	} else if (IS_LEFT_CHILD(parent, node)) {
		parent->b_left = NULL;
	} else {
		parent->b_right = NULL;
	}

	for (btree_node_t *cur = parent; cur; cur = cur->b_parent) {
		update_height(cur);
	}

	return parent;
}
|
||||
|
||||
/* remove a node from a tree.
|
||||
|
||||
this function assumes that `node` has one child.
|
||||
the child of `node` is inherited by `node`'s parent, and `node` is removed.
|
||||
|
||||
updates b_height for all nodes between the node that replaced
|
||||
`node` and the tree root.
|
||||
|
||||
@param node the node to delete.
|
||||
*/
|
||||
static btree_node_t *replace_node_with_one_subtree(btree_t *tree, btree_node_t *node)
{
	btree_node_t *p = node->b_parent;
	/* z is node's only child; it takes node's place in the tree */
	btree_node_t *z = NULL;

	if (HAS_LEFT_CHILD(node)) {
		z = node->b_left;
	} else {
		z = node->b_right;
	}

	/* w is returned as the starting point for the delete fixup */
	btree_node_t *w = z;
	if (!p) {
		tree->b_root = z;
	} else if (IS_LEFT_CHILD(p, node)) {
		p->b_left = z;
	} else if (IS_RIGHT_CHILD(p, node)) {
		p->b_right = z;
	}

	z->b_parent = p;

	/* fully detach the deleted node */
	node->b_parent = NULL;
	node->b_left = node->b_right = NULL;

	/* refresh cached heights from the replacement up to the root */
	while (z) {
		update_height(z);
		z = z->b_parent;
	}

	return w;
}
|
||||
|
||||
/* remove a node from a tree.
|
||||
|
||||
this function assumes that `node` has two children.
|
||||
find the in-order predecessor Y of `node` (the largest node in `node`'s left sub-tree),
|
||||
removes `node` from the tree and moves Y to where `node` used to be.
|
||||
|
||||
if Y has a child (it will never have more than one), have Y's parent inherit
|
||||
Y's child.
|
||||
|
||||
updates b_height for all nodes between the deepest node that was modified
|
||||
and the tree root.
|
||||
|
||||
@param z the node to delete.
|
||||
*/
|
||||
static btree_node_t *replace_node_with_two_subtrees(btree_t *tree, btree_node_t *z)
{
	/* x will replace z. x is z's in-order predecessor: the right-most
	   (largest) node in z's left subtree. by construction x has no
	   right child, so it can have at most one child (its left). */
	btree_node_t *x = z->b_left;

	while (x->b_right) {
		x = x->b_right;
	}

	/* y is the node that will replace x (x's left child; may be NULL) */
	btree_node_t *y = x->b_left;

	/* w is the starting point for the height update and fixup:
	   the deepest node whose subtree changed shape */
	btree_node_t *w = x;
	if (w->b_parent != z) {
		w = w->b_parent;
	}

	if (y) {
		w = y;
	}

	/* unlink x from its old position, promoting y into its place.
	   NOTE: when x is z's direct left child this assignment rewrites
	   z->b_left, which is what makes `x->b_left = z->b_left` below
	   safe (it then copies y, not x itself). */
	if (IS_LEFT_CHILD(x->b_parent, x)) {
		x->b_parent->b_left = y;
	} else if (IS_RIGHT_CHILD(x->b_parent, x)) {
		x->b_parent->b_right = y;
	}

	if (y) {
		y->b_parent = x->b_parent;
	}

	/* point z's parent at x instead of z (no-op when z is the root) */
	if (IS_LEFT_CHILD(z->b_parent, z)) {
		z->b_parent->b_left = x;
	} else if (IS_RIGHT_CHILD(z->b_parent, z)) {
		z->b_parent->b_right = x;
	}

	/* x adopts z's parent link and both of z's subtrees */
	x->b_parent = z->b_parent;
	x->b_left = z->b_left;
	x->b_right = z->b_right;

	if (x->b_left) {
		x->b_left->b_parent = x;
	}

	if (x->b_right) {
		x->b_right->b_parent = x;
	}

	if (!x->b_parent) {
		tree->b_root = x;
	}

	/* refresh cached heights from the deepest modified node to the root */
	btree_node_t *cur = w;
	while (cur) {
		update_height(cur);
		cur = cur->b_parent;
	}

	return w;
}
|
||||
|
||||
/* delete a node from the tree and re-balance it afterwards */
|
||||
void btree_delete(btree_t *tree, btree_node_t *node)
|
||||
{
|
||||
btree_node_t *w = NULL;
|
||||
|
||||
if (HAS_NO_CHILDREN(node)) {
|
||||
w = remove_node_with_no_children(tree, node);
|
||||
} else if (HAS_ONE_CHILD(node)) {
|
||||
w = replace_node_with_one_subtree(tree, node);
|
||||
} else if (HAS_TWO_CHILDREN(node)) {
|
||||
w = replace_node_with_two_subtrees(tree, node);
|
||||
}
|
||||
|
||||
if (w) {
|
||||
delete_fixup(tree, w);
|
||||
}
|
||||
|
||||
node->b_left = node->b_right = node->b_parent = NULL;
|
||||
}
|
||||
|
||||
btree_node_t *btree_first(btree_t *tree)
{
	/* the first node is the one with the smallest key: the left-most
	   node reachable from the root */
	btree_node_t *n = tree->b_root;

	if (!n) {
		return NULL;
	}

	for (; n->b_left; n = n->b_left) {
		/* keep descending left */
	}

	return n;
}
|
||||
|
||||
btree_node_t *btree_last(btree_t *tree)
{
	/* the last node is the one with the largest key: the right-most
	   node reachable from the root */
	btree_node_t *n = tree->b_root;

	if (!n) {
		return NULL;
	}

	for (; n->b_right; n = n->b_right) {
		/* keep descending right */
	}

	return n;
}
|
||||
|
||||
btree_node_t *btree_next(btree_node_t *node)
{
	if (!node) {
		return NULL;
	}

	/* case 1: `node` has a right subtree. everything in it is larger
	   than `node`, so the in-order successor is that subtree's
	   smallest (left-most) node. */
	if (node->b_right) {
		btree_node_t *n;
		for (n = node->b_right; n->b_left; n = n->b_left) {
			/* keep descending left */
		}
		return n;
	}

	/* case 2: no right subtree — climb towards the root until we step
	   up from a left child. that parent is the first ancestor larger
	   than `node`; NULL means `node` was the tree maximum. */
	btree_node_t *cur = node;
	while (cur->b_parent && cur->b_parent->b_left != cur) {
		cur = cur->b_parent;
	}

	return cur->b_parent;
}
|
||||
|
||||
btree_node_t *btree_prev(btree_node_t *node)
{
	if (!node) {
		return NULL;
	}

	/* case 1: `node` has a left subtree. everything in it is smaller
	   than `node`, so the in-order predecessor is that subtree's
	   largest (right-most) node. */
	if (node->b_left) {
		btree_node_t *n;
		for (n = node->b_left; n->b_right; n = n->b_right) {
			/* keep descending right */
		}
		return n;
	}

	/* case 2: no left subtree — climb towards the root until we step
	   up from a right child. that parent is the first ancestor smaller
	   than `node`; NULL means `node` was the tree minimum. */
	btree_node_t *cur = node;
	while (cur->b_parent && cur->b_parent->b_right != cur) {
		cur = cur->b_parent;
	}

	return cur->b_parent;
}
|
||||
@@ -1,373 +0,0 @@
|
||||
/*
|
||||
The Clear BSD License
|
||||
|
||||
Copyright (c) 2023 Max Wash
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted (subject to the limitations in the disclaimer
|
||||
below) provided that the following conditions are met:
|
||||
|
||||
- Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
- Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
- Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
*/
|
||||
|
||||
#ifndef SOCKS_BTREE_H_
|
||||
#define SOCKS_BTREE_H_
|
||||
|
||||
#include <stddef.h>
#include <stdint.h>
|
||||
|
||||
/* if your custom structure contains a btree_node_t (i.e. it can be part of a btree),
   you can use this macro to convert a btree_node_t* back into a pointer to your type.

   @param t the name of your custom type (something that can be passed to offsetof)
   @param m the name of the btree_node_t member variable within your custom type.
   @param v the btree_node_t pointer that you wish to convert. if this is NULL, NULL will be returned.
*/
#define BTREE_CONTAINER(t, m, v) ((void *)((v) ? (uintptr_t)(v) - (offsetof(t, m)) : 0))
|
||||
|
||||
/* defines a simple node insertion function.
|
||||
this function assumes that your nodes have simple integer keys that can be compared with the usual operators.
|
||||
|
||||
EXAMPLE:
|
||||
if you have a tree node type like this:
|
||||
|
||||
struct my_tree_node {
|
||||
int key;
|
||||
btree_node_t base;
|
||||
}
|
||||
|
||||
You would use the following call to generate an insert function for a tree with this node type:
|
||||
|
||||
BTREE_DEFINE_SIMPLE_INSERT(struct my_tree_node, base, key, my_tree_node_insert);
|
||||
|
||||
Which would emit a function defined like:
|
||||
|
||||
static void my_tree_node_insert(btree_t *tree, struct my_tree_node *node);
|
||||
|
||||
@param node_type your custom tree node type. usually a structure that contains a btree_node_t member.
|
||||
@param container_node_member the name of the btree_node_t member variable within your custom type.
|
||||
@param container_key_member the name of the key member variable within your custom type.
|
||||
@param function_name the name of the function to generate.
|
||||
*/
|
||||
/* NOTE(review): if a node with an equal key is already in the tree, the new
   node is silently NOT inserted (the emitted function just returns). callers
   that need to detect duplicates must check with a get/search beforehand. */
#define BTREE_DEFINE_SIMPLE_INSERT(node_type, container_node_member, container_key_member, function_name) \
	static void function_name(btree_t *tree, node_type *node) \
	{ \
		if (!tree->b_root) { \
			tree->b_root = &node->container_node_member; \
			btree_insert_fixup(tree, &node->container_node_member); \
			return; \
		} \
		\
		btree_node_t *cur = tree->b_root; \
		while (1) { \
			node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
			btree_node_t *next = NULL; \
			\
			if (node->container_key_member > cur_node->container_key_member) { \
				next = btree_right(cur); \
				\
				if (!next) { \
					btree_put_right(cur, &node->container_node_member); \
					break; \
				} \
			} else if (node->container_key_member < cur_node->container_key_member) { \
				next = btree_left(cur); \
				\
				if (!next) { \
					btree_put_left(cur, &node->container_node_member); \
					break; \
				} \
			} else { \
				return; \
			} \
			\
			cur = next; \
		} \
		\
		btree_insert_fixup(tree, &node->container_node_member); \
	}
|
||||
|
||||
/* defines a node insertion function.
|
||||
this function should be used for trees with complex node keys that cannot be directly compared.
|
||||
a comparator for your keys must be supplied.
|
||||
|
||||
EXAMPLE:
|
||||
if you have a tree node type like this:
|
||||
|
||||
struct my_tree_node {
|
||||
complex_key_t key;
|
||||
btree_node_t base;
|
||||
}
|
||||
|
||||
You would need to define a comparator function or macro with the following signature:
|
||||
|
||||
int my_comparator(struct my_tree_node *a, struct my_tree_node *b);
|
||||
|
||||
Which implements the following:
|
||||
|
||||
return -1 if a < b
|
||||
return 0 if a == b
|
||||
return 1 if a > b
|
||||
|
||||
You would use the following call to generate an insert function for a tree with this node type:
|
||||
|
||||
BTREE_DEFINE_INSERT(struct my_tree_node, base, key, my_tree_node_insert, my_comparator);
|
||||
|
||||
Which would emit a function defined like:
|
||||
|
||||
static void my_tree_node_insert(btree_t *tree, struct my_tree_node *node);
|
||||
|
||||
@param node_type your custom tree node type. usually a structure that contains a btree_node_t member.
|
||||
@param container_node_member the name of the btree_node_t member variable within your custom type.
|
||||
@param container_key_member the name of the key member variable within your custom type.
|
||||
@param function_name the name of the function to generate.
|
||||
@param comparator the name of a comparator function or functional-macro that conforms to the
|
||||
requirements listed above.
|
||||
*/
|
||||
/* FIX: the generated function previously tested `cmp == 1` and `cmp == -1`.
   comparators in the C tradition (strcmp, memcmp, qsort callbacks) return
   *any* positive or negative value, not exactly +/-1; such a comparator
   would fall into the "equal" branch and the node would be silently
   dropped. testing the sign (`cmp > 0` / `cmp < 0`) accepts both
   conventions and is backward-compatible with comparators that return
   exactly -1/0/1 as documented above.

   NOTE: nodes that compare equal to an existing node are silently NOT
   inserted (the emitted function just returns). */
#define BTREE_DEFINE_INSERT(node_type, container_node_member, container_key_member, function_name, comparator) \
	static void function_name(btree_t *tree, node_type *node) \
	{ \
		if (!tree->b_root) { \
			tree->b_root = &node->container_node_member; \
			btree_insert_fixup(tree, &node->container_node_member); \
			return; \
		} \
		\
		btree_node_t *cur = tree->b_root; \
		while (1) { \
			node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
			btree_node_t *next = NULL; \
			int cmp = comparator(node, cur_node); \
			\
			if (cmp > 0) { \
				next = btree_right(cur); \
				\
				if (!next) { \
					btree_put_right(cur, &node->container_node_member); \
					break; \
				} \
			} else if (cmp < 0) { \
				next = btree_left(cur); \
				\
				if (!next) { \
					btree_put_left(cur, &node->container_node_member); \
					break; \
				} \
			} else { \
				return; \
			} \
			\
			cur = next; \
		} \
		\
		btree_insert_fixup(tree, &node->container_node_member); \
	}
|
||||
|
||||
/* defines a simple tree search function.
|
||||
this function assumes that your nodes have simple integer keys that can be compared with the usual operators.
|
||||
|
||||
EXAMPLE:
|
||||
if you have a tree node type like this:
|
||||
|
||||
struct my_tree_node {
|
||||
int key;
|
||||
btree_node_t base;
|
||||
}
|
||||
|
||||
You would use the following call to generate a search function for a tree with this node type:
|
||||
|
||||
BTREE_DEFINE_SIMPLE_GET(struct my_tree_node, int, base, key, my_tree_node_get);
|
||||
|
||||
Which would emit a function defined like:
|
||||
|
||||
static void my_tree_node_get(btree_t *tree, int key);
|
||||
|
||||
@param node_type your custom tree node type. usually a structure that contains a btree_node_t member.
|
||||
@param key_type the type name of the key embedded in your custom tree node type. this type must be
|
||||
compatible with the builtin comparison operators.
|
||||
@param container_node_member the name of the btree_node_t member variable within your custom type.
|
||||
@param container_key_member the name of the key member variable within your custom type.
|
||||
@param function_name the name of the function to generate.
|
||||
*/
|
||||
/* FIX: the macro previously emitted a function literally named `get`,
   ignoring the function_name argument entirely. that collides as soon as
   the macro is expanded twice in one translation unit, pollutes the
   namespace with an external symbol, and contradicts the documentation
   above (which promises a static function with the requested name).
   the emitted function is now `static` and named `function_name`. */
#define BTREE_DEFINE_SIMPLE_GET(node_type, key_type, container_node_member, container_key_member, function_name) \
	static node_type *function_name(btree_t *tree, key_type key) \
	{ \
		btree_node_t *cur = tree->b_root; \
		while (cur) { \
			node_type *cur_node = BTREE_CONTAINER(node_type, container_node_member, cur); \
			if (key > cur_node->container_key_member) { \
				cur = btree_right(cur); \
			} else if (key < cur_node->container_key_member) { \
				cur = btree_left(cur); \
			} else { \
				return cur_node; \
			} \
		} \
		\
		return NULL; \
	}
|
||||
|
||||
/* perform an in-order traversal of a binary tree
|
||||
|
||||
If you have a tree defined like:
|
||||
|
||||
btree_t my_tree;
|
||||
|
||||
with nodes defined like:
|
||||
|
||||
struct my_tree_node {
|
||||
int key;
|
||||
btree_node_t base;
|
||||
}
|
||||
|
||||
and you want to do something like:
|
||||
|
||||
foreach (struct my_tree_node *node : my_tree) { ... }
|
||||
|
||||
you should use this:
|
||||
|
||||
btree_foreach (struct my_tree_node, node, &my_tree, base) { ... }
|
||||
|
||||
@param iter_type the type name of the iterator variable. this should be the tree's node type, and shouldn't be a pointer.
|
||||
@param iter_name the name of the iterator variable.
|
||||
@param tree_name a pointer to the tree to traverse.
|
||||
@param node_member the name of the btree_node_t member variable within the tree node type.
|
||||
*/
|
||||
/* NOTE(review): each step fetches the next node via btree_next() on the node
   currently being visited, so deleting the current node mid-iteration breaks
   the traversal (btree_delete clears its links) — collect first, then delete. */
#define btree_foreach(iter_type, iter_name, tree_name, node_member) \
	for (iter_type *iter_name = BTREE_CONTAINER(iter_type, node_member, btree_first(tree_name)); \
	     iter_name; \
	     iter_name = BTREE_CONTAINER(iter_type, node_member, btree_next(&((iter_name)->node_member))))
|
||||
|
||||
/* perform a reverse in-order traversal of a binary tree
|
||||
|
||||
If you have a tree defined like:
|
||||
|
||||
btree_t my_tree;
|
||||
|
||||
with nodes defined like:
|
||||
|
||||
struct my_tree_node {
|
||||
int key;
|
||||
btree_node_t base;
|
||||
}
|
||||
|
||||
and you want to do something like:
|
||||
|
||||
foreach (struct my_tree_node *node : reverse(my_tree)) { ... }
|
||||
|
||||
you should use this:
|
||||
|
||||
btree_foreach_r (struct my_tree_node, node, &my_tree, base) { ... }
|
||||
|
||||
@param iter_type the type name of the iterator variable. this should be the tree's node type, and shouldn't be a pointer.
|
||||
@param iter_name the name of the iterator variable.
|
||||
@param tree_name a pointer to the tree to traverse.
|
||||
@param node_member the name of the btree_node_t member variable within the tree node type.
|
||||
*/
|
||||
/* NOTE(review): each step fetches the previous node via btree_prev() on the
   node currently being visited, so deleting the current node mid-iteration
   breaks the traversal — collect first, then delete. */
#define btree_foreach_r(iter_type, iter_name, tree_name, node_member) \
	for (iter_type *iter_name = BTREE_CONTAINER(iter_type, node_member, btree_last(tree_name)); \
	     iter_name; \
	     iter_name = BTREE_CONTAINER(iter_type, node_member, btree_prev(&((iter_name)->node_member))))
|
||||
|
||||
/* binary tree nodes. this *cannot* be used directly. you need to define a custom node type
|
||||
that contains a member variable of type btree_node_t.
|
||||
|
||||
you would then use the supplied macros to define functions to manipulate your custom binary tree.
|
||||
*/
|
||||
typedef struct btree_node {
	/* tree links; b_parent is NULL for the root node */
	struct btree_node *b_parent, *b_left, *b_right;
	/* cached height of the subtree rooted here (a leaf has height 1) */
	unsigned short b_height;
} btree_node_t;
|
||||
|
||||
/* binary tree. unlike btree_node_t, you can define variables of type btree_t. */
|
||||
typedef struct btree {
	/* root node of the tree; NULL when the tree is empty */
	struct btree_node *b_root;
} btree_t;
|
||||
|
||||
/* re-balance a binary tree after an insertion operation.
|
||||
|
||||
NOTE that, if you define an insertion function using BTREE_DEFINE_INSERT or similar,
|
||||
this function will automatically be called for you.
|
||||
|
||||
@param tree the tree to re-balance.
|
||||
@param node the node that was just inserted into the tree.
|
||||
*/
|
||||
extern void btree_insert_fixup(btree_t *tree, btree_node_t *node);
|
||||
|
||||
/* delete a node from a binary tree and re-balance the tree afterwards.
|
||||
|
||||
@param tree the tree to delete from
|
||||
@param node the node to delete.
|
||||
*/
|
||||
extern void btree_delete(btree_t *tree, btree_node_t *node);
|
||||
|
||||
/* get the first node in a binary tree.
|
||||
|
||||
this will be the node with the smallest key (i.e. the node that is furthest-left from the root)
|
||||
*/
|
||||
extern btree_node_t *btree_first(btree_t *tree);
|
||||
|
||||
/* get the last node in a binary tree.
|
||||
|
||||
this will be the node with the largest key (i.e. the node that is furthest-right from the root)
|
||||
*/
|
||||
extern btree_node_t *btree_last(btree_t *tree);
|
||||
/* for any binary tree node, this function returns the node with the next-largest key value */
|
||||
extern btree_node_t *btree_next(btree_node_t *node);
|
||||
/* for any binary tree node, this function returns the node with the next-smallest key value */
|
||||
extern btree_node_t *btree_prev(btree_node_t *node);
|
||||
|
||||
/* sets `child` as the immediate left-child of `parent` */
|
||||
static inline void btree_put_left(btree_node_t *parent, btree_node_t *child)
{
    /* wire both directions of the parent/left-child link. */
    child->b_parent = parent;
    parent->b_left = child;
}
|
||||
|
||||
/* sets `child` as the immediate right-child of `parent` */
|
||||
static inline void btree_put_right(btree_node_t *parent, btree_node_t *child)
{
    /* wire both directions of the parent/right-child link. */
    child->b_parent = parent;
    parent->b_right = child;
}
|
||||
|
||||
/* get the immediate left-child of `node` */
static inline btree_node_t *btree_left(btree_node_t *node)
{
    return node->b_left;
}

/* get the immediate right-child of `node` */
static inline btree_node_t *btree_right(btree_node_t *node)
{
    return node->b_right;
}

/* get the immediate parent of `node` */
static inline btree_node_t *btree_parent(btree_node_t *node)
{
    return node->b_parent;
}

/* get the height of `node`.

   the height of a node is defined as the length of the longest path
   between the node and a leaf node.

   this count includes the node itself, so the height of a leaf node will be 1.

   note: this returns the cached b_height field; it is not recomputed here. */
static inline unsigned short btree_height(btree_node_t *node)
{
    return node->b_height;
}
|
||||
|
||||
#endif
|
||||
@@ -1,11 +0,0 @@
|
||||
#ifndef SOCKS_LOCKS_H_
#define SOCKS_LOCKS_H_

/* a simple spin lock value; 8-byte aligned for atomic access. */
typedef int __attribute__((aligned(8))) spin_lock_t;

/* static initialiser: the lock starts out unlocked (zero). */
#define SPIN_LOCK_INIT ((spin_lock_t)0)

/* acquire / release `lck`. the `flags` parameter follows the usual
   irqsave/irqrestore calling convention: the value written by
   spin_lock_irqsave() must be handed back to spin_unlock_irqrestore(). */
extern void spin_lock_irqsave(spin_lock_t *lck, unsigned long *flags);
extern void spin_unlock_irqrestore(spin_lock_t *lck, unsigned long flags);

#endif
|
||||
@@ -1,14 +0,0 @@
|
||||
#include <socks/locks.h>
|
||||
|
||||
/* acquire `lck`, spinning until it is available.

   NOTE(review): interrupts are not actually disabled yet; this is a
   busy-wait stub. `*flags` is now explicitly zeroed so the value later
   passed to spin_unlock_irqrestore() is never read from an indeterminate
   caller variable (the previous version never wrote to *flags at all).

   @param lck   the lock to acquire (0 == free, 1 == held).
   @param flags out-parameter receiving the saved interrupt state. */
void spin_lock_irqsave(spin_lock_t *lck, unsigned long *flags)
{
    /* no IRQ state exists to save yet, but never leave *flags indeterminate. */
    *flags = 0;

    /* atomically transition 0 -> 1; spin while somebody else holds the lock. */
    while (!__sync_bool_compare_and_swap(lck, 0, 1)) {
        /* pause */
    }
}
|
||||
|
||||
/* release `lck`.

   NOTE(review): `flags` is accepted for interface symmetry with
   spin_lock_irqsave() but is currently unused — no interrupt state is
   restored yet. */
void spin_unlock_irqrestore(spin_lock_t *lck, unsigned long flags)
{
    /* release-store the lock back to zero. */
    __sync_lock_release(lck);
}
|
||||
|
||||
@@ -1,302 +0,0 @@
|
||||
/*
|
||||
The Clear BSD License
|
||||
|
||||
Copyright (c) 2023 Max Wash
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted (subject to the limitations in the disclaimer
|
||||
below) provided that the following conditions are met:
|
||||
|
||||
- Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
- Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
- Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
*/
|
||||
#ifndef SOCKS_MEMBLOCK_H_
|
||||
#define SOCKS_MEMBLOCK_H_
|
||||
|
||||
#include <stddef.h>
|
||||
#include <limits.h>
|
||||
#include <socks/types.h>
|
||||
|
||||
#define MEMBLOCK_INIT_MEMORY_REGION_COUNT 128
|
||||
#define MEMBLOCK_INIT_RESERVED_REGION_COUNT 128
|
||||
|
||||
/* core iteration driver shared by the for_each_*_range() macros.
   __next_memory_region() advances the iterator each step; an __idx of
   ULLONG_MAX marks the end of the iteration. */
#define __for_each_mem_range(i, type_a, type_b, p_start, p_end) \
    for ((i)->__idx = 0, __next_memory_region(i, type_a, type_b, p_start, p_end); \
         (i)->__idx != ULLONG_MAX; \
         __next_memory_region(i, type_a, type_b, p_start, p_end))
|
||||
|
||||
/* iterate through all memory regions known to memblock.

   this consists of all regions that have been registered
   with memblock using memblock_add().

   this iteration can be optionally constrained to a given region.

   @param i the iterator. this should be a pointer of type memblock_iter_t.
            for each iteration, this structure will be filled with details about
            the current memory region.
   @param p_start the lower bound of the memory region to iterate through.
                  if you don't want to use a lower bound, pass 0.
   @param p_end the upper bound of the memory region to iterate through.
                if you don't want to use an upper bound, pass UINTPTR_MAX.

   EXAMPLE: to iterate through all memory regions (with no bounds):

       memblock_iter_t it;
       for_each_mem_range (&it, 0x0, UINTPTR_MAX) { ... }

   EXAMPLE: to iterate through all memory regions between physical
   addresses 0x40000 and 0x80000:

       memblock_iter_t it;
       for_each_mem_range (&it, 0x40000, 0x80000) { ... }
 */
#define for_each_mem_range(i, p_start, p_end) \
    __for_each_mem_range(i, &memblock.memory, NULL, p_start, p_end)
|
||||
|
||||
/* iterate through all memory regions reserved using memblock.

   this consists of all regions that have been registered
   with memblock using memblock_reserve().

   this iteration can be optionally constrained to a given region.

   @param i the iterator. this should be a pointer of type memblock_iter_t.
            for each iteration, this structure will be filled with details about
            the current memory region.
   @param p_start the lower bound of the memory region to iterate through.
                  if you don't want to use a lower bound, pass 0.
   @param p_end the upper bound of the memory region to iterate through.
                if you don't want to use an upper bound, pass UINTPTR_MAX.

   EXAMPLE: to iterate through all reserved memory regions (with no bounds):

       memblock_iter_t it;
       for_each_reserved_mem_range (&it, 0x0, UINTPTR_MAX) { ... }

   EXAMPLE: to iterate through all reserved memory regions between physical
   addresses 0x40000 and 0x80000:

       memblock_iter_t it;
       for_each_reserved_mem_range (&it, 0x40000, 0x80000) { ... }
 */
#define for_each_reserved_mem_range(i, p_start, p_end) \
    __for_each_mem_range(i, &memblock.reserved, NULL, p_start, p_end)
|
||||
|
||||
/* iterate through all memory regions known by memblock to be free.

   this consists of all regions BETWEEN those regions that have been
   registered using memblock_reserve(), bounded within the memory
   regions added using memblock_add().

   this iteration can be optionally constrained to a given region.

   @param i the iterator. this should be a pointer of type memblock_iter_t.
            for each iteration, this structure will be filled with details about
            the current memory region.
   @param p_start the lower bound of the memory region to iterate through.
                  if you don't want to use a lower bound, pass 0.
   @param p_end the upper bound of the memory region to iterate through.
                if you don't want to use an upper bound, pass UINTPTR_MAX.

   EXAMPLE: if you have added the following memory regions to
   memblock using memblock_add():

     - 0x00000 -> 0x05fff
     - 0x08000 -> 0x1ffff

   ...and you have reserved the following memory regions using
   memblock_reserve():

     - 0x01000 -> 0x04fff
     - 0x09000 -> 0x0ffff

   the following call:

       memblock_iter_t it;
       for_each_free_mem_range (&it, 0x0, UINTPTR_MAX) { ... }

   would iterate through the following sequence of free memory ranges:

     - 0x00000 -> 0x00fff
     - 0x05000 -> 0x05fff
     - 0x08000 -> 0x08fff
     - 0x10000 -> 0x1ffff
 */
#define for_each_free_mem_range(i, p_start, p_end) \
    __for_each_mem_range(i, &memblock.memory, &memblock.reserved, p_start, p_end)
|
||||
|
||||
/* packed iterator position: two 32-bit indices in one 64-bit value. */
typedef uint64_t memblock_index_t;

typedef enum memblock_region_status {
    /* Used in memblock.memory regions, indicates that the memory region exists */
    MEMBLOCK_MEMORY = 0,
    /* Used in memblock.reserved regions, indicates that the memory region was reserved
     * by a call to memblock_alloc() */
    MEMBLOCK_ALLOC,
    /* Used in memblock.reserved regions, indicates that the memory region was reserved
     * by a call to memblock_reserve() */
    MEMBLOCK_RESERVED,
} memblock_region_status_t;

typedef struct memblock_region {
    /* the status of the memory region (free, reserved, allocated, etc) */
    memblock_region_status_t status;
    /* the physical address of the first byte that makes up the region */
    phys_addr_t base;
    /* the physical address of the last byte that makes up the region (inclusive) */
    phys_addr_t limit;
} memblock_region_t;

/* buffer of memblock regions, all of which are the same type
   (memory, reserved, etc) */
typedef struct memblock_type {
    /* array of regions, kept sorted by base address */
    struct memblock_region *regions;
    /* number of slots currently in use */
    unsigned int count;
    /* total number of slots available in `regions` */
    unsigned int max;
    /* human-readable list name (e.g. "memory", "reserved") */
    const char *name;
} memblock_type_t;

typedef struct memblock {
    /* bounds of the memory region that can be used by memblock_alloc()
       both of these are virtual addresses */
    uintptr_t m_alloc_start, m_alloc_end;
    /* memblock assumes that all memory in the alloc zone is contiguously mapped
       (if paging is enabled). m_voffset is the offset that needs to be added to
       a given physical address to get the corresponding virtual address */
    uintptr_t m_voffset;

    struct memblock_type memory;
    struct memblock_type reserved;
} memblock_t;

/* iteration state for the for_each_*_range() macros. __idx is private to
   the iterator machinery; it_base / it_limit / it_status describe the
   region produced by the current step. */
typedef struct memblock_iter {
    memblock_index_t __idx;
    phys_addr_t it_base;
    phys_addr_t it_limit;
    memblock_region_status_t it_status;
} memblock_iter_t;
|
||||
|
||||
/* global memblock state. defined in memblock.c. */
extern memblock_t memblock;

extern int __next_mem_range(memblock_iter_t *it);

/* initialise the global memblock state.
   this function must be called before any other memblock functions can be used.

   this function sets the bounds of the heap area. memory allocation requests
   using memblock_alloc() will be constrained to this zone.

   memblock assumes that all physical memory in the system is mapped to
   an area in virtual memory, such that converting a physical address to
   a valid virtual address can be done by simply applying an offset.

   @param alloc_start the virtual address of the start of the heap area.
   @param alloc_end the virtual address of the end of the heap area.
   @param voffset the offset between the physical address of a given page and
                  its corresponding virtual address.
 */
extern int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset);
|
||||
|
||||
/* add a region of memory to memblock.

   this function is used to define regions of memory that are accessible, but
   says nothing about the STATE of the given memory.

   all memory is free by default. once a region of memory is added,
   memblock_reserve() can be used to mark the memory as reserved.

   @param base the physical address of the start of the memory region to add.
   @param size the size of the memory region to add in bytes.
 */
extern int memblock_add(phys_addr_t base, size_t size);

/* mark a region of memory as reserved.

   this function can only operate on regions of memory that have been previously
   registered with memblock using memblock_add().

   reserved memory will not be used by memblock_alloc(), and will remain
   reserved when the vm_page memory map is initialised.

   @param base the physical address of the start of the memory region to reserve.
   @param size the size of the memory region to reserve in bytes.
 */
extern int memblock_reserve(phys_addr_t base, size_t size);
|
||||
|
||||
/* allocate a block of memory, returning a virtual address.

   this function selects the first available region of memory that satisfies
   the requested allocation size, marks `size` bytes of this region as reserved,
   and returns the virtual address of the region.

   when looking for a suitable region of memory, this function searches the
   intersection of the following memory zones:
     - the regions of memory added with memblock_add().
     - the region of memory specified as the heap bounds during the call
       to memblock_init().
   and excludes the following regions:
     - the regions of memory marked as reserved by memblock_reserve() and
       previous calls to memblock_alloc()

   @param size the size of the buffer to allocate in bytes.
 */
extern void *memblock_alloc(size_t size);

/* allocate a block of memory, returning a physical address.

   this function selects the first available region of memory that satisfies
   the requested allocation size, marks `size` bytes of this region as reserved,
   and returns the physical address of the region.

   when looking for a suitable region of memory, this function searches the
   intersection of the following memory zones:
     - the regions of memory added with memblock_add().
     - the region of memory specified as the heap bounds during the call
       to memblock_init().
   and excludes the following regions:
     - the regions of memory marked as reserved by memblock_reserve() and
       previous calls to memblock_alloc()

   @param size the size of the buffer to allocate in bytes.
 */
extern phys_addr_t memblock_alloc_phys(size_t size);
|
||||
|
||||
/* free a block of memory using its virtual address.

   due to limitations in memblock (as it is meant to be a simple,
   early-boot allocator), you must specify the size of the memory
   region you intend to free.

   @param addr the virtual address of the region to free.
   @param size the size of the region to free in bytes.
 */
extern int memblock_free(void *addr, size_t size);

/* free a block of memory using its physical address.

   due to limitations in memblock (as it is meant to be a simple,
   early-boot allocator), you must specify the size of the memory
   region you intend to free.

   @param addr the physical address of the region to free.
   @param size the size of the region to free in bytes.
 */
extern int memblock_free_phys(phys_addr_t addr, size_t size);
|
||||
|
||||
/* iterator driver used by the for_each_*_range() macros; see memblock.c. */
extern void __next_memory_region(memblock_iter_t *it,
                                 memblock_type_t *type_a, memblock_type_t *type_b,
                                 phys_addr_t start, phys_addr_t end);
|
||||
|
||||
#endif
|
||||
@@ -1,399 +0,0 @@
|
||||
/*
|
||||
The Clear BSD License
|
||||
|
||||
Copyright (c) 2023 Max Wash
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted (subject to the limitations in the disclaimer
|
||||
below) provided that the following conditions are met:
|
||||
|
||||
- Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
- Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
- Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
*/
|
||||
#include "socks/types.h"
|
||||
#include <stdio.h>
|
||||
#include <stdbool.h>
|
||||
#include <limits.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <socks/memblock.h>
|
||||
|
||||
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* an iterator position packs two 32-bit indices (one per region list)
   into the single 64-bit __idx field. */
#define ITER(a, b) ((uint64_t)(a) | ((uint64_t)(b) << 32))
#define ITER_END ULLONG_MAX
#define IDX_A(idx) ((idx) & 0xFFFFFFFF)
#define IDX_B(idx) (((idx) >> 32) & 0xFFFFFFFF)

/* the maximum possible value for a pointer type.
   Note that any pointers returned by the memblock API will still
   be bounded by the defined memory regions, and not by this constant. */
#define ADDR_MAX (~(uintptr_t)0)

/* static backing storage for the region lists, used until the allocator
   can grow them dynamically (see memblock_double_capacity). */
static memblock_region_t init_memory_regions[MEMBLOCK_INIT_MEMORY_REGION_COUNT];
static memblock_region_t init_reserved_regions[MEMBLOCK_INIT_RESERVED_REGION_COUNT];

static phys_addr_t do_alloc(size_t size);

/* global memblock state, declared in <socks/memblock.h>. */
memblock_t memblock = {
    .memory.regions = init_memory_regions,
    .memory.count = 0,
    .memory.max = MEMBLOCK_INIT_MEMORY_REGION_COUNT,
    .memory.name = "memory",

    .reserved.regions = init_reserved_regions,
    .reserved.count = 0,
    .reserved.max = MEMBLOCK_INIT_RESERVED_REGION_COUNT,
    .reserved.name = "reserved",
};
|
||||
|
||||
/* double the slot capacity of a region list.

   the replacement buffer is carved out of the memblock heap itself via
   do_alloc(). the previous buffer (either a static init_* array or an
   earlier do_alloc() buffer) is abandoned — presumably acceptable for an
   early-boot allocator with no general free path; TODO confirm. */
static void memblock_double_capacity(memblock_type_t *type)
{
    size_t new_max = type->max * 2;

    phys_addr_t new_regions_p = do_alloc(new_max * sizeof(memblock_region_t));

    /* translate the physical address into a usable virtual address. */
    void *new_regions = (void *)(new_regions_p + memblock.m_voffset);
    memcpy(new_regions, type->regions, type->count * sizeof(memblock_region_t));

    type->regions = new_regions;
    type->max = new_max;
}
|
||||
|
||||
/* insert `to_add` into `type`, keeping the region array sorted by base.

   callers are expected to guarantee spare capacity (they grow the list
   when count >= max - 2) and that `to_add` does not overlap an existing
   region of a different status. */
static int memblock_insert_region(memblock_type_t *type, memblock_region_t *to_add)
{
    unsigned int i = 0;

    /* find the first region that sorts after the new one.
       NOTE(review): this compares cur->base against to_add->limit; for
       disjoint regions that is equivalent to comparing bases — confirm
       regions are always disjoint by the time we get here. */
    for (i = 0; i < type->count; i++) {
        const memblock_region_t *cur = &type->regions[i];

        if (cur->base >= to_add->limit) {
            break;
        }
    }

    /* shift regions [i, count) up one slot to open a hole at index i. */
    memblock_region_t *src = &type->regions[i];
    memblock_region_t *dst = &type->regions[i + 1];
    unsigned int count = type->count - i;

    memmove(dst, src, count * sizeof *src);

    *src = *to_add;
    type->count++;

    return 0;
}
|
||||
|
||||
/* remove the region at index `i` from `type`, shifting all subsequent
   regions down one slot.

   @param type the region list to remove from.
   @param i    the index of the region to remove.
   @return 0 on success, -1 if `i` is out of bounds. */
static int memblock_remove_region(memblock_type_t *type, unsigned int i)
{
    if (i >= type->count) {
        return -1;
    }

    memblock_region_t *src = &type->regions[i + 1];
    memblock_region_t *dst = &type->regions[i];

    /* number of regions AFTER index i. the previous calculation used
       `type->count - i`, which moved one element too many and read one
       slot past the last valid region. */
    unsigned int count = type->count - i - 1;

    memmove(dst, src, count * sizeof *src);
    type->count--;
    return 0;
}
|
||||
|
||||
int memblock_init(uintptr_t alloc_start, uintptr_t alloc_end, uintptr_t voffset)
|
||||
{
|
||||
memblock.m_alloc_start = alloc_start;
|
||||
memblock.m_alloc_end =alloc_end;
|
||||
memblock.m_voffset = voffset;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* add the range [base, base + size - 1] with the given status to a region
   list, merging it with any same-status regions it meets or overlaps.

   @param type   the region list (memblock.memory or memblock.reserved).
   @param base   physical address of the first byte of the range.
   @param size   length of the range in bytes; a zero size is a no-op.
   @param status status recorded for the resulting region.
   @return 0 on success, -1 if the range overlaps an existing region whose
           status differs from `status`. */
int memblock_add_range(memblock_type_t *type, uintptr_t base, size_t size, memblock_region_status_t status)
{
    if (size == 0) {
        return 0;
    }

    uintptr_t limit = base + size - 1;

    if (type->count == 0) {
        type->regions[0].base = base;
        type->regions[0].limit = limit;
        /* BUGFIX: this fast path previously never stored `status`, leaving
           the first region's status field indeterminate. */
        type->regions[0].status = status;
        type->count++;
        return 0;
    }

    memblock_region_t new_region = { .base = base, .limit = limit, .status = status };

    /* two regions with different statuses CANNOT intersect. we first need to check
       to make sure the region being added doesn't violate this rule. */
    for (unsigned int i = 0; i < type->count; i++) {
        memblock_region_t *cur_region = &type->regions[i];

        if (new_region.base > cur_region->limit || new_region.limit < cur_region->base) {
            continue;
        }

        if (cur_region->status == new_region.status) {
            continue;
        }

        return -1;
    }

    bool add_new = true;

    for (unsigned int i = 0; i < type->count; i++) {
        memblock_region_t *cur_region = &type->regions[i];

        /* case 1: the region being added and the current region have no connection what-so-ever (no overlaps) */
        if (cur_region->limit + 1 < new_region.base || cur_region->base > new_region.limit) {
            continue;
        }

        /* case 2: the region being added matches a region already in the list. */
        if (cur_region->base == new_region.base && cur_region->limit == new_region.limit) {
            /* nothing needs to be done */
            add_new = false;
            break;
        }

        /* case 3: the region being added completely contains a region already in the list. */
        if (cur_region->base > new_region.base && cur_region->limit <= new_region.limit) {
            memblock_remove_region(type, i);

            /* after memblock_remove_region(), a different region will have moved into the array slot referenced by i.
               decrementing i means we'll stay at the current index and process this region.
               (the unsigned wrap at i == 0 is undone by the loop's i++.) */
            i--;
            continue;
        }

        /* case 4: the region being added meets or partially overlaps a region already in the list. */

        /* there can be an overlap at the beginning and the end of the region being added,
           anything else is either a full overlap (case 3) or not within the region being added at all.
           to handle this, remove the region that's already in the list and extend the region being added to cover it.
           the two regions may overlap and have incompatible statuses, but this case was handled earlier in this function. */
        if ((new_region.base > cur_region->base || new_region.base == cur_region->limit - 1) && new_region.status == cur_region->status) {
            /* the new region overlaps the END of the current region, change the base of the new region to match that of the current region. */
            new_region.base = cur_region->base;
        } else if ((new_region.base < cur_region->base || new_region.limit + 1 == cur_region->base) && new_region.status == cur_region->status) {
            /* the new region overlaps the BEGINNING of the current region, change the limit of the new region to match that of the current region. */
            new_region.limit = cur_region->limit;
        } else {
            continue;
        }

        /* with the new region updated to include the current region, we can remove the current region from the list */
        memblock_remove_region(type, i);
        i--;
    }

    if (add_new) {
        memblock_insert_region(type, &new_region);
    }

    return 0;
}
|
||||
|
||||
/* register a range of physical memory with memblock (public API). */
int memblock_add(uintptr_t base, size_t size)
{
    if (memblock.memory.count >= memblock.memory.max - 2) {
        /* growing .memory calls do_alloc(), which itself records a new
           reserved region — so make sure .reserved has spare slots FIRST,
           before .memory is doubled. */
        if (memblock.reserved.count >= memblock.reserved.max - 2) {
            memblock_double_capacity(&memblock.reserved);
        }

        memblock_double_capacity(&memblock.memory);
    }

    return memblock_add_range(&memblock.memory, base, size, MEMBLOCK_MEMORY);
}
|
||||
|
||||
int memblock_reserve(uintptr_t base, size_t size)
|
||||
{
|
||||
if (memblock.reserved.count >= memblock.reserved.max - 2) {
|
||||
memblock_double_capacity(&memblock.reserved);
|
||||
}
|
||||
|
||||
return memblock_add_range(&memblock.reserved, base, size, MEMBLOCK_RESERVED);
|
||||
}
|
||||
|
||||
static phys_addr_t do_alloc(size_t size)
|
||||
{
|
||||
phys_addr_t allocated_base = ADDR_MAX;
|
||||
|
||||
phys_addr_t region_start = memblock.m_alloc_start - memblock.m_voffset;
|
||||
phys_addr_t region_end = memblock.m_alloc_end - memblock.m_voffset;
|
||||
|
||||
memblock_iter_t it;
|
||||
for_each_free_mem_range (&it, region_start, region_end) {
|
||||
if (it.it_base & 0xF) {
|
||||
it.it_base &= ~0xF;
|
||||
it.it_base += 0x10;
|
||||
}
|
||||
|
||||
size_t region_size = it.it_limit - it.it_base + 1;
|
||||
if (region_size >= size) {
|
||||
allocated_base = it.it_base;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (allocated_base == ADDR_MAX) {
|
||||
fprintf(stderr, "memblock: cannot allocate %zu byte buffer!\n", size);
|
||||
abort();
|
||||
}
|
||||
|
||||
int status = memblock_add_range(&memblock.reserved, allocated_base, size, MEMBLOCK_ALLOC);
|
||||
if (status != 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return allocated_base;
|
||||
}
|
||||
|
||||
void *memblock_alloc(size_t size)
|
||||
{
|
||||
if (memblock.reserved.count >= memblock.reserved.max - 2) {
|
||||
memblock_double_capacity(&memblock.reserved);
|
||||
}
|
||||
|
||||
return (void *)(do_alloc(size) + memblock.m_voffset);
|
||||
}
|
||||
|
||||
phys_addr_t memblock_alloc_phys(size_t size)
|
||||
{
|
||||
if (memblock.reserved.count >= memblock.reserved.max - 2) {
|
||||
memblock_double_capacity(&memblock.reserved);
|
||||
}
|
||||
|
||||
return do_alloc(size);
|
||||
}
|
||||
|
||||
/* free a block by virtual address.
   NOTE(review): not implemented — memblock never reclaims freed ranges;
   this stub reports success unconditionally. */
int memblock_free(void *p, size_t size)
{
    return 0;
}

/* free a block by physical address.
   NOTE(review): not implemented — stub that reports success. */
int memblock_free_phys(phys_addr_t addr, size_t size)
{
    return 0;
}
|
||||
|
||||
/* advance the iterator `it` to the next region.

   when `type_b` is NULL, this walks the regions of `type_a` in order and
   reports each one verbatim.

   when `type_b` is non-NULL, it yields the parts of `type_a`'s regions
   that do NOT intersect any region of `type_b` (the "free" gaps), clipped
   to the caller-supplied [start, end] bounds.

   it->__idx packs the position in both lists (see ITER / IDX_A / IDX_B);
   ITER_END (ULLONG_MAX) marks the end of the iteration. */
void __next_memory_region(memblock_iter_t *it, memblock_type_t *type_a, memblock_type_t *type_b, uintptr_t start, uintptr_t end)
{
    /* unpack the per-list positions saved by the previous step. */
    unsigned int idx_a = IDX_A(it->__idx);
    unsigned int idx_b = IDX_B(it->__idx);

    for (; idx_a < type_a->count; idx_a++) {
        memblock_region_t *m = &type_a->regions[idx_a];

        uintptr_t m_start = m->base;
        uintptr_t m_end = m->limit;

        if (!type_b) {
            /* no exclusion list: report the region as-is. */
            it->it_base = m->base;
            it->it_limit = m->limit;
            it->it_status = m->status;

            it->__idx = ITER(idx_a + 1, idx_b);
            return;
        }

        if (m_end < start) {
            /* we haven't reached the requested memory range yet */
            continue;
        }

        if (m_start > end) {
            /* we have gone past the requested memory range and can now stop */
            break;
        }

        /* this loop deliberately runs one slot PAST type_b->count so the gap
           after the last reserved region is also visited; r is only
           dereferenced while idx_b < type_b->count. */
        for (; idx_b < type_b->count + 1; idx_b++) {
            memblock_region_t *r = &type_b->regions[idx_b];

            /* r_start and r_end delimit the region of memory between the current and previous reserved regions.
               if we have gone past the last reserved region, these variables delimit the range between the end
               of the last reserved region and the end of memory. */
            uintptr_t r_start = idx_b > 0 ? r[-1].limit + 1 : 0;
            uintptr_t r_end;

            if (idx_b < type_b->count) {
                r_end = r->base;

                /* we decrement r_end to get the address of the last byte of the free region.
                   if r_end is already zero, there is a reserved region starting at address 0x0.
                   as long as r_end == r_start == 0x00000, we will skip this region. */
                if (r_end) {
                    r_end--;
                }
            } else {
                /* this maximum value will be clamped to the bounds of memblock.memory
                   before being returned to the caller */
                r_end = ADDR_MAX;
            }

            /* NOTE(review): with the inclusive r_end above, this also skips
               gaps of exactly one byte (r_start == r_end) — confirm that is
               intentional. */
            if (r_start >= r_end) {
                /* this free region has a length of zero, move to the next one */
                continue;
            }

            if (r_start >= m_end) {
                /* we've gone past the end of the current memory region, and need to go to the next one */
                break;
            }

            /* we've already gone past this free memory region. move to the next one */
            if (m_start >= r_end) {
                continue;
            }

            /* we want the area that is overlapped by both
               region M (m_start - m_end) : The region defined as system memory.
               region R (r_start - r_end) : The region defined as free / outside of any reserved regions.
             */
            it->it_base = MAX(m_start, r_start);
            it->it_limit = MIN(m_end, r_end);

            /* further limit the region to the intersection between the region itself and the
               specified iteration bounds */
            it->it_base = MAX(it->it_base, start);
            it->it_limit = MIN(it->it_limit, end);

            if (it->it_limit <= it->it_base) {
                /* this region is not part of the specified bounds, skip it. */
                continue;
            }

            it->it_status = MEMBLOCK_MEMORY;

            /* whichever region is smaller, increment the pointer for that type, so we can
               compare the larger region with the next region of the incremented type. */
            if (m_end <= r_end) {
                idx_a++;
            } else {
                idx_b++;
            }

            /* store the position for the next iteration */
            it->__idx = ITER(idx_a, idx_b);
            return;
        }
    }

    /* ULLONG_MAX signals the end of the iteration */
    it->__idx = ITER_END;
}
|
||||
@@ -1,54 +0,0 @@
|
||||
#ifndef SOCKS_QUEUE_H_
|
||||
#define SOCKS_QUEUE_H_
|
||||
|
||||
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
|
||||
|
||||
/* recover a pointer to the enclosing structure of type `t` from a pointer
   `v` to its member `m`; yields NULL when `v` is NULL, which lets the
   foreach macros terminate cleanly. */
#define QUEUE_CONTAINER(t, m, v) ((void *)((v) ? (uintptr_t)(v) - (offsetof(t, m)) : 0))

/* static initialisers for an empty queue / a detached entry. */
#define QUEUE_INIT ((queue_t){ .q_first = NULL, .q_last = NULL })
#define QUEUE_ENTRY_INIT ((queue_entry_t){ .qe_next = NULL, .qe_prev = NULL })

/* iterate front-to-back over a queue of `iter_type` elements, where
   `node_member` names the queue_entry_t member inside `iter_type`. */
#define queue_foreach(iter_type, iter_name, queue_name, node_member) \
    for (iter_type *iter_name = QUEUE_CONTAINER(iter_type, node_member, queue_first(queue_name)); \
         iter_name; \
         iter_name = QUEUE_CONTAINER(iter_type, node_member, queue_next(&((iter_name)->node_member))))

/* as queue_foreach, but iterates back-to-front. */
#define queue_foreach_r(iter_type, iter_name, queue_name, node_member) \
    for (iter_type *iter_name = QUEUE_CONTAINER(iter_type, node_member, queue_last(queue_name)); \
         iter_name; \
         iter_name = QUEUE_CONTAINER(iter_type, node_member, queue_prev(&((iter_name)->node_member))))
|
||||
|
||||
/* linkage embedded inside user structures; a NULL pointer marks the
   corresponding end of the list. */
typedef struct queue_entry {
    struct queue_entry *qe_next;
    struct queue_entry *qe_prev;
} queue_entry_t;

/* a doubly-linked queue; q_first / q_last are NULL when the queue is empty. */
typedef struct queue {
    queue_entry_t *q_first;
    queue_entry_t *q_last;
} queue_t;
|
||||
|
||||
/* reset a queue to the empty state. */
static inline void queue_init(queue_t *q) { memset(q, 0x00, sizeof *q); }
/* true when the queue contains no entries. */
static inline bool queue_empty(queue_t *q) { return q->q_first == NULL; }

/* accessors for walking the queue; all return NULL at the ends. */
static inline queue_entry_t *queue_first(queue_t *q) { return q->q_first; }
static inline queue_entry_t *queue_last(queue_t *q) { return q->q_last; }
static inline queue_entry_t *queue_next(queue_entry_t *entry) { return entry->qe_next; }
static inline queue_entry_t *queue_prev(queue_entry_t *entry) { return entry->qe_prev; }

/* count the entries in `q` by walking the list (O(n)). */
extern size_t queue_length(queue_t *q);

/* insert `entry` immediately before / after an entry already in `q`. */
extern void queue_insert_before(queue_t *q, queue_entry_t *entry, queue_entry_t *before);
extern void queue_insert_after(queue_t *q, queue_entry_t *entry, queue_entry_t *after);

/* add `entry` at the head / tail of `q`. */
extern void queue_push_front(queue_t *q, queue_entry_t *entry);
extern void queue_push_back(queue_t *q, queue_entry_t *entry);

/* detach and return the head / tail entry, or NULL if the queue is empty. */
extern queue_entry_t *queue_pop_front(queue_t *q);
extern queue_entry_t *queue_pop_back(queue_t *q);

/* unlink `entry` / all entries from `q`. */
extern void queue_delete(queue_t *q, queue_entry_t *entry);
extern void queue_delete_all(queue_t *q);
|
||||
|
||||
#endif
|
||||
@@ -1,134 +0,0 @@
|
||||
#include <socks/queue.h>
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
|
||||
/* Count the entries currently linked into `q` by walking the list; O(n). */
size_t queue_length(queue_t *q)
{
    size_t count = 0;

    for (queue_entry_t *cur = q->q_first; cur != NULL; cur = cur->qe_next) {
        count++;
    }

    return count;
}
|
||||
|
||||
/* Link `entry` into `q` immediately before `before`.
   `before` must already be a member of `q`; `entry` must not be. */
void queue_insert_before(queue_t *q, queue_entry_t *entry, queue_entry_t *before)
{
    queue_entry_t *prev = before->qe_prev;

    entry->qe_prev = prev;
    entry->qe_next = before;
    before->qe_prev = entry;

    if (prev == NULL) {
        /* `before` was the head, so `entry` becomes the new head */
        q->q_first = entry;
    } else {
        prev->qe_next = entry;
    }
}
|
||||
|
||||
/* Link `entry` into `q` immediately after `after`.
   `after` must already be a member of `q`; `entry` must not be. */
void queue_insert_after(queue_t *q, queue_entry_t *entry, queue_entry_t *after)
{
    queue_entry_t *x = after->qe_next;
    if (x) {
        x->qe_prev = entry;
    } else {
        /* `after` was the tail; `entry` becomes the new tail */
        q->q_last = entry;
    }

    /* BUG FIX: the original assigned `x` to entry->qe_prev (which was then
       immediately overwritten below), leaving entry->qe_next stale and the
       forward chain broken past `entry`. */
    entry->qe_next = x;

    after->qe_next = entry;
    entry->qe_prev = after;
}
|
||||
|
||||
/* Prepend `entry` to `q` in O(1). */
void queue_push_front(queue_t *q, queue_entry_t *entry)
{
    queue_entry_t *old_head = q->q_first;

    entry->qe_prev = NULL;
    entry->qe_next = old_head;

    if (old_head != NULL) {
        old_head->qe_prev = entry;
    }

    q->q_first = entry;

    /* first element of an empty queue is also the tail */
    if (q->q_last == NULL) {
        q->q_last = entry;
    }
}
|
||||
|
||||
/* Append `entry` to `q` in O(1). */
void queue_push_back(queue_t *q, queue_entry_t *entry)
{
    queue_entry_t *old_tail = q->q_last;

    entry->qe_next = NULL;
    entry->qe_prev = old_tail;

    if (old_tail != NULL) {
        old_tail->qe_next = entry;
    }

    q->q_last = entry;

    /* first element of an empty queue is also the head */
    if (q->q_first == NULL) {
        q->q_first = entry;
    }
}
|
||||
|
||||
/* Remove and return the head of `q`, or NULL when the queue is empty. */
queue_entry_t *queue_pop_front(queue_t *q)
{
    queue_entry_t *head = queue_first(q);

    if (head != NULL) {
        queue_delete(q, head);
    }

    return head;
}
|
||||
|
||||
/* Remove and return the tail of `q`, or NULL when the queue is empty. */
queue_entry_t *queue_pop_back(queue_t *q)
{
    queue_entry_t *tail = queue_last(q);

    if (tail != NULL) {
        queue_delete(q, tail);
    }

    return tail;
}
|
||||
|
||||
/* Unlink `entry` from `q` and reset its links.  NULL-safe.
   Head/tail are fixed up first, then the neighbours are stitched
   together; `entry`'s own pointers are still valid until the end. */
void queue_delete(queue_t *q, queue_entry_t *entry)
{
    if (!entry) {
        return;
    }

    /* advance the head past `entry` if it was first */
    if (entry == q->q_first) {
        q->q_first = q->q_first->qe_next;
    }

    /* retreat the tail before `entry` if it was last */
    if (entry == q->q_last) {
        q->q_last = q->q_last->qe_prev;
    }

    /* stitch the neighbours together around `entry` */
    if (entry->qe_next) {
        entry->qe_next->qe_prev = entry->qe_prev;
    }

    if (entry->qe_prev) {
        entry->qe_prev->qe_next = entry->qe_next;
    }

    /* leave the removed entry fully unlinked */
    entry->qe_next = entry->qe_prev = NULL;
}
|
||||
|
||||
/* Unlink every entry from `q`, leaving the queue empty.  Each entry's
   links are reset; the entries themselves are not freed (the queue does
   not own its members). */
void queue_delete_all(queue_t *q)
{
    queue_entry_t *cur = q->q_first;

    while (cur != NULL) {
        queue_entry_t *following = cur->qe_next;

        cur->qe_next = NULL;
        cur->qe_prev = NULL;

        cur = following;
    }

    q->q_first = NULL;
    q->q_last = NULL;
}
|
||||
@@ -1,66 +0,0 @@
|
||||
#include <stddef.h>
|
||||
#include <stdio.h>
|
||||
|
||||
/* One scale step for data_size_to_string(): values of at least
   `threshold` bytes are rendered with `suffix`. */
struct magnitude {
    size_t threshold;
    const char *suffix;
};

/* use binary units (KiB/MiB/...) rather than decimal (KB/MB/...) */
#define USE_BASE2

/* This list must be in descending order of threshold */
#ifdef USE_BASE2
static struct magnitude k_magnitudes[] = {
    { 0x10000000000, "TiB" },
    { 0x40000000, "GiB" },
    { 0x100000, "MiB" },
    { 0x400, "KiB" },
    { 0x00, "B" },
};
#else
static struct magnitude k_magnitudes[] = {
    { 1000000000000, "TB" },
    { 1000000000, "GB" },
    { 1000000, "MB" },
    { 1000, "KB" },
    { 0, "B" },
};
#endif
|
||||
|
||||
/* Split `val` into a whole part (`*big`) and a single decimal digit of
   fraction (`*small`) relative to `threshold`.
   A threshold of 0 means "no scaling": *big = val, *small = 0. */
static void convert(size_t *big, size_t *small, size_t val, size_t threshold)
{
    if (threshold == 0) {
        *big = val;
        *small = 0;
        return;
    }

    size_t whole = val / threshold;
    size_t remainder = val % threshold;

    *big = whole;
    *small = (remainder * 10) / threshold;
}
|
||||
|
||||
void data_size_to_string(size_t value, char *out, size_t outsz)
|
||||
{
|
||||
if (!value) {
|
||||
snprintf(out, outsz, "0 B");
|
||||
return;
|
||||
}
|
||||
|
||||
size_t def_count = sizeof(k_magnitudes) / sizeof(struct magnitude);
|
||||
|
||||
for (size_t i = 0; i < def_count; i++) {
|
||||
if (value > k_magnitudes[i].threshold) {
|
||||
size_t big, small;
|
||||
convert(&big, &small, value, k_magnitudes[i].threshold);
|
||||
if (small) {
|
||||
snprintf(out, outsz, "%zu.%zu %s", big, small, k_magnitudes[i].suffix);
|
||||
} else {
|
||||
snprintf(out, outsz, "%zu %s", big, k_magnitudes[i].suffix);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
*out = 0;
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
#ifndef SOCKS_UTIL_H_
#define SOCKS_UTIL_H_

#include <stddef.h>

/* Render the byte count `value` as a human-readable string (e.g. "1.5 MiB")
   into `out`, which has capacity `outsz`. */
extern void data_size_to_string(size_t value, char *out, size_t outsz);

#endif
|
||||
@@ -1,36 +0,0 @@
|
||||
#include <socks/status.h>
|
||||
#include <limits.h>
|
||||
#include <socks/vm.h>
|
||||
#include <socks/memblock.h>
|
||||
#include <stddef.h>
|
||||
#include <limits.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
|
||||
/* One vm_pg_data_t per NUMA node. */
static vm_pg_data_t *node_data = NULL;

/* Bring up the physical-memory subsystem: allocate the per-node data,
   build the global page-frame array, then initialise each described zone.
   NOTE(review): the memblock_alloc() result is used unchecked — confirm
   it cannot fail this early in boot, or add an error path. */
kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones)
{
    int numa_count = 1;

    /* we're only worrying about UMA systems for now */
    node_data = memblock_alloc(sizeof(vm_pg_data_t) * numa_count);

    vm_page_init_array();

    /* zd_id doubles as the index into pg_zones (see vm_zone_id_t) */
    for (size_t i = 0; i < nr_zones; i++) {
        vm_zone_init(&node_data->pg_zones[zones[i].zd_id], &zones[i]);
    }

    return KERN_OK;
}
|
||||
|
||||
/* Look up the per-node VM data.  Only the single UMA node (id 0) exists
   for now; any other id yields NULL. */
vm_pg_data_t *vm_pg_data_get(vm_node_id_t node)
{
    return (node == 0) ? node_data : NULL;
}
|
||||
@@ -1,217 +0,0 @@
|
||||
#include <socks/queue.h>
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
#include <socks/vm.h>
|
||||
|
||||
/* Sentinel marking the end of a slab's embedded freelist chain. */
#define FREELIST_END ((unsigned int)-1)

/* The cache that vm_cache_t objects themselves come from; lazily
   initialised on the first vm_cache_create() call. */
static vm_cache_t cache_cache = { .c_name = "vm_cache", .c_obj_size = sizeof(vm_cache_t) };
|
||||
|
||||
/* Allocate and initialise a new object cache for objects of `objsz` bytes.
   `name` is stored by reference and must outlive the cache.
   Returns NULL when the backing allocation fails.
   BUG FIX: the vm_cache_alloc() result was previously dereferenced
   without a NULL check. */
vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags)
{
    /* the cache-of-caches bootstraps itself on first use */
    if (!VM_CACHE_INITIALISED(&cache_cache)) {
        vm_cache_init(&cache_cache);
    }

    vm_cache_t *new_cache = vm_cache_alloc(&cache_cache, 0);
    if (!new_cache) {
        return NULL;
    }

    new_cache->c_name = name;
    new_cache->c_obj_size = objsz;
    new_cache->c_flags = flags;

    vm_cache_init(new_cache);

    return new_cache;
}
|
||||
|
||||
/* Compute the static layout of `cache`: stride, objects-per-slab, header
   size, and on-/off-slab placement.  Must run before the first
   vm_cache_alloc() (VM_CACHE_INITIALISED tests c_obj_count != 0). */
void vm_cache_init(vm_cache_t *cache)
{
    cache->c_page_order = VM_PAGE_16K;
    /* large objects keep the slab header (and freelist) off-slab so the
       page is not dominated by metadata */
    if (cache->c_obj_size >= 512) {
        cache->c_flags |= VM_CACHE_OFFSLAB;
    }

    size_t available = vm_page_order_to_bytes(cache->c_page_order);
    size_t space_per_item = cache->c_obj_size;

    /* align to 16-byte boundary */
    if (space_per_item & 0xF) {
        space_per_item &= ~0xF;
        space_per_item += 0x10;
    }

    cache->c_stride = space_per_item;

    /* on-slab caches embed the vm_slab_t header at the start of the page */
    if (!(cache->c_flags & VM_CACHE_OFFSLAB)) {
        available -= sizeof(vm_slab_t);
    }

    /* one entry in the freelist per object slot */
    /* NOTE(review): this per-object freelist charge is applied even for
       off-slab caches, whose freelist actually lives in the kmalloc'd
       header — slightly undercounts slab capacity; confirm intentional. */
    space_per_item += sizeof(unsigned int);

    cache->c_obj_count = available / space_per_item;
    cache->c_slabs_full = QUEUE_INIT;
    cache->c_slabs_partial = QUEUE_INIT;
    cache->c_slabs_empty = QUEUE_INIT;

    /* header = fixed struct + one freelist entry per object slot */
    cache->c_hdr_size = sizeof(vm_slab_t) + (sizeof(unsigned int) * cache->c_obj_count);
}
|
||||
|
||||
/* Tear down `cache` and release its slabs.
   Not yet implemented — currently a no-op, so destroyed caches leak. */
void vm_cache_destroy(vm_cache_t *cache)
{
    /* TODO */
}
|
||||
|
||||
/* Create a fresh slab for `cache`: allocate the backing page block, place
   the header (on- or off-slab), build the freelist, and point every
   backing page at the new header.  Returns NULL on allocation failure.
   BUG FIX: the results of vm_page_alloc() and kmalloc() were previously
   dereferenced without NULL checks. */
static vm_slab_t *alloc_slab(vm_cache_t *cache, vm_flags_t flags)
{
    vm_page_t *slab_page = vm_page_alloc(cache->c_page_order, flags);
    if (!slab_page) {
        return NULL;
    }

    vm_slab_t *slab_hdr = NULL;
    void *slab_data = vm_page_get_vaddr(slab_page);

    if (cache->c_flags & VM_CACHE_OFFSLAB) {
        /* NOTE the circular dependency here:

           kmalloc -> vm_cache_alloc -> alloc_slab -> kmalloc

           since this call path is only used for caches with
           VM_CACHE_OFFSLAB set, we avoid the circular dependency
           by ensuring the small size-N (where N < 512) caches
           (which don't use that flag) are initialised before
           attempting to allocate from an offslab cache. */
        slab_hdr = kmalloc(cache->c_hdr_size, flags);
        if (!slab_hdr) {
            /* don't leak the page block on header-allocation failure */
            vm_page_free(slab_page);
            return NULL;
        }
        slab_hdr->s_objects = slab_data;
    } else {
        /* on-slab: header at the start of the page, objects after it */
        slab_hdr = slab_data;
        slab_hdr->s_objects = (void *)((char *)slab_data + cache->c_hdr_size);
    }

    slab_hdr->s_cache = cache;
    slab_hdr->s_list = QUEUE_ENTRY_INIT;
    slab_hdr->s_obj_allocated = 0;
    slab_hdr->s_free = 0;

    /* chain every slot to its successor... */
    for (unsigned int i = 0; i < cache->c_obj_count; i++) {
        slab_hdr->s_freelist[i] = i + 1;
    }

    /* ...and terminate the chain at the last slot */
    slab_hdr->s_freelist[cache->c_obj_count - 1] = FREELIST_END;

    /* every backing page frame points back at its slab header so
       vm_cache_free() can find it from an object address */
    vm_page_foreach (slab_page, i) {
        i->p_slab = slab_hdr;
    }

    return slab_hdr;
}
|
||||
|
||||
/* Release a slab's pages (and off-slab header) back to the allocators.
   Not yet implemented — slabs are currently never reclaimed. */
static void destroy_slab(vm_slab_t *slab)
{

}
|
||||
|
||||
/* Pop the next free object index off the slab's freelist.
   Returns FREELIST_END when the slab has no free slots. */
static unsigned int slab_allocate_slot(vm_slab_t *slab)
{
    unsigned int slot = slab->s_free;

    if (slot == FREELIST_END) {
        return FREELIST_END;
    }

    /* advance the freelist head past the slot we're handing out */
    slab->s_free = slab->s_freelist[slot];
    slab->s_obj_allocated++;

    return slot;
}
|
||||
|
||||
/* Push `slot` back onto the head of the slab's freelist. */
static void slab_free_slot(vm_slab_t *slab, unsigned int slot)
{
    slab->s_freelist[slot] = slab->s_free;
    slab->s_free = slot;
    slab->s_obj_allocated--;
}
|
||||
|
||||
/* Translate an object slot index into the object's address within the
   slab's data area (slots are c_stride bytes apart). */
static void *slot_to_pointer(vm_slab_t *slab, unsigned int slot)
{
    char *base = slab->s_objects;
    size_t offset = (size_t)slot * slab->s_cache->c_stride;
    return base + offset;
}
|
||||
|
||||
/* Translate an object address back into its slot index.
   `p` must point into this slab's data area. */
static unsigned int pointer_to_slot(vm_slab_t *slab, void *p)
{
    uintptr_t delta = (uintptr_t)p - (uintptr_t)slab->s_objects;
    return (unsigned int)(delta / slab->s_cache->c_stride);
}
|
||||
|
||||
/* Allocate one object from `cache`.  Prefers a partially-used slab, then
   an empty one, and only builds a fresh slab when both lists are
   exhausted.  Returns NULL when a new slab cannot be allocated.
   Runs under the cache spinlock with interrupts saved.
   NOTE(review): slab_allocate_slot() can return FREELIST_END and
   slot_to_pointer() is called on the result unchecked — safe only if
   slabs on the partial/empty lists always have a free slot; confirm. */
void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags)
{
    unsigned long irq_flags;
    spin_lock_irqsave(&cache->c_lock, &irq_flags);

    vm_slab_t *slab = NULL;
    if (!queue_empty(&cache->c_slabs_partial)) {
        /* prefer using up partially-full slabs before taking a fresh one */
        queue_entry_t *slab_entry = queue_pop_front(&cache->c_slabs_partial);
        assert(slab_entry);
        slab = QUEUE_CONTAINER(vm_slab_t, s_list, slab_entry);
    } else if (!queue_empty(&cache->c_slabs_empty)) {
        queue_entry_t *slab_entry = queue_pop_front(&cache->c_slabs_empty);
        assert(slab_entry);
        slab = QUEUE_CONTAINER(vm_slab_t, s_list, slab_entry);
    } else {
        /* we've run out of slabs. create a new one */
        slab = alloc_slab(cache, flags);
    }

    if (!slab) {
        spin_unlock_irqrestore(&cache->c_lock, irq_flags);
        return NULL;
    }

    unsigned int slot = slab_allocate_slot(slab);
    void *p = slot_to_pointer(slab, slot);

    /* re-file the slab according to whether it still has free slots
       (it was popped off its list above, or is brand new) */
    if (slab->s_free == FREELIST_END) {
        queue_push_back(&cache->c_slabs_full, &slab->s_list);
    } else {
        queue_push_back(&cache->c_slabs_partial, &slab->s_list);
    }

    spin_unlock_irqrestore(&cache->c_lock, irq_flags);
    return p;
}
|
||||
|
||||
/* Return object `p` to `cache`.  The owning slab is found via the
   physical page's p_slab back-pointer; frees of pointers that don't
   belong to this cache are silently ignored.
   Runs under the cache spinlock with interrupts saved. */
void vm_cache_free(vm_cache_t *cache, void *p)
{
    unsigned long irq_flags;
    spin_lock_irqsave(&cache->c_lock, &irq_flags);

    /* locate the page frame backing `p`, then its slab header */
    phys_addr_t phys = vm_virt_to_phys(p);
    vm_page_t *pg = vm_page_get(phys);

    if (!pg || !pg->p_slab) {
        /* not a slab-backed address; ignore */
        spin_unlock_irqrestore(&cache->c_lock, irq_flags);
        return;
    }

    vm_slab_t *slab = pg->p_slab;

    if (slab->s_cache != cache) {
        /* object belongs to a different cache; ignore */
        spin_unlock_irqrestore(&cache->c_lock, irq_flags);
        return;
    }

    /* unlink the slab from whichever list reflects its pre-free state */
    if (slab->s_free == FREELIST_END) {
        queue_delete(&cache->c_slabs_full, &slab->s_list);
    } else {
        queue_delete(&cache->c_slabs_partial, &slab->s_list);
    }

    unsigned int slot = pointer_to_slot(slab, p);
    slab_free_slot(slab, slot);

    /* re-file according to the slab's post-free occupancy */
    if (slab->s_obj_allocated == 0) {
        queue_push_back(&cache->c_slabs_empty, &slab->s_list);
    } else {
        queue_push_back(&cache->c_slabs_partial, &slab->s_list);
    }

    spin_unlock_irqrestore(&cache->c_lock, irq_flags);
}
|
||||
@@ -1,241 +0,0 @@
|
||||
#ifndef SOCKS_VM_H_
|
||||
#define SOCKS_VM_H_
|
||||
|
||||
#include <stddef.h>
|
||||
#include <socks/types.h>
|
||||
#include <socks/status.h>
|
||||
#include <socks/queue.h>
|
||||
#include <socks/locks.h>
|
||||
|
||||
/* maximum number of NUMA nodes */
#define VM_MAX_NODES 64
/* maximum number of memory zones per node */
#define VM_MAX_ZONES (VM_ZONE_MAX + 1)
/* maximum number of supported page orders */
#define VM_MAX_PAGE_ORDERS (VM_PAGE_MAX_ORDER + 1)

/* true when `p` has no bits set below the alignment described by `mask`
   (masks come from vm_page_order_to_alignment()) */
#define VM_CHECK_ALIGN(p, mask) ((((p) & (mask)) == (p)) ? 1 : 0)
#define VM_PAGE_SIZE 0x1000
#define VM_PAGE_SHIFT 12

/* a cache counts as initialised once vm_cache_init() has computed its
   layout (which always yields a non-zero c_obj_count) */
#define VM_CACHE_INITIALISED(c) ((c)->c_obj_count != 0)
#define VM_PAGE_IS_FREE(pg) (((pg)->p_flags & (VM_PAGE_RESERVED | VM_PAGE_ALLOC)) == 0)

/* iterate over every frame of the page block whose head page is `pg` */
#define vm_page_foreach(pg, i) \
    for (vm_page_t *i = (pg); i; i = vm_page_get_next_tail(i))
|
||||
|
||||
/* alignment masks share the representation of physical addresses */
typedef phys_addr_t vm_alignment_t;
typedef unsigned int vm_node_id_t;

/* placeholder for a future VM object abstraction */
typedef struct vm_object {
    unsigned int reserved;
} vm_object_t;

/* allocation behaviour flags */
typedef enum vm_flags {
    VM_GET_DMA = 0x01u,    /* allocate from the DMA zone */
} vm_flags_t;

typedef enum vm_zone_id {
    /* NOTE that these are used as indices into the node_zones array in vm/zone.c
       they need to be continuous, and must start at 0! */
    VM_ZONE_DMA = 0u,
    VM_ZONE_NORMAL = 1u,
    VM_ZONE_HIGHMEM = 2u,
    VM_ZONE_MIN = VM_ZONE_DMA,
    VM_ZONE_MAX = VM_ZONE_HIGHMEM,
} vm_zone_id_t;
|
||||
|
||||
/* Page block sizes, as order values: order N covers 4K << N bytes
   (see the page_order_bytes table in vm/page.c). */
typedef enum vm_page_order {
    VM_PAGE_4K = 0u,
    VM_PAGE_8K,
    VM_PAGE_16K,
    VM_PAGE_32K,
    VM_PAGE_64K,
    VM_PAGE_128K,
    VM_PAGE_256K,
    VM_PAGE_512K,
    VM_PAGE_1M,
    VM_PAGE_2M,
    VM_PAGE_4M,
    VM_PAGE_8M,
    VM_PAGE_16M,
    VM_PAGE_32M,
    VM_PAGE_64M,
    VM_PAGE_128M,
#if 0
    /* vm_page_t only has 4 bits to store the page order with.
       the maximum order that can be stored in 4 bits is 15 (VM_PAGE_128M)
       to use any of the page orders listed here, this field
       will have to be expanded. */
    VM_PAGE_256M,
    VM_PAGE_512M,
    VM_PAGE_1G,
#endif
    VM_PAGE_MIN_ORDER = VM_PAGE_4K,
    VM_PAGE_MAX_ORDER = VM_PAGE_8M,
} vm_page_order_t;
|
||||
|
||||
/* Per-page-frame state bits, stored in vm_page_t.p_flags. */
typedef enum vm_page_flags {
    /* page is reserved (probably by a call to memblock_reserve()) and cannot be
       returned by any allocation function */
    VM_PAGE_RESERVED = 0x01u,
    /* page has been allocated by a zone's buddy allocator, and is in-use */
    VM_PAGE_ALLOC = 0x02u,
    /* page is the first page of a huge-page */
    VM_PAGE_HEAD = 0x04u,
    /* page is part of a huge-page */
    VM_PAGE_HUGE = 0x08u,
} vm_page_flags_t;

/* classification of a physical memory region during bootstrap */
typedef enum vm_memory_region_status {
    VM_REGION_FREE = 0x01u,
    VM_REGION_RESERVED = 0x02u,
} vm_memory_region_status_t;

/* slab-cache behaviour flags */
typedef enum vm_cache_flags {
    VM_CACHE_OFFSLAB = 0x01u,    /* slab header lives outside the slab page */
    VM_CACHE_DMA = 0x02u
} vm_cache_flags_t;
|
||||
|
||||
/* Static description of one memory zone, supplied to vm_bootstrap(). */
typedef struct vm_zone_descriptor {
    vm_zone_id_t zd_id;          /* which zone this is (also its index) */
    vm_node_id_t zd_node;        /* owning NUMA node */
    const char zd_name[32];      /* human-readable name for diagnostics */
    phys_addr_t zd_base;         /* first physical address of the zone */
    phys_addr_t zd_limit;        /* last physical address of the zone */
} vm_zone_descriptor_t;

/* Runtime state of a zone: its buddy free-lists, one per page order. */
typedef struct vm_zone {
    vm_zone_descriptor_t z_info;
    spin_lock_t z_lock;

    queue_t z_free_pages[VM_MAX_PAGE_ORDERS];    /* free blocks by order */
    unsigned long z_size;
} vm_zone_t;

/* Per-NUMA-node data: the node's zones. */
typedef struct vm_pg_data {
    vm_zone_t pg_zones[VM_MAX_ZONES];
} vm_pg_data_t;

/* A contiguous physical region with a free/reserved classification. */
typedef struct vm_region {
    vm_memory_region_status_t r_status;
    phys_addr_t r_base;
    phys_addr_t r_limit;
} vm_region_t;
|
||||
|
||||
/* An object cache (slab allocator front-end) for fixed-size objects. */
typedef struct vm_cache {
    const char *c_name;          /* stored by reference; must outlive the cache */
    vm_cache_flags_t c_flags;
    queue_entry_t c_list;

    /* slabs filed by occupancy */
    queue_t c_slabs_full;
    queue_t c_slabs_partial;
    queue_t c_slabs_empty;

    spin_lock_t c_lock;

    /* number of objects that can be stored in a single slab */
    unsigned int c_obj_count;
    /* the size of object kept in the cache */
    unsigned int c_obj_size;
    /* combined size of vm_slab_t and the freelist */
    unsigned int c_hdr_size;
    /* offset from one object to the next in a slab.
       this may be different from c_obj_size as
       we enforce a 16-byte alignment on allocated objects */
    unsigned int c_stride;
    /* size of page used for slabs */
    unsigned int c_page_order;
} vm_cache_t;

/* Per-slab header; lives on the slab page itself, or off-slab for large
   object sizes (VM_CACHE_OFFSLAB). */
typedef struct vm_slab {
    vm_cache_t *s_cache;         /* owning cache */
    /* queue entry for vm_cache_t.c_slabs_* */
    queue_entry_t s_list;
    /* pointer to the first object slot. */
    void *s_objects;
    /* the number of objects allocated on the slab. */
    unsigned int s_obj_allocated;
    /* the index of the next free object.
       if s_free is equal to FREELIST_END (defined in vm/cache.c)
       there are no free slots left in the slab. */
    unsigned int s_free;
    /* list of free object slots.
       when allocating:
       - s_free should be set to the value of s_freelist[s_free]
       when freeing:
       - s_free should be set to the index of the object being freed.
       - s_freelist[s_free] should be set to the previous value of s_free.
    */
    unsigned int s_freelist[];
} vm_slab_t;
|
||||
|
||||
/* Per-page-frame descriptor; one per 4K physical frame (see page_array
   in vm/page.c). */
typedef struct vm_page {
    /* order of the page block that this page belongs too */
    uint16_t p_order : 4;
    /* the id of the NUMA node that this page belongs to */
    uint16_t p_node : 6;
    /* the id of the memory zone that this page belongs to */
    uint16_t p_zone : 3;
    /* some unused bits */
    uint16_t p_reserved : 3;

    /* vm_page_flags_t bitfields. */
    uint32_t p_flags;

    /* multi-purpose list.
       the owner of the page can decide what to do with this.
       some examples:
       - the buddy allocator uses this to maintain its per-zone free-page lists.
    */
    queue_entry_t p_list;

    /* owner-specific data */
    union {
        struct {
            /* back-pointer to the slab this frame backs (slab allocator) */
            vm_slab_t *p_slab;
        };
    };

} __attribute__((aligned(2 * sizeof(unsigned long)))) vm_page_t;
|
||||
|
||||
extern kern_status_t vm_bootstrap(const vm_zone_descriptor_t *zones, size_t nr_zones);
|
||||
|
||||
extern vm_pg_data_t *vm_pg_data_get(vm_node_id_t node);
|
||||
|
||||
extern phys_addr_t vm_virt_to_phys(void *p);
|
||||
|
||||
extern void vm_page_init_array();
|
||||
extern vm_page_t *vm_page_get(phys_addr_t addr);
|
||||
extern phys_addr_t vm_page_get_paddr(vm_page_t *pg);
|
||||
extern vm_zone_t *vm_page_get_zone(vm_page_t *pg);
|
||||
extern void *vm_page_get_vaddr(vm_page_t *pg);
|
||||
extern size_t vm_page_get_pfn(vm_page_t *pg);
|
||||
extern size_t vm_page_order_to_bytes(vm_page_order_t order);
|
||||
extern size_t vm_page_order_to_pages(vm_page_order_t order);
|
||||
extern vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order);
|
||||
extern vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags);
|
||||
extern void vm_page_free(vm_page_t *pg);
|
||||
|
||||
extern int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b);
|
||||
extern vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b);
|
||||
extern vm_page_t *vm_page_get_buddy(vm_page_t *pg);
|
||||
extern vm_page_t *vm_page_get_next_tail(vm_page_t *pg);
|
||||
|
||||
extern size_t vm_bytes_to_pages(size_t bytes);
|
||||
|
||||
extern void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info);
|
||||
extern vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags);
|
||||
extern void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg);
|
||||
|
||||
extern vm_cache_t *vm_cache_create(const char *name, size_t objsz, vm_cache_flags_t flags);
|
||||
extern void vm_cache_init(vm_cache_t *cache);
|
||||
extern void vm_cache_destroy(vm_cache_t *cache);
|
||||
extern void *vm_cache_alloc(vm_cache_t *cache, vm_flags_t flags);
|
||||
extern void vm_cache_free(vm_cache_t *cache, void *p);
|
||||
|
||||
extern void *kmalloc(size_t count, vm_flags_t flags);
|
||||
extern void *kzalloc(size_t count, vm_flags_t flags);
|
||||
extern void kfree(void *p);
|
||||
|
||||
#endif
|
||||
@@ -1,73 +0,0 @@
|
||||
#include <socks/vm.h>
|
||||
#include <string.h>
|
||||
|
||||
/* Static initialiser for a general-purpose "size-N" cache of N-byte objects. */
#define SIZE_N_CACHE(s) \
    { .c_name = "size-" # s, .c_obj_size = s, .c_page_order = VM_PAGE_16K }
|
||||
|
||||
/* reserve space for the size-N caches: */
/* these are lazily initialised by kmalloc() on first use;
   NOTE(review): 388 breaks the otherwise regular progression — possibly a
   typo for 384; confirm. */
static vm_cache_t size_n_caches[] = {
    SIZE_N_CACHE(16),
    SIZE_N_CACHE(32),
    SIZE_N_CACHE(48),
    SIZE_N_CACHE(64),
    SIZE_N_CACHE(96),
    SIZE_N_CACHE(128),
    SIZE_N_CACHE(160),
    SIZE_N_CACHE(256),
    SIZE_N_CACHE(388),
    SIZE_N_CACHE(512),
    SIZE_N_CACHE(576),
    SIZE_N_CACHE(768),
    SIZE_N_CACHE(1024),
    SIZE_N_CACHE(1664),
    SIZE_N_CACHE(2048),
    SIZE_N_CACHE(3072),
    SIZE_N_CACHE(4096),
};
static const size_t nr_size_n_caches = sizeof size_n_caches / sizeof size_n_caches[0];
|
||||
|
||||
/* Allocate at least `count` bytes from the smallest size-N cache that
   fits.  Returns NULL for count == 0, for requests larger than the
   largest size-N cache (4096 bytes), or when the cache cannot supply an
   object.
   NOTE(review): the lazy vm_cache_init() below is unsynchronised —
   confirm kmalloc cannot be entered concurrently before all caches are
   initialised. */
void *kmalloc(size_t count, vm_flags_t flags)
{
    if (!count) {
        return NULL;
    }

    /* caches are sorted ascending, so the first fit is the best fit */
    vm_cache_t *best_fit = NULL;
    for (size_t i = 0; i < nr_size_n_caches; i++) {
        if (size_n_caches[i].c_obj_size >= count) {
            best_fit = &size_n_caches[i];
            break;
        }
    }

    if (!best_fit) {
        /* request larger than any size-N cache */
        return NULL;
    }

    /* lazily compute the cache layout on first use */
    if (!VM_CACHE_INITIALISED(best_fit)) {
        vm_cache_init(best_fit);
    }

    return vm_cache_alloc(best_fit, flags);
}
|
||||
|
||||
/* kmalloc() plus zero-initialisation of the returned memory. */
void *kzalloc(size_t count, vm_flags_t flags)
{
    void *mem = kmalloc(count, flags);

    if (mem != NULL) {
        memset(mem, 0, count);
    }

    return mem;
}
|
||||
|
||||
void kfree(void *p)
|
||||
{
|
||||
phys_addr_t phys = vm_virt_to_phys(p);
|
||||
vm_page_t *pg = vm_page_get(phys);
|
||||
if (!pg || !pg->p_slab) {
|
||||
return;
|
||||
}
|
||||
|
||||
vm_cache_free(pg->p_slab->s_cache, p);
|
||||
}
|
||||
@@ -1,296 +0,0 @@
|
||||
#include <socks/types.h>
|
||||
#include <socks/memblock.h>
|
||||
#include <socks/vm.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
|
||||
/* array of pages, one for each physical page frame present in RAM */
static vm_page_t *page_array = NULL;

/* number of pages stored in page_array */
static size_t page_array_count = 0;

/* Pre-calculated page order -> size conversion table */
/* (order N maps to 4K << N bytes) */
static size_t page_order_bytes[] = {
    [VM_PAGE_4K] = 0x1000,
    [VM_PAGE_8K] = 0x2000,
    [VM_PAGE_16K] = 0x4000,
    [VM_PAGE_32K] = 0x8000,
    [VM_PAGE_64K] = 0x10000,
    [VM_PAGE_128K] = 0x20000,
    [VM_PAGE_256K] = 0x40000,
    [VM_PAGE_512K] = 0x80000,
    [VM_PAGE_1M] = 0x100000,
    [VM_PAGE_2M] = 0x200000,
    [VM_PAGE_4M] = 0x400000,
    [VM_PAGE_8M] = 0x800000,
    [VM_PAGE_16M] = 0x1000000,
    [VM_PAGE_32M] = 0x2000000,
    [VM_PAGE_64M] = 0x4000000,
    [VM_PAGE_128M] = 0x8000000,
#if 0
    /* vm can support pages of this size, but
       vm_page_t only has 4 bits with which to store
       the page order, which cannot accomodate these
       larger order numbers */
    [VM_PAGE_256M] = 0x10000000,
    [VM_PAGE_512M] = 0x20000000,
    [VM_PAGE_1G] = 0x40000000,
#endif
};
|
||||
|
||||
/* temporary */
/* linear virtual-mapping window used by vm_virt_to_phys() and
   vm_page_get_vaddr() until a real virtual memory map exists */
static void *tmp_vaddr_base = NULL;
static size_t tmp_vaddr_len = 0;

/* Record the base and length of the temporary linear mapping. */
void tmp_set_vaddr_base(void *p, size_t len)
{
    tmp_vaddr_base = p;
    tmp_vaddr_len = len;
}
|
||||
|
||||
/* Translate a kernel virtual address to a physical address, assuming the
   flat linear mapping starting at tmp_vaddr_base.
   NOTE(review): out-of-window addresses trip the assert rather than
   returning an error — confirm that is acceptable for all callers. */
phys_addr_t vm_virt_to_phys(void *p)
{
    phys_addr_t x = (phys_addr_t)p - (phys_addr_t)tmp_vaddr_base;
    assert(x < tmp_vaddr_len);
    return x;
}
|
||||
|
||||
/* Build the global page-frame array: size it from the highest memory
   limit reported by memblock, zero every descriptor, and mark frames in
   reserved memblock ranges as VM_PAGE_RESERVED.
   NOTE(review): the memblock_alloc() result is used unchecked — confirm
   it cannot fail during bootstrap. */
void vm_page_init_array()
{
    size_t pmem_size = 0;

    /* physical memory size = one past the highest limit of any range */
    memblock_iter_t it;
    for_each_mem_range (&it, 0x0, UINTPTR_MAX) {
        if (pmem_size < it.it_limit + 1) {
            pmem_size = it.it_limit + 1;
        }
    }

    /* round the frame count up to cover a trailing partial page */
    size_t nr_pages = pmem_size / VM_PAGE_SIZE;
    if (pmem_size % VM_PAGE_SIZE) {
        nr_pages++;
    }

    page_array = memblock_alloc(sizeof(vm_page_t) * nr_pages);
    page_array_count = nr_pages;
    printf("page_array covers 0x%zx bytes, %zu page frames\n", pmem_size, pmem_size / VM_PAGE_SIZE);
    printf("page_array is %zu bytes long\n", sizeof(vm_page_t) * nr_pages);

    for (size_t i = 0; i < nr_pages; i++) {
        memset(&page_array[i], 0x0, sizeof page_array[i]);
    }

    /* flag every frame that overlaps a reserved memblock range */
    size_t nr_reserved = 0;
    for_each_reserved_mem_range(&it, 0x0, UINTPTR_MAX) {
        for (uintptr_t i = it.it_base; i < it.it_limit; i += VM_PAGE_SIZE) {
            size_t pfn = i / VM_PAGE_SIZE;

            page_array[pfn].p_flags |= VM_PAGE_RESERVED;
            nr_reserved++;
        }
    }

    printf("%zu reserved page frames\n", nr_reserved);
}
|
||||
|
||||
/* Look up the page descriptor for physical address `addr`.
   Returns NULL when the address lies beyond the known page array. */
vm_page_t *vm_page_get(phys_addr_t addr)
{
    size_t pfn = addr / VM_PAGE_SIZE;

    if (pfn >= page_array_count) {
        return NULL;
    }

    return &page_array[pfn];
}
|
||||
|
||||
/* Physical address of the frame described by `pg`. */
phys_addr_t vm_page_get_paddr(vm_page_t *pg)
{
    size_t pfn = vm_page_get_pfn(pg);
    return pfn * VM_PAGE_SIZE;
}
|
||||
|
||||
/* Kernel virtual address of the frame described by `pg`, via the
   temporary linear mapping window. */
void *vm_page_get_vaddr(vm_page_t *pg)
{
    size_t byte_offset = vm_page_get_pfn(pg) * VM_PAGE_SIZE;
    return (char *)tmp_vaddr_base + byte_offset;
}
|
||||
|
||||
/* Page frame number of `pg`, i.e. its index within page_array. */
size_t vm_page_get_pfn(vm_page_t *pg)
{
    return (size_t)(pg - page_array);
}
|
||||
|
||||
/* Size in bytes of a page block of the given order; 0 for invalid orders. */
size_t vm_page_order_to_bytes(vm_page_order_t order)
{
    int in_range = (order >= 0) && (order <= VM_PAGE_MAX_ORDER);
    return in_range ? page_order_bytes[order] : 0;
}
|
||||
|
||||
/* Number of 4K page frames in a block of the given order; 0 for invalid
   orders.
   FIX: the return type is now size_t, matching the declaration in
   <socks/vm.h> (the definition previously said phys_addr_t). */
size_t vm_page_order_to_pages(vm_page_order_t order)
{
    if (order < 0 || order > VM_PAGE_MAX_ORDER) {
        return 0;
    }

    return page_order_bytes[order] >> VM_PAGE_SHIFT;
}
|
||||
|
||||
/* Alignment mask for blocks of the given order: all bits set except those
   below the block size.  Intended for use with VM_CHECK_ALIGN, which
   tests (p & mask) == p.  Returns 0 for invalid orders. */
vm_alignment_t vm_page_order_to_alignment(vm_page_order_t order)
{
    if (order < 0 || order > VM_PAGE_MAX_ORDER) {
        return 0;
    }

    return ~(page_order_bytes[order] - 1);
}
|
||||
|
||||
|
||||
size_t vm_bytes_to_pages(size_t bytes)
|
||||
{
|
||||
if (bytes & (VM_PAGE_SIZE-1)) {
|
||||
bytes &= ~(VM_PAGE_SIZE-1);
|
||||
bytes += VM_PAGE_SIZE;
|
||||
}
|
||||
|
||||
bytes >>= VM_PAGE_SHIFT;
|
||||
return bytes;
|
||||
}
|
||||
|
||||
/* Resolve the zone that owns `pg` via its node and zone indices.
   Returns NULL when the node is unknown or the zone index is out of
   range.
   FIX: the unknown-node path previously returned a bare `0` — now NULL,
   consistent with the other pointer returns in this file. */
vm_zone_t *vm_page_get_zone(vm_page_t *pg)
{
    vm_pg_data_t *node = vm_pg_data_get(pg->p_node);
    if (!node) {
        return NULL;
    }

    if (pg->p_zone >= VM_MAX_ZONES) {
        return NULL;
    }

    return &node->pg_zones[pg->p_zone];
}
|
||||
|
||||
|
||||
/* Allocate a page block of the given order from node 0.
   Starts in the highest permitted zone (VM_ZONE_DMA directly when
   VM_GET_DMA is set, VM_ZONE_HIGHMEM otherwise) and falls back through
   lower zone ids on failure.  Returns NULL when no zone can satisfy the
   request. */
vm_page_t *vm_page_alloc(vm_page_order_t order, vm_flags_t flags)
{
    /* TODO prefer nodes closer to us */
    vm_pg_data_t *node = vm_pg_data_get(0);
    vm_zone_id_t zone_id = VM_ZONE_HIGHMEM;
    if (flags & VM_GET_DMA) {
        zone_id = VM_ZONE_DMA;
    }

    while (1) {
        vm_zone_t *z = &node->pg_zones[zone_id];

        vm_page_t *pg = vm_zone_alloc_page(z, order, flags);
        if (pg) {
            return pg;
        }

        /* no lower zone left to fall back to */
        if (zone_id == VM_ZONE_MIN) {
            break;
        }

        zone_id--;
    }

    return NULL;
}
|
||||
|
||||
/* Return the page block headed by `pg` to its owning zone's buddy
   allocator.  Silently ignores pages whose zone cannot be resolved. */
void vm_page_free(vm_page_t *pg)
{
    vm_zone_t *owner = vm_page_get_zone(pg);

    if (owner != NULL) {
        vm_zone_free_page(owner, pg);
    }
}
|
||||
|
||||
/* Split the block headed by `pg` into two buddies of the next-lower
   order, returned via `a` (the original head) and `b` (its new buddy).
   Returns -1 when `pg` is already at the minimum order, 0 on success. */
int vm_page_split(vm_page_t *pg, vm_page_t **a, vm_page_t **b)
{
    if (pg->p_order == VM_PAGE_MIN_ORDER) {
        return -1;
    }

    /* NOTE that we cannot use vm_page_foreach here,
       as we are modifying the flags that vm_page_foreach
       uses to determine where a given page block ends */
    /* drop every frame of the old block down one order */
    size_t nr_frames = vm_page_order_to_pages(pg->p_order);
    for (size_t i = 0; i < nr_frames; i++) {
        pg[i].p_order--;
    }

    /* the buddy is found at the new (smaller) order */
    vm_page_t *buddy = vm_page_get_buddy(pg);

    if (pg->p_order == VM_PAGE_MIN_ORDER) {
        /* both halves are now single 4K frames, not huge-pages */
        pg->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
        buddy->p_flags &= ~(VM_PAGE_HUGE | VM_PAGE_HEAD);
    } else {
        /* both halves are still multi-frame blocks with their own heads */
        pg->p_flags |= VM_PAGE_HEAD | VM_PAGE_HUGE;
        buddy->p_flags |= VM_PAGE_HEAD | VM_PAGE_HUGE;
    }

    *a = pg;
    *b = buddy;

    return 0;
}
|
||||
|
||||
/* Merge two buddy blocks `a` and `b` into one block of the next-higher
   order.  Returns the merged head page, or NULL when the pair cannot be
   merged (orders differ, already max order, not actually buddies, or
   mismatched alloc/reserved state). */
vm_page_t *vm_page_merge(vm_page_t *a, vm_page_t *b)
{
    if (a->p_order != b->p_order) {
        return NULL;
    }

    if (a->p_order == VM_PAGE_MAX_ORDER) {
        return NULL;
    }

    if (vm_page_get_buddy(a) != b) {
        return NULL;
    }

    /* refuse to merge blocks in different allocation states */
    if ((a->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED)) != (b->p_flags & (VM_PAGE_ALLOC | VM_PAGE_RESERVED))) {
        return NULL;
    }

    /* make sure that a comes before b */
    if (a > b) {
        vm_page_t *tmp = a;
        a = b;
        b = tmp;
    }

    a->p_order++;

    /* NOTE that we cannot use vm_page_foreach here,
       as we are modifying the flags that vm_page_foreach
       uses to determine where a given page block ends */
    /* restamp every frame of the combined block as a tail of the new,
       larger block */
    size_t nr_frames = vm_page_order_to_pages(a->p_order);
    for (size_t i = 0; i < nr_frames; i++) {
        a[i].p_flags &= ~VM_PAGE_HEAD;
        a[i].p_flags |= VM_PAGE_HUGE;
        a[i].p_order = a->p_order;
    }

    /* then mark the first frame as the single head */
    a->p_flags |= VM_PAGE_HEAD;

    return a;
}
|
||||
|
||||
/* Return the buddy of the block headed by `pg`: the block whose physical
   address differs only in the bit corresponding to the block size. */
vm_page_t *vm_page_get_buddy(vm_page_t *pg)
{
    size_t block_bytes = vm_page_order_to_bytes(pg->p_order);
    phys_addr_t buddy_addr = vm_page_get_paddr(pg) ^ block_bytes;
    return vm_page_get(buddy_addr);
}
|
||||
|
||||
/* Return the next tail frame of the huge-page block containing `pg`, or
   NULL when `pg` is the block's last frame (the next frame is a new head,
   or not part of a huge-page at all).
   NOTE(review): unconditionally reads pg + 1 — for the very last entry of
   page_array this inspects one element past the end; confirm the array is
   over-allocated or otherwise terminated. */
vm_page_t *vm_page_get_next_tail(vm_page_t *pg)
{
    vm_page_t *next = pg + 1;
    if (next->p_flags & VM_PAGE_HEAD || !(next->p_flags & VM_PAGE_HUGE)) {
        return NULL;
    }

    return next;
}
|
||||
@@ -1,231 +0,0 @@
|
||||
#include <socks/locks.h>
|
||||
#include <socks/queue.h>
|
||||
#include <socks/types.h>
|
||||
#include <socks/vm.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <inttypes.h>
|
||||
#include <assert.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
/* Stamp order/node/zone metadata onto every page frame from `base` up to
   `limit` (the block's last byte address, so the `i < limit` loop covers
   each whole frame exactly once) and mark the first frame as the block
   head.  Returns the head page, or NULL if the range was empty. */
static vm_page_t *group_pages_into_block(vm_zone_t *z, phys_addr_t base, phys_addr_t limit, int order)
{
    vm_page_t *first_page = NULL;
    for (phys_addr_t i = base; i < limit; i += VM_PAGE_SIZE) {
        vm_page_t *pg = vm_page_get(i);

        /* multi-frame blocks carry the huge-page marker on every frame */
        if (order != VM_PAGE_MIN_ORDER) {
            pg->p_flags |= VM_PAGE_HUGE;
        }

        if (i == base) {
            pg->p_flags |= VM_PAGE_HEAD;
            first_page = pg;
        }

        pg->p_order = order;
        pg->p_node = z->z_info.zd_node;
        pg->p_zone = z->z_info.zd_id;
    }

    return first_page;
}
|
||||
|
||||
/* Carve the physical region [base, limit] (limit inclusive) into the
   largest naturally-aligned blocks possible and register them with
   `zone`.  Free blocks (reserved == 0) are pushed onto the zone's
   per-order free lists; reserved blocks are described but never made
   allocatable.

   Greedy strategy: walk down from VM_PAGE_MAX_ORDER until a block
   both fits the remaining frame count and is aligned at `base`; emit
   it and advance `base`.  When the failure was alignment only, retry
   from the top order afterwards — a larger order may fit again once
   the base has advanced. */
static void convert_region_to_blocks(vm_zone_t *zone,
                                     phys_addr_t base, phys_addr_t limit,
                                     int reserved)
{
	size_t block_frames = vm_bytes_to_pages(limit - base + 1);
	/* phys_addr_t is uintptr_t, so PRIxPTR is the correct conversion;
	   the old %08zx (size_t) was undefined behaviour on platforms
	   where size_t and uintptr_t differ. */
	printf("adding region %08" PRIxPTR "-%08" PRIxPTR " (%zu frames) to zone %s\n",
	       base, limit, block_frames, zone->z_info.zd_name);
	int reset_order = 0;

	for (int order = VM_PAGE_MAX_ORDER; order >= VM_PAGE_MIN_ORDER; ) {
		size_t order_frames = vm_page_order_to_pages(order);
		vm_alignment_t order_alignment = vm_page_order_to_alignment(order);

		if (order_frames > block_frames) {
			order--;
			continue;
		}

		if (!VM_CHECK_ALIGN(base, order_alignment)) {
			/* remember to retry larger orders once base moves */
			reset_order = 1;
			order--;
			continue;
		}

		printf("%s: %zu %s pages at %08" PRIxPTR "\n",
		       zone->z_info.zd_name,
		       order_frames,
		       reserved == 1 ? "reserved" : "free",
		       base);

		phys_addr_t block_limit = base + (order_frames * VM_PAGE_SIZE) - 1;
		vm_page_t *block_page = group_pages_into_block(zone, base, block_limit, order);

		if (reserved == 0) {
			queue_push_back(&zone->z_free_pages[order], &block_page->p_list);
		}

		base = block_limit + 1;
		block_frames -= order_frames;

		if (reset_order) {
			order = VM_PAGE_MAX_ORDER;
			reset_order = 0;
		}

		if (base > limit + 1) {
			printf("too many pages created! %" PRIxPTR " > %" PRIxPTR "\n", base, limit);
			abort();
		}

		/* region fully consumed: base sits one byte past the
		   inclusive limit.  The previous test (base == limit) could
		   never be true — limit ends mid-page — so the loop used to
		   spin order down to exit instead of breaking here. */
		if (base == limit + 1) {
			break;
		}
	}
}
|
||||
|
||||
/* Initialise zone `z` from `zone_info` and populate its free lists.

   Walks the zone's frame range, coalescing consecutive pages that
   share the same VM_PAGE_RESERVED state into runs, and hands each run
   to convert_region_to_blocks().  Reserved runs are described but
   never made allocatable.  Returns silently when the zone's base
   address has no page metadata. */
void vm_zone_init(vm_zone_t *z, const vm_zone_descriptor_t *zone_info)
{
	if (!vm_page_get(zone_info->zd_base)) {
		return;
	}

	/* phys_addr_t is uintptr_t; %08zx (size_t) was mismatched */
	printf("initialising zone %s (%08" PRIxPTR "-%08" PRIxPTR ")\n",
	       zone_info->zd_name, zone_info->zd_base, zone_info->zd_limit);
	memset(z, 0x0, sizeof *z);
	memcpy(&z->z_info, zone_info, sizeof *zone_info);
	z->z_lock = SPIN_LOCK_INIT;

	unsigned long flags;
	spin_lock_irqsave(&z->z_lock, &flags);

	/* block_end starts at zd_base (not zd_limit): it must always
	   track the last page actually seen in the current run. */
	phys_addr_t block_start = zone_info->zd_base, block_end = zone_info->zd_base;
	int this_page_reserved = 0, last_page_reserved = -1;

	for (uintptr_t i = zone_info->zd_base; i < zone_info->zd_limit; i += VM_PAGE_SIZE) {
		vm_page_t *pg = vm_page_get(i);
		if (!pg) {
			break;
		}

		this_page_reserved = (pg->p_flags & VM_PAGE_RESERVED) ? 1 : 0;

		if (last_page_reserved == -1) {
			last_page_reserved = this_page_reserved;
		}

		if (this_page_reserved == last_page_reserved) {
			block_end = i;
			continue;
		}

		/* reserved state flipped: flush the finished run */
		convert_region_to_blocks(z, block_start, block_end + VM_PAGE_SIZE - 1, last_page_reserved);

		block_start = i;
		/* previously left stale here; a run starting on the final
		   page was then flushed with block_end < block_start,
		   underflowing the region size */
		block_end = i;
		last_page_reserved = this_page_reserved;
	}

	/* flush the trailing run.  The old `block_start != block_end`
	   guard silently dropped a final single-page run (and whole
	   single-page zones).  last_page_reserved == -1 means no pages
	   were seen at all. */
	if (last_page_reserved != -1) {
		convert_region_to_blocks(z, block_start, block_end + VM_PAGE_SIZE - 1, last_page_reserved);
	}

	spin_unlock_irqrestore(&z->z_lock, flags);
}
|
||||
|
||||
/* Ensure z->z_free_pages[order] has at least one block, splitting a
   larger free block if necessary.

   Returns 0 when a block of `order` is (or has been made) available,
   -1 when no block of that order or above is free.  Caller must hold
   z->z_lock. */
static int replenish_free_page_list(vm_zone_t *z, vm_page_order_t order)
{
	if (!queue_empty(&z->z_free_pages[order])) {
		return 0; /* already stocked */
	}

	if (order == VM_PAGE_MAX_ORDER) {
		return -1; /* nothing larger exists to split */
	}

	/* locate the smallest order >= `order` with a free block */
	vm_page_order_t donor = VM_MAX_PAGE_ORDERS;
	for (vm_page_order_t o = order; o <= VM_PAGE_MAX_ORDER; o++) {
		if (!queue_empty(&z->z_free_pages[o])) {
			donor = o;
			break;
		}
	}

	if (donor == VM_MAX_PAGE_ORDERS) {
		return -1; /* zone exhausted at this size */
	}

	if (donor == order) {
		return 0; /* requested order already has free blocks */
	}

	/* cascade down: at each step pop one block, split it, and push
	   both halves onto the next-smaller order's free list */
	for (vm_page_order_t o = donor; o > order; o--) {
		queue_entry_t *entry = queue_pop_front(&z->z_free_pages[o]);
		vm_page_t *victim = QUEUE_CONTAINER(vm_page_t, p_list, entry);

		vm_page_t *lo, *hi;
		vm_page_split(victim, &lo, &hi);

		queue_push_back(&z->z_free_pages[o - 1], &lo->p_list);
		queue_push_back(&z->z_free_pages[o - 1], &hi->p_list);
	}

	return 0;
}
|
||||
|
||||
/* Allocate one block of the given order from zone `z`.

   Returns the head page of the allocated block, or NULL when no block
   of that order (nor any larger one that could be split) is free.
   `flags` is part of the interface but not consulted by the current
   implementation. */
vm_page_t *vm_zone_alloc_page(vm_zone_t *z, vm_page_order_t order, vm_flags_t flags)
{
	(void)flags; /* reserved for future allocation policies */

	unsigned long irq_flags;
	spin_lock_irqsave(&z->z_lock, &irq_flags);

	/* top up the free list by splitting larger blocks if needed */
	if (replenish_free_page_list(z, order) != 0) {
		spin_unlock_irqrestore(&z->z_lock, irq_flags);
		return NULL;
	}

	queue_entry_t *entry = queue_pop_front(&z->z_free_pages[order]);
	vm_page_t *head = QUEUE_CONTAINER(vm_page_t, p_list, entry);

	/* mark every frame of the block as allocated */
	vm_page_foreach (head, frame) {
		frame->p_flags |= VM_PAGE_ALLOC;
	}

	spin_unlock_irqrestore(&z->z_lock, irq_flags);
	return head;
}
|
||||
|
||||
/* Return the block headed by `pg` to zone `z`, coalescing it with its
   free buddy as many times as possible.

   NOTE(review): `pg` is pushed onto its free list *before* merging so
   the queue_delete calls below can unlink both halves by their
   pre-merge order.  Correctness relies on vm_page_merge() rejecting
   buddies that are allocated or of a different order (returning NULL)
   and tolerating a NULL buddy — vm_page_get() can return NULL at the
   edge of tracked memory.  Confirm against vm_page_merge's entry
   checks, which are outside this view. */
void vm_zone_free_page(vm_zone_t *z, vm_page_t *pg)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&z->z_lock, &irq_flags);

	/* clear the allocated flag and make the block findable as a
	   merge candidate on its order's free list */
	pg->p_flags &= ~VM_PAGE_ALLOC;
	queue_push_back(&z->z_free_pages[pg->p_order], &pg->p_list);

	while (1) {
		vm_page_t *buddy = vm_page_get_buddy(pg);
		vm_page_t *huge = vm_page_merge(pg, buddy);
		if (!huge) {
			/* buddy not mergeable; coalescing stops here */
			break;
		}

		/* vm_page_merge raised p_order on every frame of the merged
		   block, so `buddy->p_order - 1` is the pre-merge order both
		   halves were queued under */
		queue_delete(&z->z_free_pages[buddy->p_order - 1], &buddy->p_list);
		queue_delete(&z->z_free_pages[buddy->p_order - 1], &pg->p_list);
		queue_push_back(&z->z_free_pages[huge->p_order], &huge->p_list);

		/* try to merge the bigger block one level up */
		pg = huge;
	}

	spin_unlock_irqrestore(&z->z_lock, irq_flags);
}
|
||||
Reference in New Issue
Block a user