2023-04-09 16:43:03 +01:00
|
|
|
#include <socks/kext.h>
|
|
|
|
|
#include <socks/btree.h>
|
|
|
|
|
#include <socks/vm.h>
|
|
|
|
|
#include <socks/util.h>
|
|
|
|
|
#include <socks/object.h>
|
|
|
|
|
#include <stddef.h>
|
|
|
|
|
|
|
|
|
|
/* Serializes all access to kext_tree (lookup/insert/delete) and the
 * paired set_add_object() call in kext_register(). */
static spin_lock_t kext_tree_lock = SPIN_LOCK_INIT;

/* Object set published in the global namespace root by kext_cache_init();
 * every registered kext is added to it. */
static struct object *kext_set;

/* Registry of loaded kexts, keyed by hash_string(k_ident).
 * NOTE(review): non-static, so presumably referenced from another TU. */
struct btree kext_tree;
|
2023-04-09 16:43:03 +01:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
static kern_status_t kext_query_name(struct object *obj, char out[OBJECT_NAME_MAX])
|
2023-04-09 16:43:03 +01:00
|
|
|
{
|
|
|
|
|
struct kext *kext = object_data(obj);
|
|
|
|
|
strncpy(out, kext->k_ident, OBJECT_NAME_MAX - 1);
|
|
|
|
|
out[OBJECT_NAME_MAX - 1] = 0;
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
static kern_status_t kext_destroy(struct object *obj)
|
2023-04-09 16:43:03 +01:00
|
|
|
{
|
|
|
|
|
struct kext *kext = object_data(obj);
|
|
|
|
|
if (kext->k_dependencies) {
|
|
|
|
|
kfree(kext->k_dependencies);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
static struct object_type kext_type = {
|
2023-04-09 16:43:03 +01:00
|
|
|
.ob_name = "kext",
|
|
|
|
|
.ob_size = sizeof(struct kext),
|
|
|
|
|
.ob_ops = {
|
|
|
|
|
.query_name = kext_query_name,
|
|
|
|
|
},
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
static struct kext *kext_get(const char *ident)
|
|
|
|
|
{
|
|
|
|
|
uint64_t ident_hash = hash_string(ident);
|
2023-04-12 20:17:11 +01:00
|
|
|
struct btree_node *cur = kext_tree.b_root;
|
2023-04-09 16:43:03 +01:00
|
|
|
|
|
|
|
|
while (cur) {
|
|
|
|
|
struct kext *cur_node = BTREE_CONTAINER(struct kext, k_node, cur);
|
|
|
|
|
if (ident_hash > cur_node->k_ident_hash) {
|
|
|
|
|
cur = btree_right(cur);
|
|
|
|
|
} else if (ident_hash < cur_node->k_ident_hash) {
|
|
|
|
|
cur = btree_left(cur);
|
|
|
|
|
} else if (!strcmp(cur_node->k_ident, ident)) {
|
|
|
|
|
return cur_node;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Insert @kext into the registry tree, keyed by k_ident_hash.
 * Caller must hold kext_tree_lock and must have verified (via
 * kext_get()) that the ident is not already registered.
 *
 * NOTE(review): if an existing node has the SAME hash but a different
 * ident (hash collision), the insert is silently dropped (the bare
 * `return` below) — the caller's reference is then never reachable via
 * the tree.  Confirm whether collisions need chaining. */
static void kext_add(struct kext *kext)
{
	/* Empty tree: the new node becomes the root. */
	if (!kext_tree.b_root) {
		kext_tree.b_root = &kext->k_node;
		btree_insert_fixup(&kext_tree, &kext->k_node);
		return;
	}

	/* Standard BST descent: walk until an empty child slot on the
	 * correct side is found, then attach there. */
	struct btree_node *cur = kext_tree.b_root;
	while (1) {
		struct kext *cur_node = BTREE_CONTAINER(struct kext, k_node, cur);
		struct btree_node *next = NULL;

		if (kext->k_ident_hash > cur_node->k_ident_hash) {
			next = btree_right(cur);
			if (!next) {
				btree_put_right(cur, &kext->k_node);
				break;
			}
		} else if (kext->k_ident_hash < cur_node->k_ident_hash) {
			next = btree_left(cur);
			if (!next) {
				btree_put_left(cur, &kext->k_node);
				break;
			}
		} else {
			/* Duplicate hash: silently refuse the insert (see
			 * NOTE above) — no fixup needed since nothing changed. */
			return;
		}

		cur = next;
	}

	/* Rebalance after attaching the new leaf. */
	btree_insert_fixup(&kext_tree, &kext->k_node);
}
|
|
|
|
|
|
|
|
|
|
static void kext_remove(struct kext *kext)
|
|
|
|
|
{
|
|
|
|
|
btree_delete(&kext_tree, &kext->k_node);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
struct kext *kext_get_by_id(const char *ident)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
spin_lock_irqsave(&kext_tree_lock, &flags);
|
|
|
|
|
|
|
|
|
|
struct kext *kext = kext_get(ident);
|
|
|
|
|
if (kext) {
|
2023-04-12 20:17:11 +01:00
|
|
|
struct object *kext_obj = object_header(kext);
|
2023-04-09 16:43:03 +01:00
|
|
|
object_ref(kext_obj);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&kext_tree_lock, flags);
|
|
|
|
|
return kext;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* One-time init: register the kext object type and publish the "kexts"
 * set at the namespace root.  Always returns KERN_OK.
 *
 * NOTE(review): set_create() and object_publish() results are not
 * checked; a NULL kext_set would later be handed to set_add_object()
 * in kext_register() — confirm whether these calls can fail. */
kern_status_t kext_cache_init(void)
{
	object_type_register(&kext_type);

	kext_set = set_create("kexts");

	object_publish(global_namespace(), "/", kext_set);

	return KERN_OK;
}
|
|
|
|
|
|
|
|
|
|
struct kext *kext_alloc(void)
|
|
|
|
|
{
|
2023-04-12 20:17:11 +01:00
|
|
|
struct object *kext_obj = object_create(&kext_type);
|
2023-04-09 16:43:03 +01:00
|
|
|
if (!kext_obj) {
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return object_data(kext_obj);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Drop one reference on @kext's backing object. */
void kext_release(struct kext *kext)
{
	struct object *obj = object_header(kext);

	object_deref(obj);
}
|
|
|
|
|
|
|
|
|
|
kern_status_t kext_register(struct kext *kext)
|
|
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
spin_lock_irqsave(&kext_tree_lock, &flags);
|
|
|
|
|
|
|
|
|
|
struct kext *n = kext_get(kext->k_ident);
|
|
|
|
|
if (n) {
|
|
|
|
|
spin_unlock_irqrestore(&kext_tree_lock, flags);
|
|
|
|
|
return KERN_NAME_EXISTS;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct object *kext_obj = object_header(kext);
|
2023-04-09 16:43:03 +01:00
|
|
|
object_ref(object_header(kext));
|
|
|
|
|
kext_add(kext);
|
|
|
|
|
|
2023-04-09 20:40:06 +01:00
|
|
|
set_add_object(kext_set, kext_obj);
|
|
|
|
|
|
2023-04-09 16:43:03 +01:00
|
|
|
spin_unlock_irqrestore(&kext_tree_lock, flags);
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Bring @kext online, recursively bringing all of its dependencies
 * online first.  Idempotent: returns KERN_OK immediately if the kext
 * is already online.  On failure, returns the first non-OK status.
 *
 * NOTE(review): no cycle detection — a circular dependency graph
 * recurses without bound.  Also no rollback: dependencies brought
 * online before a failure stay online.  Confirm both are acceptable. */
kern_status_t kext_bring_online(struct kext *kext)
{
	if (kext->k_flags & KEXT_ONLINE) {
		return KERN_OK;
	}

	/* Dependencies first: a kext's k_online hook may rely on them. */
	kern_status_t status;

	for (unsigned int i = 0; i < kext->k_nr_dependencies; i++) {
		status = kext_bring_online(kext->k_dependencies[i]);

		if (status != KERN_OK) {
			return status;
		}
	}

	/* The per-kext online hook is optional. */
	if (kext->k_online) {
		status = kext->k_online(kext);

		if (status != KERN_OK) {
			return status;
		}
	}

	/* Mark online only after the hook succeeded. */
	kext->k_flags |= KEXT_ONLINE;

	return KERN_OK;
}
|