diff --git a/include/mango/handle.h b/include/mango/handle.h
new file mode 100644
index 0000000..2ff62d9
--- /dev/null
+++ b/include/mango/handle.h
@@ -0,0 +1,66 @@
+#ifndef MANGO_HANDLE_H_
+#define MANGO_HANDLE_H_
+
+/* NOTE(review): the original include targets were stripped from this
+ * patch in transit (angle-bracket contents lost); the three below are
+ * best-effort reconstructions -- confirm against the tree. */
+#include <stdint.h>
+#include <mango/bitmap.h>
+#include <mango/status.h>
+
+/* subtract 32 bytes to account for the handle bitmap */
+#define HANDLES_PER_TABLE ((4096 - 32) / sizeof(struct handle))
+#define REFS_PER_TABLE ((4096 - 64) / sizeof(struct handle_table *))
+
+/* Encoded handle value: the low two bits are reserved (shifted out by
+ * the encode/decode helpers), the rest is a mixed-radix path through
+ * the table tree. */
+typedef uint32_t kern_handle_t;
+
+struct object;
+
+/* One leaf slot: the referenced object plus per-handle flags. */
+struct handle {
+	union {
+		struct object *h_object;
+		uint64_t __x;
+	};
+
+	uint64_t h_flags;
+};
+
+/* One 4 KiB tree node: interior nodes view it as t_subtables, leaves
+ * view it as t_handles. */
+struct handle_table {
+	union {
+		struct {
+			/* bitmap tracks which bits in t_handle_list are
+			 * allocated */
+			DECLARE_BITMAP(t_handle_map, HANDLES_PER_TABLE);
+			struct handle t_handle_list[HANDLES_PER_TABLE];
+		} t_handles;
+
+		struct {
+			/* bitmap tracks which sub-tables are fully-allocated */
+			DECLARE_BITMAP(t_subtable_map, REFS_PER_TABLE);
+			struct handle_table *t_subtable_list[REFS_PER_TABLE];
+		} t_subtables;
+	};
+};
+
+extern struct handle_table *handle_table_create(void);
+extern void handle_table_destroy(struct handle_table *tab);
+
+/* Allocate the lowest free slot; returns the slot pointer and its
+ * encoded handle value. */
+extern kern_status_t handle_table_alloc_handle(
+	struct handle_table *tab,
+	struct handle **out_slot,
+	kern_handle_t *out_handle);
+extern void handle_table_free_handle(
+	struct handle_table *tab,
+	kern_handle_t handle);
+extern struct handle *handle_table_get_handle(
+	struct handle_table *tab,
+	kern_handle_t handle);
+
+#endif
diff --git a/kernel/handle.c b/kernel/handle.c
new file mode 100644
index 0000000..8dd8b50
--- /dev/null
+++ b/kernel/handle.c
@@ -0,0 +1,231 @@
+/* NOTE(review): include targets were stripped in transit; these are
+ * reconstructions -- confirm against the tree. */
+#include <mango/handle.h>
+#include <mango/object.h>
+#include <mango/vm.h>
+#include <string.h>
+
+/* depth=3 gives a maximum of ~66.6 million handles */
+#define MAX_TABLE_DEPTH 3
+
+static struct vm_cache handle_table_cache = {
+	.c_name = "handle_table",
+	.c_obj_size = sizeof(struct handle_table),
+};
+
+/* Allocate a zeroed table node from the slab cache.
+ * NOTE(review): the lazy cache init is not obviously thread-safe --
+ * confirm callers serialise, or initialise the cache at boot. */
+struct handle_table *handle_table_create(void)
+{
+	if (!VM_CACHE_INITIALISED(&handle_table_cache)) {
+		vm_cache_init(&handle_table_cache);
+	}
+
+	struct handle_table *out
+		= vm_cache_alloc(&handle_table_cache, VM_NORMAL);
+	if (!out) {
+		return NULL;
+	}
+
+	memset(out, 0x0, sizeof *out);
+
+	return out;
+}
+
+void handle_table_destroy(struct handle_table *tab)
+{
+	/* TODO(review): unimplemented -- sub-tables and the node itself
+	 * are leaked.  Needs a recursive walk returning nodes to
+	 * handle_table_cache once the cache exposes a free routine. */
+	(void)tab;
+}
+
+/* Split an encoded handle into one index per tree level.
+ * Mixed radix: the leaf level holds HANDLES_PER_TABLE slots, each
+ * interior level REFS_PER_TABLE; the two reserved low bits are
+ * shifted out first. */
+static kern_status_t decode_handle_indices(
+	kern_handle_t handle,
+	unsigned int indices[MAX_TABLE_DEPTH])
+{
+	handle >>= 2;
+	for (int i = 0; i < MAX_TABLE_DEPTH; i++) {
+		unsigned int div = (i > 0 ? REFS_PER_TABLE : HANDLES_PER_TABLE);
+
+		indices[MAX_TABLE_DEPTH - i - 1] = handle % div;
+		handle /= div;
+	}
+
+	/* anything left over means the value was out of range */
+	return handle == 0 ? KERN_OK : KERN_INVALID_ARGUMENT;
+}
+
+/* Inverse of decode_handle_indices().
+ * BUGFIX: the leaf digit's radix is HANDLES_PER_TABLE, not
+ * REFS_PER_TABLE; the two differ, so the old code emitted handles
+ * that decode_handle_indices() mis-split. */
+static kern_status_t encode_handle_indices(
+	unsigned int indices[MAX_TABLE_DEPTH],
+	kern_handle_t *out_handle)
+{
+	kern_handle_t handle = 0;
+	unsigned int mul = 1;
+
+	for (int i = MAX_TABLE_DEPTH - 1; i >= 0; i--) {
+		handle += indices[i] * mul;
+		mul *= (i == MAX_TABLE_DEPTH - 1
+				? HANDLES_PER_TABLE
+				: REFS_PER_TABLE);
+	}
+
+	/* low two bits are reserved */
+	handle <<= 2;
+
+	*out_handle = handle;
+	return KERN_OK;
+}
+
+/* Walk (creating interior nodes as needed) to the first non-full
+ * leaf, claim its lowest free slot, and propagate "fully allocated"
+ * marks upward so later allocations skip exhausted subtrees. */
+kern_status_t handle_table_alloc_handle(
+	struct handle_table *tab,
+	struct handle **out_slot,
+	kern_handle_t *out_handle)
+{
+	struct handle_table *path[MAX_TABLE_DEPTH];
+	unsigned int indices[MAX_TABLE_DEPTH] = {0};
+	int i;
+
+	for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
+		path[i] = tab;
+
+		unsigned int next_index = bitmap_lowest_clear(
+			tab->t_subtables.t_subtable_map,
+			REFS_PER_TABLE);
+		if (next_index == BITMAP_NPOS) {
+			return KERN_NO_ENTRY;
+		}
+
+		struct handle_table *next
+			= tab->t_subtables.t_subtable_list[next_index];
+		if (!next) {
+			next = handle_table_create();
+			if (!next) {
+				return KERN_NO_MEMORY;
+			}
+			tab->t_subtables.t_subtable_list[next_index] = next;
+		}
+
+		indices[i] = next_index;
+		tab = next;
+	}
+	path[i] = tab;
+
+	unsigned int handle_index = bitmap_lowest_clear(
+		tab->t_handles.t_handle_map,
+		HANDLES_PER_TABLE);
+	if (handle_index == BITMAP_NPOS) {
+		return KERN_NO_ENTRY;
+	}
+
+	bitmap_set(tab->t_handles.t_handle_map, handle_index);
+	memset(&tab->t_handles.t_handle_list[handle_index],
+	       0x0,
+	       sizeof(struct handle));
+
+	indices[i] = handle_index;
+
+	/* BUGFIX: if the leaf just filled, mark it (and any ancestors
+	 * that consequently filled) in the parents' t_subtable_map;
+	 * the old code never set these bits, so once the first leaf
+	 * filled the search kept descending into it and allocation
+	 * wedged at HANDLES_PER_TABLE handles. */
+	if (bitmap_lowest_clear(tab->t_handles.t_handle_map,
+				HANDLES_PER_TABLE) == BITMAP_NPOS) {
+		for (int lvl = MAX_TABLE_DEPTH - 2; lvl >= 0; lvl--) {
+			bitmap_set(path[lvl]->t_subtables.t_subtable_map,
+				   indices[lvl]);
+			if (bitmap_lowest_clear(
+					path[lvl]->t_subtables.t_subtable_map,
+					REFS_PER_TABLE) != BITMAP_NPOS) {
+				break;
+			}
+		}
+	}
+
+	*out_slot = &tab->t_handles.t_handle_list[handle_index];
+	return encode_handle_indices(indices, out_handle);
+}
+
+/* Release a handle: drop the object reference (if any), zero the
+ * slot, and clear "fully allocated" marks along the path so the slot
+ * becomes findable again. */
+void handle_table_free_handle(struct handle_table *tab, kern_handle_t handle)
+{
+	struct handle_table *path[MAX_TABLE_DEPTH];
+	unsigned int indices[MAX_TABLE_DEPTH];
+	if (decode_handle_indices(handle, indices) != KERN_OK) {
+		return;
+	}
+
+	int i;
+	for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
+		path[i] = tab;
+		tab = tab->t_subtables.t_subtable_list[indices[i]];
+		if (!tab) {
+			return;
+		}
+	}
+
+	unsigned int handle_index = indices[i];
+	if (!bitmap_check(tab->t_handles.t_handle_map, handle_index)) {
+		/* not allocated: nothing to release */
+		return;
+	}
+
+	/* BUGFIX: clear the ancestors' "fully allocated" marks only for
+	 * a handle known to be valid; the old code cleared them on the
+	 * way down, letting a bogus handle value unmark a still-full
+	 * subtree. */
+	for (int lvl = 0; lvl < MAX_TABLE_DEPTH - 1; lvl++) {
+		bitmap_clear(path[lvl]->t_subtables.t_subtable_map,
+			     indices[lvl]);
+	}
+
+	bitmap_clear(tab->t_handles.t_handle_map, handle_index);
+	struct handle *handle_entry
+		= &tab->t_handles.t_handle_list[handle_index];
+
+	if (handle_entry->h_object) {
+		object_remove_handle(handle_entry->h_object);
+	}
+
+	memset(handle_entry, 0x0, sizeof *handle_entry);
+}
+
+/* Look up the slot for an encoded handle; NULL if the path is absent
+ * or the slot is not allocated. */
+struct handle *handle_table_get_handle(
+	struct handle_table *tab,
+	kern_handle_t handle)
+{
+	unsigned int indices[MAX_TABLE_DEPTH];
+	if (decode_handle_indices(handle, indices) != KERN_OK) {
+		return NULL;
+	}
+
+	int i;
+	for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
+		tab = tab->t_subtables.t_subtable_list[indices[i]];
+		if (!tab) {
+			return NULL;
+		}
+	}
+
+	unsigned int handle_index = indices[i];
+	if (!bitmap_check(tab->t_handles.t_handle_map, handle_index)) {
+		return NULL;
+	}
+
+	return &tab->t_handles.t_handle_list[handle_index];
+}