/*
 * handle.c — hierarchical handle table: a depth-limited radix tree of
 * handle slots, with subtables allocated on demand from a slab cache.
 */
#include <kernel/handle.h>
|
|
#include <kernel/libc/string.h>
|
|
#include <kernel/object.h>
|
|
#include <kernel/sched.h>
|
|
#include <kernel/util.h>
|
|
#include <kernel/vm.h>
|
|
|
|
/* depth=3 gives a maximum of ~66.6 million handles */
|
|
#define MAX_TABLE_DEPTH 3
|
|
#define RESERVED_HANDLES 64
|
|
|
|
/*
 * Slab cache backing every struct handle_table allocation.
 * Initialised lazily by handle_table_create() on first use.
 */
static struct vm_cache handle_table_cache = {
        .c_name = "handle_table",
        .c_obj_size = sizeof(struct handle_table),
};
|
|
|
|
struct handle_table *handle_table_create(void)
|
|
{
|
|
if (!VM_CACHE_INITIALISED(&handle_table_cache)) {
|
|
vm_cache_init(&handle_table_cache);
|
|
}
|
|
|
|
struct handle_table *out
|
|
= vm_cache_alloc(&handle_table_cache, VM_NORMAL);
|
|
if (!out) {
|
|
return NULL;
|
|
}
|
|
|
|
memset(out, 0x0, sizeof *out);
|
|
|
|
return out;
|
|
}
|
|
|
|
/*
 * Tear down a handle table.
 *
 * NOTE(review): currently a no-op.  Neither the subtables reachable
 * through tab->t_subtables.t_subtable_list nor the table's own cache
 * allocation are released, so every table created by
 * handle_table_create() is leaked.  Presumably this should walk the
 * subtable tree recursively, drop object references held by live
 * handle slots, and return each table to handle_table_cache — TODO
 * confirm intended ownership semantics before implementing.
 */
void handle_table_destroy(struct handle_table *tab)
{
}
|
|
|
|
static kern_status_t decode_handle_indices(
|
|
kern_handle_t handle,
|
|
unsigned int indices[MAX_TABLE_DEPTH])
|
|
{
|
|
for (int i = 0; i < MAX_TABLE_DEPTH; i++) {
|
|
unsigned int div = (i > 0 ? REFS_PER_TABLE : HANDLES_PER_TABLE);
|
|
|
|
unsigned int v = handle % div;
|
|
indices[MAX_TABLE_DEPTH - i - 1] = v;
|
|
handle /= div;
|
|
}
|
|
|
|
return handle == 0 ? KERN_OK : KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
static kern_status_t encode_handle_indices(
|
|
unsigned int indices[MAX_TABLE_DEPTH],
|
|
kern_handle_t *out_handle)
|
|
{
|
|
kern_handle_t handle = 0;
|
|
unsigned int mul = 1;
|
|
|
|
for (int i = MAX_TABLE_DEPTH - 1; i >= 0; i--) {
|
|
unsigned int v = indices[i] * mul;
|
|
handle += v;
|
|
mul *= REFS_PER_TABLE;
|
|
}
|
|
|
|
*out_handle = handle;
|
|
return KERN_OK;
|
|
}
|
|
|
|
/*
 * Allocate a free handle slot, returning the slot pointer through
 * *out_slot and the encoded handle value through *out_handle.
 *
 * Walks down MAX_TABLE_DEPTH - 1 subtable levels, at each level taking
 * the lowest-numbered subtable whose bit in t_subtable_map is clear,
 * creating missing subtables on demand, then claims the lowest free
 * slot in the leaf's t_handle_map.
 *
 * Returns KERN_OK on success, KERN_NO_MEMORY if a subtable could not
 * be created, or KERN_NO_ENTRY if no free slot exists on the chosen
 * path.
 *
 * NOTE(review): nothing in this file ever *sets* bits in
 * t_subtable_map (handle_table_free_handle only clears them), so the
 * walk always descends the leftmost subtable chain; once that leaf
 * fills up this returns KERN_NO_ENTRY even though other subtables may
 * still have capacity.  Presumably a full leaf should set its parent's
 * bit so subsequent calls skip it — TODO confirm intended design.
 */
kern_status_t handle_table_alloc_handle(
        struct handle_table *tab,
        struct handle **out_slot,
        kern_handle_t *out_handle)
{
        int i;
        unsigned int indices[MAX_TABLE_DEPTH] = {0};
        /* The all-zero path identifies the leaf that holds the lowest
         * handle values, whose first RESERVED_HANDLES slots are kept
         * permanently allocated. */
        static const unsigned int reserved_indices[MAX_TABLE_DEPTH] = {0};

        /* Descend the interior levels, materialising subtables lazily. */
        for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
                unsigned int next_index = bitmap_lowest_clear(
                        tab->t_subtables.t_subtable_map,
                        REFS_PER_TABLE);

                if (next_index == BITMAP_NPOS) {
                        return KERN_NO_ENTRY;
                }

                struct handle_table *next
                        = tab->t_subtables.t_subtable_list[next_index];
                if (!next) {
                        next = handle_table_create();
                        tab->t_subtables.t_subtable_list[next_index] = next;
                }

                /* Still NULL here means handle_table_create() failed. */
                if (!next) {
                        return KERN_NO_MEMORY;
                }

                indices[i] = next_index;
                tab = next;
        }

        /* Reserve the low handle range on the all-zero leaf.  This runs
         * on every allocation into that leaf, but bitmap_fill over an
         * already-set range is idempotent. */
        if (memcmp(indices, reserved_indices, sizeof indices) == 0) {
                bitmap_fill(tab->t_handles.t_handle_map, RESERVED_HANDLES);
        }

        unsigned int handle_index = bitmap_lowest_clear(
                tab->t_handles.t_handle_map,
                HANDLES_PER_TABLE);
        if (handle_index == BITMAP_NPOS) {
                return KERN_NO_ENTRY;
        }

        /* Claim the slot and hand it back zeroed. */
        bitmap_set(tab->t_handles.t_handle_map, handle_index);
        memset(&tab->t_handles.t_handle_list[handle_index],
               0x0,
               sizeof(struct handle));

        /* i == MAX_TABLE_DEPTH - 1 after the loop: the leaf index. */
        indices[i] = handle_index;

        *out_slot = &tab->t_handles.t_handle_list[handle_index];
        return encode_handle_indices(indices, out_handle);
}
|
|
|
|
kern_status_t handle_table_free_handle(
|
|
struct handle_table *tab,
|
|
kern_handle_t handle)
|
|
{
|
|
unsigned int indices[MAX_TABLE_DEPTH];
|
|
if (decode_handle_indices(handle, indices) != KERN_OK) {
|
|
return KERN_NO_ENTRY;
|
|
}
|
|
|
|
int i;
|
|
for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
|
|
struct handle_table *next
|
|
= tab->t_subtables.t_subtable_list[indices[i]];
|
|
if (!next) {
|
|
return KERN_NO_ENTRY;
|
|
}
|
|
|
|
bitmap_clear(tab->t_subtables.t_subtable_map, indices[i]);
|
|
tab = next;
|
|
}
|
|
|
|
unsigned int handle_index = indices[i];
|
|
if (!bitmap_check(tab->t_handles.t_handle_map, handle_index)) {
|
|
return KERN_NO_ENTRY;
|
|
}
|
|
|
|
bitmap_clear(tab->t_handles.t_handle_map, handle_index);
|
|
struct handle *handle_entry
|
|
= &tab->t_handles.t_handle_list[handle_index];
|
|
|
|
if (handle_entry->h_object) {
|
|
object_remove_handle(handle_entry->h_object);
|
|
}
|
|
|
|
memset(handle_entry, 0x0, sizeof *handle_entry);
|
|
return KERN_OK;
|
|
}
|
|
|
|
struct handle *handle_table_get_handle(
|
|
struct handle_table *tab,
|
|
kern_handle_t handle)
|
|
{
|
|
unsigned int indices[MAX_TABLE_DEPTH];
|
|
if (decode_handle_indices(handle, indices) != KERN_OK) {
|
|
return NULL;
|
|
}
|
|
|
|
int i;
|
|
for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
|
|
struct handle_table *next
|
|
= tab->t_subtables.t_subtable_list[indices[i]];
|
|
if (!next) {
|
|
return NULL;
|
|
}
|
|
|
|
tab = next;
|
|
}
|
|
|
|
unsigned int handle_index = indices[i];
|
|
if (!bitmap_check(tab->t_handles.t_handle_map, handle_index)) {
|
|
return NULL;
|
|
}
|
|
|
|
if (!tab->t_handles.t_handle_list[handle_index].h_object) {
|
|
return NULL;
|
|
}
|
|
|
|
return &tab->t_handles.t_handle_list[handle_index];
|
|
}
|
|
|
|
/*
 * Cursor over an array of struct handle_list, flattening the non-empty
 * lists into one logical sequence of handles.
 */
struct handle_list_iterator {
        struct handle_list *it_list;   /* array of lists being walked */
        size_t it_list_count;          /* number of entries in it_list */
        size_t it_list_ptr;            /* index of the current list */

        kern_handle_t *it_handles;     /* remaining handles in current list */
        size_t it_nr_handles;          /* count remaining; 0 == exhausted */
};
|
|
|
|
static void handle_list_iterator_begin(
|
|
struct handle_list_iterator *it,
|
|
struct handle_list *list,
|
|
size_t list_count)
|
|
{
|
|
memset(it, 0x0, sizeof *it);
|
|
it->it_list = list;
|
|
it->it_list_count = list_count;
|
|
|
|
while (it->it_list_ptr < list_count) {
|
|
if (list[it->it_list_ptr].l_nr_handles > 0) {
|
|
break;
|
|
}
|
|
|
|
it->it_list_ptr++;
|
|
}
|
|
|
|
if (it->it_list_ptr >= list_count) {
|
|
return;
|
|
}
|
|
|
|
it->it_handles = list[it->it_list_ptr].l_handles;
|
|
it->it_nr_handles = list[it->it_list_ptr].l_nr_handles;
|
|
}
|
|
|
|
static void handle_list_iterator_seek(
|
|
struct handle_list_iterator *it,
|
|
size_t nr_handles)
|
|
{
|
|
if (nr_handles > it->it_nr_handles) {
|
|
nr_handles = it->it_nr_handles;
|
|
}
|
|
|
|
if (nr_handles < it->it_nr_handles) {
|
|
it->it_handles += nr_handles;
|
|
it->it_nr_handles -= nr_handles;
|
|
return;
|
|
}
|
|
|
|
it->it_list_ptr++;
|
|
while (it->it_list_ptr < it->it_list_count) {
|
|
if (it->it_list[it->it_list_ptr].l_nr_handles > 0) {
|
|
break;
|
|
}
|
|
|
|
it->it_list_ptr++;
|
|
}
|
|
|
|
if (it->it_list_ptr >= it->it_list_count) {
|
|
return;
|
|
}
|
|
|
|
it->it_handles = it->it_list[it->it_list_ptr].l_handles;
|
|
it->it_nr_handles = it->it_list[it->it_list_ptr].l_nr_handles;
|
|
}
|
|
|
|
kern_status_t handle_list_transfer(
|
|
struct handle_table *dest_table,
|
|
struct handle_list *dest_list,
|
|
size_t dest_list_count,
|
|
struct handle_table *src_table,
|
|
const struct handle_list *src_list,
|
|
size_t src_list_count)
|
|
{
|
|
struct handle_list_iterator src, dest;
|
|
handle_list_iterator_begin(
|
|
&src,
|
|
(struct handle_list *)src_list,
|
|
src_list_count);
|
|
handle_list_iterator_begin(&dest, dest_list, dest_list_count);
|
|
|
|
while (src.it_nr_handles && dest.it_nr_handles) {
|
|
size_t to_copy = MIN(src.it_nr_handles, dest.it_nr_handles);
|
|
for (size_t i = 0; i < to_copy; i++) {
|
|
kern_handle_t handle_v = src.it_handles[i];
|
|
struct handle *handle
|
|
= handle_table_get_handle(src_table, handle_v);
|
|
if (!handle) {
|
|
return KERN_HANDLE_INVALID;
|
|
}
|
|
|
|
struct object *obj = object_ref(handle->h_object);
|
|
handle_flags_t flags = handle->h_flags;
|
|
|
|
handle_table_free_handle(src_table, handle_v);
|
|
|
|
struct handle *dest_slot = NULL;
|
|
kern_status_t status = handle_table_alloc_handle(
|
|
dest_table,
|
|
&dest_slot,
|
|
&handle_v);
|
|
if (status != KERN_OK) {
|
|
return status;
|
|
}
|
|
|
|
dest_slot->h_object = obj;
|
|
dest_slot->h_flags = flags;
|
|
|
|
object_add_handle(obj);
|
|
object_unref(obj);
|
|
|
|
dest.it_handles[i] = handle_v;
|
|
}
|
|
|
|
handle_list_iterator_seek(&src, to_copy);
|
|
handle_list_iterator_seek(&dest, to_copy);
|
|
}
|
|
|
|
return KERN_OK;
|
|
}
|