The lowest 2 bits of handle values are no longer unused, and 0 is now a valid handle value. The first 64 handles are now reserved and will not be automatically allocated by the kernel; however, they are still valid handles, and other handles can be moved into this range using an as-yet-unwritten function. This allows support for standard POSIX file descriptors, which require the values 0, 1, and 2.
190 lines
4.2 KiB
C
190 lines
4.2 KiB
C
#include <kernel/handle.h>
|
|
#include <kernel/libc/string.h>
|
|
#include <kernel/object.h>
|
|
#include <kernel/sched.h>
|
|
#include <kernel/util.h>
|
|
#include <kernel/vm.h>
|
|
|
|
/* depth=3 gives a maximum of ~66.6 million handles */
|
|
#define MAX_TABLE_DEPTH 3
|
|
#define RESERVED_HANDLES 64
|
|
|
|
/* Object cache backing every handle_table allocation.  It is not set up
 * here: handle_table_create() initialises it lazily on first use via
 * VM_CACHE_INITIALISED()/vm_cache_init(). */
static struct vm_cache handle_table_cache = {
	.c_name = "handle_table",
	.c_obj_size = sizeof(struct handle_table),
};
|
|
|
|
struct handle_table *handle_table_create(void)
|
|
{
|
|
if (!VM_CACHE_INITIALISED(&handle_table_cache)) {
|
|
vm_cache_init(&handle_table_cache);
|
|
}
|
|
|
|
struct handle_table *out
|
|
= vm_cache_alloc(&handle_table_cache, VM_NORMAL);
|
|
if (!out) {
|
|
return NULL;
|
|
}
|
|
|
|
memset(out, 0x0, sizeof *out);
|
|
|
|
return out;
|
|
}
|
|
|
|
/*
 * Destroy a handle table.
 *
 * NOTE(review): this is an empty stub.  Nothing allocated on behalf of
 * `tab` is released: subtables created by handle_table_alloc_handle(),
 * the table's own handle_table_cache object, and any live handle
 * entries (which would presumably need object_remove_handle(), as in
 * handle_table_free_handle()) are all leaked.  TODO: implement a
 * recursive teardown — confirm against the handle_table layout in
 * <kernel/handle.h>.
 */
void handle_table_destroy(struct handle_table *tab)
{
}
|
|
|
|
static kern_status_t decode_handle_indices(
|
|
kern_handle_t handle,
|
|
unsigned int indices[MAX_TABLE_DEPTH])
|
|
{
|
|
for (int i = 0; i < MAX_TABLE_DEPTH; i++) {
|
|
unsigned int div = (i > 0 ? REFS_PER_TABLE : HANDLES_PER_TABLE);
|
|
|
|
unsigned int v = handle % div;
|
|
indices[MAX_TABLE_DEPTH - i - 1] = v;
|
|
handle /= div;
|
|
}
|
|
|
|
return handle == 0 ? KERN_OK : KERN_INVALID_ARGUMENT;
|
|
}
|
|
|
|
static kern_status_t encode_handle_indices(
|
|
unsigned int indices[MAX_TABLE_DEPTH],
|
|
kern_handle_t *out_handle)
|
|
{
|
|
kern_handle_t handle = 0;
|
|
unsigned int mul = 1;
|
|
|
|
for (int i = MAX_TABLE_DEPTH - 1; i >= 0; i--) {
|
|
unsigned int v = indices[i] * mul;
|
|
handle += v;
|
|
mul *= REFS_PER_TABLE;
|
|
}
|
|
|
|
*out_handle = handle;
|
|
return KERN_OK;
|
|
}
|
|
|
|
kern_status_t handle_table_alloc_handle(
|
|
struct handle_table *tab,
|
|
struct handle **out_slot,
|
|
kern_handle_t *out_handle)
|
|
{
|
|
int i;
|
|
unsigned int indices[MAX_TABLE_DEPTH] = {0};
|
|
static const unsigned int reserved_indices[MAX_TABLE_DEPTH] = {0};
|
|
|
|
for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
|
|
unsigned int next_index = bitmap_lowest_clear(
|
|
tab->t_subtables.t_subtable_map,
|
|
REFS_PER_TABLE);
|
|
|
|
if (next_index == BITMAP_NPOS) {
|
|
return KERN_NO_ENTRY;
|
|
}
|
|
|
|
struct handle_table *next
|
|
= tab->t_subtables.t_subtable_list[next_index];
|
|
if (!next) {
|
|
next = handle_table_create();
|
|
tab->t_subtables.t_subtable_list[next_index] = next;
|
|
}
|
|
|
|
if (!next) {
|
|
return KERN_NO_MEMORY;
|
|
}
|
|
|
|
indices[i] = next_index;
|
|
tab = next;
|
|
}
|
|
|
|
if (memcmp(indices, reserved_indices, sizeof indices) == 0) {
|
|
bitmap_fill(tab->t_handles.t_handle_map, RESERVED_HANDLES);
|
|
}
|
|
|
|
unsigned int handle_index = bitmap_lowest_clear(
|
|
tab->t_handles.t_handle_map,
|
|
HANDLES_PER_TABLE);
|
|
if (handle_index == BITMAP_NPOS) {
|
|
return KERN_NO_ENTRY;
|
|
}
|
|
|
|
bitmap_set(tab->t_handles.t_handle_map, handle_index);
|
|
memset(&tab->t_handles.t_handle_list[handle_index],
|
|
0x0,
|
|
sizeof(struct handle));
|
|
|
|
indices[i] = handle_index;
|
|
|
|
*out_slot = &tab->t_handles.t_handle_list[handle_index];
|
|
return encode_handle_indices(indices, out_handle);
|
|
}
|
|
|
|
kern_status_t handle_table_free_handle(
|
|
struct handle_table *tab,
|
|
kern_handle_t handle)
|
|
{
|
|
unsigned int indices[MAX_TABLE_DEPTH];
|
|
if (decode_handle_indices(handle, indices) != KERN_OK) {
|
|
return KERN_NO_ENTRY;
|
|
}
|
|
|
|
int i;
|
|
for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
|
|
struct handle_table *next
|
|
= tab->t_subtables.t_subtable_list[indices[i]];
|
|
if (!next) {
|
|
return KERN_NO_ENTRY;
|
|
}
|
|
|
|
bitmap_clear(tab->t_subtables.t_subtable_map, indices[i]);
|
|
tab = next;
|
|
}
|
|
|
|
unsigned int handle_index = indices[i];
|
|
if (!bitmap_check(tab->t_handles.t_handle_map, handle_index)) {
|
|
return KERN_NO_ENTRY;
|
|
}
|
|
|
|
bitmap_clear(tab->t_handles.t_handle_map, handle_index);
|
|
struct handle *handle_entry
|
|
= &tab->t_handles.t_handle_list[handle_index];
|
|
|
|
if (handle_entry->h_object) {
|
|
object_remove_handle(handle_entry->h_object);
|
|
}
|
|
|
|
memset(handle_entry, 0x0, sizeof *handle_entry);
|
|
return KERN_OK;
|
|
}
|
|
|
|
struct handle *handle_table_get_handle(
|
|
struct handle_table *tab,
|
|
kern_handle_t handle)
|
|
{
|
|
unsigned int indices[MAX_TABLE_DEPTH];
|
|
if (decode_handle_indices(handle, indices) != KERN_OK) {
|
|
return NULL;
|
|
}
|
|
|
|
int i;
|
|
for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
|
|
struct handle_table *next
|
|
= tab->t_subtables.t_subtable_list[indices[i]];
|
|
if (!next) {
|
|
return NULL;
|
|
}
|
|
|
|
tab = next;
|
|
}
|
|
|
|
unsigned int handle_index = indices[i];
|
|
if (!bitmap_check(tab->t_handles.t_handle_map, handle_index)) {
|
|
return NULL;
|
|
}
|
|
|
|
return &tab->t_handles.t_handle_list[handle_index];
|
|
}
|