/*
 * Hierarchical handle tables: maps kern_handle_t values to kernel object
 * slots via a fixed-depth radix tree of sub-tables.
 */

#include <kernel/handle.h>
#include <kernel/libc/string.h>
#include <kernel/object.h>
#include <kernel/sched.h>
#include <kernel/util.h>
#include <kernel/vm-region.h>
#include <kernel/vm.h>
#include <mango/types.h>
/* depth=3 gives a maximum of ~66.6 million handles */
#define MAX_TABLE_DEPTH 3
#define RESERVED_HANDLES 64
/*
 * Backing cache for all handle_table allocations (root and sub-tables
 * alike).  Lazily initialised on the first handle_table_create() call.
 */
static struct vm_cache handle_table_cache = {
.c_name = "handle_table",
.c_obj_size = sizeof(struct handle_table),
};
/*
 * Allocate a zeroed handle table from the shared cache.
 *
 * Returns the new table, or NULL if the cache allocation fails.
 */
struct handle_table *handle_table_create(void)
{
	struct handle_table *tab;

	/* Set up the backing cache lazily, on first use. */
	if (!VM_CACHE_INITIALISED(&handle_table_cache)) {
		vm_cache_init(&handle_table_cache);
	}

	tab = vm_cache_alloc(&handle_table_cache, VM_NORMAL);
	if (tab != NULL) {
		memset(tab, 0x0, sizeof *tab);
	}
	return tab;
}
/*
 * NOTE(review): this is a no-op — neither the sub-tables reachable from
 * `tab` nor `tab` itself are ever returned to handle_table_cache, so a
 * destroyed table (and its whole subtree) is leaked.  TODO: confirm
 * whether teardown is handled elsewhere, or implement a recursive free
 * through the vm_cache API.
 */
void handle_table_destroy(struct handle_table *tab)
{
}
/*
 * Split a handle value into one index per table level.
 *
 * The leaf component (indices[MAX_TABLE_DEPTH - 1]) is encoded in base
 * HANDLES_PER_TABLE; every level above it is in base REFS_PER_TABLE.
 *
 * Returns KERN_OK on success, or KERN_INVALID_ARGUMENT if the handle is
 * larger than the encodable range (non-zero residue after all levels).
 */
static kern_status_t decode_handle_indices(
	kern_handle_t handle,
	unsigned int indices[MAX_TABLE_DEPTH])
{
	/* Peel off the least-significant (leaf) component first. */
	for (int level = MAX_TABLE_DEPTH - 1; level >= 0; level--) {
		unsigned int base = (level == MAX_TABLE_DEPTH - 1)
			? HANDLES_PER_TABLE
			: REFS_PER_TABLE;
		indices[level] = handle % base;
		handle /= base;
	}
	return (handle == 0) ? KERN_OK : KERN_INVALID_ARGUMENT;
}
/*
 * Combine per-level indices into a single handle value.
 *
 * This must mirror decode_handle_indices() exactly: the leaf component is
 * in base HANDLES_PER_TABLE and every upper level in base REFS_PER_TABLE.
 * The previous version multiplied every level by REFS_PER_TABLE, so
 * encode/decode did not round-trip whenever HANDLES_PER_TABLE differs
 * from REFS_PER_TABLE.
 *
 * Always returns KERN_OK; *out_handle receives the encoded value.
 */
static kern_status_t encode_handle_indices(
	unsigned int indices[MAX_TABLE_DEPTH],
	kern_handle_t *out_handle)
{
	kern_handle_t handle = 0;
	unsigned int mul = 1;
	for (int i = MAX_TABLE_DEPTH - 1; i >= 0; i--) {
		handle += (kern_handle_t)indices[i] * mul;
		/* The leaf level uses the handle-slot base; all levels
		 * above use the sub-table base (mirrors decode). */
		mul *= (i == MAX_TABLE_DEPTH - 1)
			? HANDLES_PER_TABLE
			: REFS_PER_TABLE;
	}
	*out_handle = handle;
	return KERN_OK;
}
/*
 * Allocate a free handle slot from the table hierarchy rooted at `tab`.
 *
 * Walks down MAX_TABLE_DEPTH - 1 sub-table levels (creating sub-tables on
 * demand), picks the lowest free slot in the leaf, and encodes the path
 * into *out_handle.  *out_slot receives the zeroed slot for the caller to
 * populate.
 *
 * t_subtable_map bits mark sub-trees that are completely full.  The
 * previous version never set these bits, so the walk always descended into
 * sub-table 0 and the allocator failed permanently once the first leaf
 * filled, even with capacity elsewhere.  We now mark a full table in its
 * parent's map and retry the walk; handle_table_free_handle() clears the
 * marks again when a handle anywhere beneath is released.
 *
 * Returns KERN_OK, KERN_NO_MEMORY if a sub-table cannot be created, or
 * KERN_NO_ENTRY when the whole hierarchy is exhausted.
 */
kern_status_t handle_table_alloc_handle(
	struct handle_table *tab,
	struct handle **out_slot,
	kern_handle_t *out_handle)
{
	struct handle_table *root = tab;
	struct handle_table *parents[MAX_TABLE_DEPTH] = {0};
	unsigned int indices[MAX_TABLE_DEPTH] = {0};
	static const unsigned int reserved_indices[MAX_TABLE_DEPTH] = {0};
	int i;

retry:
	tab = root;
	for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
		unsigned int next_index = bitmap_lowest_clear(
			tab->t_subtables.t_subtable_map,
			REFS_PER_TABLE);
		if (next_index == BITMAP_NPOS) {
			/* Every sub-table at this level is marked full. */
			if (i == 0) {
				return KERN_NO_ENTRY;
			}
			/* Propagate fullness to the parent and restart. */
			bitmap_set(parents[i - 1]->t_subtables.t_subtable_map,
				indices[i - 1]);
			goto retry;
		}
		struct handle_table *next
			= tab->t_subtables.t_subtable_list[next_index];
		if (!next) {
			next = handle_table_create();
			if (!next) {
				return KERN_NO_MEMORY;
			}
			tab->t_subtables.t_subtable_list[next_index] = next;
		}
		parents[i] = tab;
		indices[i] = next_index;
		tab = next;
	}

	/* The very first leaf table hosts the reserved handle range;
	 * keep those slots permanently marked as in use. */
	if (memcmp(indices, reserved_indices, sizeof indices) == 0) {
		bitmap_fill(tab->t_handles.t_handle_map, RESERVED_HANDLES);
	}

	unsigned int handle_index = bitmap_lowest_clear(
		tab->t_handles.t_handle_map,
		HANDLES_PER_TABLE);
	if (handle_index == BITMAP_NPOS) {
		/* Leaf is full: record that in the parent so future walks
		 * skip it, then retry.  Each retry sets one previously-clear
		 * bit, so the loop terminates. */
		if (i == 0) {
			return KERN_NO_ENTRY;
		}
		bitmap_set(parents[i - 1]->t_subtables.t_subtable_map,
			indices[i - 1]);
		goto retry;
	}
	bitmap_set(tab->t_handles.t_handle_map, handle_index);
	memset(&tab->t_handles.t_handle_list[handle_index],
		0x0,
		sizeof(struct handle));
	indices[i] = handle_index;
	*out_slot = &tab->t_handles.t_handle_list[handle_index];
	return encode_handle_indices(indices, out_handle);
}
/*
 * Release the handle `handle` in the table hierarchy rooted at `tab`.
 *
 * Drops the slot's object reference (if any), clears the slot, and clears
 * the "full" marks along the path so the allocator can reuse this subtree.
 *
 * The previous version cleared the t_subtable_map bits while descending,
 * *before* verifying the handle actually exists — so freeing a bogus
 * handle still mutated allocator state.  We now validate the full path
 * first and only then touch the bitmaps.
 *
 * Returns KERN_OK, or KERN_NO_ENTRY if the handle does not decode or is
 * not currently allocated.
 */
kern_status_t handle_table_free_handle(
	struct handle_table *tab,
	kern_handle_t handle)
{
	unsigned int indices[MAX_TABLE_DEPTH];
	struct handle_table *tables[MAX_TABLE_DEPTH];
	int i;

	if (decode_handle_indices(handle, indices) != KERN_OK) {
		return KERN_NO_ENTRY;
	}

	/* Validate the whole path before mutating anything. */
	tables[0] = tab;
	for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
		struct handle_table *next
			= tables[i]->t_subtables.t_subtable_list[indices[i]];
		if (!next) {
			return KERN_NO_ENTRY;
		}
		tables[i + 1] = next;
	}
	struct handle_table *leaf = tables[MAX_TABLE_DEPTH - 1];
	unsigned int handle_index = indices[MAX_TABLE_DEPTH - 1];
	if (!bitmap_check(leaf->t_handles.t_handle_map, handle_index)) {
		return KERN_NO_ENTRY;
	}

	/* Handle is valid: un-mark fullness along the path so the
	 * allocator will consider this subtree again. */
	for (i = 0; i < MAX_TABLE_DEPTH - 1; i++) {
		bitmap_clear(tables[i]->t_subtables.t_subtable_map, indices[i]);
	}
	bitmap_clear(leaf->t_handles.t_handle_map, handle_index);

	struct handle *handle_entry
		= &leaf->t_handles.t_handle_list[handle_index];
	if (handle_entry->h_object) {
		/* Drop the reference this handle held on its object. */
		object_remove_handle(handle_entry->h_object);
	}
	memset(handle_entry, 0x0, sizeof *handle_entry);
	return KERN_OK;
}
/*
 * Look up the slot for `handle` in the hierarchy rooted at `tab`.
 *
 * Returns the slot, or NULL if the handle does not decode, any sub-table
 * on the path is missing, the slot is not allocated, or the slot holds no
 * object.
 */
struct handle *handle_table_get_handle(
	struct handle_table *tab,
	kern_handle_t handle)
{
	unsigned int indices[MAX_TABLE_DEPTH];

	if (decode_handle_indices(handle, indices) != KERN_OK) {
		return NULL;
	}

	/* Walk the intermediate levels down to the leaf table. */
	int level = 0;
	while (level < MAX_TABLE_DEPTH - 1) {
		tab = tab->t_subtables.t_subtable_list[indices[level]];
		if (tab == NULL) {
			return NULL;
		}
		level++;
	}

	unsigned int slot = indices[level];
	if (!bitmap_check(tab->t_handles.t_handle_map, slot)
		|| tab->t_handles.t_handle_list[slot].h_object == NULL) {
		return NULL;
	}
	return &tab->t_handles.t_handle_list[slot];
}
/*
 * Transfer up to MIN(dst_handles_max, src_handles_count) handles from
 * handle table `src` to `dst`, driven by the kern_msg_handle_t descriptor
 * arrays that live in the respective user regions (all descriptor access
 * goes through vm_region_{read,write}_kernel).
 *
 * Per-descriptor modes:
 *   KERN_MSG_HANDLE_IGNORE - skipped.
 *   KERN_MSG_HANDLE_MOVE   - duplicated into dst, then freed from src
 *                            (net object refcount unchanged).
 *   KERN_MSG_HANDLE_COPY   - duplicated into dst, kept in src
 *                            (object refcount +1).
 *
 * The per-descriptor outcome is written back to hnd_result on the source
 * side; the destination side receives the new handle value.  Source MOVE
 * descriptors that were never processed (error stop, or more sources than
 * destination slots) are discarded from src so they are not leaked.
 *
 * Fixes over the previous version:
 *  - the main loop's `for (size_t i = ...)` shadowed the outer `i`, so the
 *    cleanup loop always restarted at 0 instead of where the main loop
 *    stopped;
 *  - error paths reported hnd_result = KERN_OK instead of the error;
 *  - the MOVE/COPY paths dereferenced the raw userspace pointer
 *    `src_handles[i]` instead of the safely-read copy `src_handle`;
 *  - the cleanup loop called object_remove_handle() *and*
 *    handle_table_free_handle(), which drops the reference itself — a
 *    double refcount drop.
 *
 * Returns the status of the last descriptor processed.
 */
kern_status_t handle_table_transfer(
	struct vm_region *dst_region,
	struct handle_table *dst,
	kern_msg_handle_t *dst_handles,
	size_t dst_handles_max,
	struct vm_region *src_region,
	struct handle_table *src,
	kern_msg_handle_t *src_handles,
	size_t src_handles_count)
{
	kern_status_t status = KERN_OK;
	size_t to_transfer = MIN(dst_handles_max, src_handles_count);
	size_t i = 0;

	/* NB: `i` is shared with the cleanup loop below so that cleanup
	 * resumes exactly where this loop stops. */
	for (; i < to_transfer; i++) {
		kern_msg_handle_t src_handle = {0}, dst_handle = {0};
		virt_addr_t src_handle_addr
			= (virt_addr_t)src_handles + (i * sizeof src_handle);
		virt_addr_t dst_handle_addr
			= (virt_addr_t)dst_handles + (i * sizeof dst_handle);

		status = vm_region_read_kernel(
			src_region,
			src_handle_addr,
			sizeof src_handle,
			&src_handle,
			NULL);
		if (status != KERN_OK) {
			/* Report the read failure, best-effort. */
			src_handle.hnd_result = status;
			vm_region_write_kernel(
				src_region,
				src_handle_addr,
				sizeof src_handle,
				&src_handle,
				NULL);
			break;
		}

		struct handle *src_entry
			= handle_table_get_handle(src, src_handle.hnd_value);
		struct handle *dst_entry = NULL;
		kern_handle_t dst_value = KERN_HANDLE_INVALID;
		if (!src_entry) {
			status = KERN_INVALID_ARGUMENT;
			src_handle.hnd_result = status;
			vm_region_write_kernel(
				src_region,
				src_handle_addr,
				sizeof src_handle,
				&src_handle,
				NULL);
			break;
		}

		switch (src_handle.hnd_mode) {
		case KERN_MSG_HANDLE_IGNORE:
			break;
		case KERN_MSG_HANDLE_MOVE:
		case KERN_MSG_HANDLE_COPY:
			status = handle_table_alloc_handle(
				dst,
				&dst_entry,
				&dst_value);
			if (status != KERN_OK) {
				break;
			}
			dst_entry->h_object = src_entry->h_object;
			dst_entry->h_flags = src_entry->h_flags;
			object_add_handle(dst_entry->h_object);
			if (src_handle.hnd_mode == KERN_MSG_HANDLE_MOVE) {
				/* Use the validated kernel copy of the
				 * descriptor, never the raw user pointer.
				 * free_handle drops src's object reference,
				 * balancing the add above. */
				handle_table_free_handle(
					src,
					src_handle.hnd_value);
			}
			dst_handle.hnd_mode = src_handle.hnd_mode;
			dst_handle.hnd_value = dst_value;
			dst_handle.hnd_result = KERN_OK;
			break;
		default:
			status = KERN_INVALID_ARGUMENT;
			break;
		}

		src_handle.hnd_result = status;
		vm_region_write_kernel(
			src_region,
			src_handle_addr,
			sizeof src_handle,
			&src_handle,
			NULL);
		vm_region_write_kernel(
			dst_region,
			dst_handle_addr,
			sizeof dst_handle,
			&dst_handle,
			NULL);
	}

	/* Discard MOVE handles that were never transferred, so the caller
	 * does not keep dangling rights in the source table. */
	for (; i < src_handles_count; i++) {
		kern_msg_handle_t handle = {0};
		virt_addr_t handle_addr
			= (virt_addr_t)src_handles + (i * sizeof handle);
		vm_region_read_kernel(
			src_region,
			handle_addr,
			sizeof handle,
			&handle,
			NULL);
		if (handle.hnd_mode != KERN_MSG_HANDLE_MOVE) {
			continue;
		}
		struct handle *src_entry
			= handle_table_get_handle(src, handle.hnd_value);
		if (src_entry) {
			/* handle_table_free_handle drops the object
			 * reference itself; an additional
			 * object_remove_handle here would double-drop. */
			handle_table_free_handle(src, handle.hnd_value);
		}
	}
	return status;
}