#include <kernel/locks.h>
#include <kernel/object.h>
#include <kernel/queue.h>

/* Fetch the named operation from an object's type; evaluates to the
 * function pointer, which is NULL (false) when the type does not
 * implement the op. */
#define HAS_OP(obj, opname) ((obj)->ob_type->ob_ops.opname)

/* Registry of all object types, protected by object_types_lock. */
static struct queue object_types = QUEUE_INIT;
static spin_lock_t object_types_lock = SPIN_LOCK_INIT;

/* Hand out kernel object IDs from a private, monotonically increasing
 * counter. The counter has its own lock, so ID allocation never
 * contends with any object or type-registry lock. */
static koid_t koid_alloc(void)
{
	static koid_t counter = 0;
	static spin_lock_t lock = SPIN_LOCK_INIT;

	unsigned long flags;
	spin_lock_irqsave(&lock, &flags);
	koid_t result = counter;
	counter++;
	spin_unlock_irqrestore(&lock, flags);

	return result;
}
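
/* Because the counter only ever increments under its private lock,
 * koid_alloc() never returns the same ID twice within a boot, short of
 * counter wrap; how far away wrap is depends on the width of koid_t,
 * which is not defined in this file. */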

/* Nothing to set up at the moment: the type registry and its lock are
 * statically initialized above. */
kern_status_t object_bootstrap(void)
{
	return KERN_OK;
}

/* Make an object type usable: set up its backing cache and mark it
 * initialized before publishing it in the global registry, so the
 * registry never exposes a half-initialized type. */
kern_status_t object_type_register(struct object_type *p)
{
	p->ob_cache.c_name = p->ob_name;
	p->ob_cache.c_obj_size = p->ob_size;
	vm_cache_init(&p->ob_cache);
	p->ob_flags |= OBJTYPE_INIT;

	unsigned long flags;
	spin_lock_irqsave(&object_types_lock, &flags);
	queue_push_back(&object_types, &p->ob_list);
	spin_unlock_irqrestore(&object_types_lock, flags);

	return KERN_OK;
}
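
/* Registration sketch. "port_type", "struct port_payload", and
 * "port_destroy" are illustrative names, not definitions from this
 * file; only fields this file actually touches are initialized:
 *
 *     static struct object_type port_type = {
 *         .ob_name = "port",
 *         .ob_size = sizeof(struct object) + sizeof(struct port_payload),
 *         .ob_header_offset = 0,
 *         .ob_ops = { .destroy = port_destroy },
 *     };
 *
 *     kern_status_t st = object_type_register(&port_type);
 */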

/* Withdraw an object type from the registry and clear its initialized
 * flag, so subsequent object_create() calls on it fail. */
kern_status_t object_type_unregister(struct object_type *p)
{
	unsigned long flags;
	spin_lock_irqsave(&object_types_lock, &flags);
	queue_delete(&object_types, &p->ob_list);
	spin_unlock_irqrestore(&object_types_lock, flags);

	p->ob_flags &= ~OBJTYPE_INIT;

	return KERN_OK;
}

/* Allocate and initialize a fresh object of the given type. The cache
 * allocation begins ob_header_offset bytes before the struct object
 * header, leaving room for any per-type prefix; object_data() hands
 * out the payload that follows the header. A new object starts with
 * one reference and zero handles. */
struct object *object_create(struct object_type *type)
{
	if (!(type->ob_flags & OBJTYPE_INIT)) {
		return NULL;
	}

	struct vm_cache *cache = &type->ob_cache;
	void *obj_buf = vm_cache_alloc(cache, 0);

	if (!obj_buf) {
		return NULL;
	}

	struct object *obj = (struct object *)((unsigned char *)obj_buf
					       + type->ob_header_offset);

	obj->ob_id = koid_alloc();
	obj->ob_type = type;
	obj->ob_lock = SPIN_LOCK_INIT;
	obj->ob_magic = OBJECT_MAGIC;
	obj->ob_refcount = 1;
	obj->ob_handles = 0;

	return obj;
}
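
/* Typical lifecycle, as a sketch ("port_type" and "struct port_payload"
 * are illustrative names, not definitions from this file):
 *
 *     struct object *obj = object_create(&port_type);
 *     if (!obj)
 *         return NULL;                    // cache exhausted, or type not registered
 *     struct port_payload *p = object_data(obj);  // per-type data after the header
 *     ...
 *     object_unref(obj);                  // drop the creation reference
 */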

/* Take an additional reference and return the object for call
 * chaining. The object lock serializes the count against
 * object_unref(), which also manipulates it under the lock. */
struct object *object_ref(struct object *obj)
{
	unsigned long flags;
	spin_lock_irqsave(&obj->ob_lock, &flags);
	obj->ob_refcount++;
	spin_unlock_irqrestore(&obj->ob_lock, flags);

	return obj;
}

/* Tear down a single object: run the type's destroy hook, if any (the
 * hook may queue children for the recursive pass), then hand the
 * memory back to the type's cache. The pointer passed to
 * vm_cache_free() is rewound by ob_header_offset to match the
 * allocation that object_create() made. */
static void __cleanup(struct object *obj, struct queue *queue)
{
	struct object_type *type = obj->ob_type;

	if (HAS_OP(obj, destroy)) {
		type->ob_ops.destroy(obj, queue);
	}

	vm_cache_free(&type->ob_cache,
		      (unsigned char *)obj - type->ob_header_offset);
}

/* Called with obj->ob_lock held and the saved interrupt state in
 * `flags`. If references or handles remain, the object stays;
 * otherwise it is destroyed, along with any children its destroy hooks
 * queue up. The type pointer is captured up front because __cleanup()
 * frees the object it is given. */
static void object_cleanup(struct object *obj, unsigned long flags)
{
	if (obj->ob_refcount > 0 || obj->ob_handles > 0) {
		spin_unlock_irqrestore(&obj->ob_lock, flags);
		return;
	}

	/* Both counts are zero, so nothing can reach this object any
	 * more; drop the lock (restoring the interrupt state) before
	 * freeing it rather than leaking the saved flags. */
	struct object_type *type = obj->ob_type;

	spin_unlock_irqrestore(&obj->ob_lock, flags);

	struct queue queue = QUEUE_INIT;

	__cleanup(obj, &queue);

	if (!type->ob_ops.destroy_recurse) {
		return;
	}

	while (!queue_empty(&queue)) {
		struct queue_entry *entry = queue_pop_front(&queue);
		struct object *child = NULL;

		type->ob_ops.destroy_recurse(entry, &child);

		if (child) {
			__cleanup(child, &queue);
		}
	}
}
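
/* Shape of the recursive-destroy contract, inferred from the call
 * sites above; "port_destroy", "port_destroy_recurse", "child_entry",
 * and "entry_to_object" are illustrative:
 *
 *     static void port_destroy(struct object *obj, struct queue *queue)
 *     {
 *         // queue each child; object_cleanup() drains the queue
 *         queue_push_back(queue, &child_entry);
 *     }
 *
 *     static void port_destroy_recurse(struct queue_entry *entry,
 *                                      struct object **out_child)
 *     {
 *         *out_child = entry_to_object(entry);   // NULL to skip the entry
 *     }
 */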

/* Drop one reference. A count already at zero is left alone, so a
 * stray extra unref cannot underflow it. object_cleanup() releases the
 * lock on every path and destroys the object once both counts reach
 * zero. */
void object_unref(struct object *obj)
{
	unsigned long flags;
	spin_lock_irqsave(&obj->ob_lock, &flags);

	if (obj->ob_refcount == 0) {
		spin_unlock_irqrestore(&obj->ob_lock, flags);
		return;
	}

	obj->ob_refcount--;
	object_cleanup(obj, flags);
}

/* Handles are counted separately from references; an object survives
 * until both counts drop to zero. The lock is taken here for the same
 * reason as in object_ref(): the counter races with removal
 * otherwise. */
void object_add_handle(struct object *obj)
{
	unsigned long flags;
	spin_lock_irqsave(&obj->ob_lock, &flags);
	obj->ob_handles++;
	spin_unlock_irqrestore(&obj->ob_lock, flags);
}

void object_remove_handle(struct object *obj)
{
	unsigned long flags;
	spin_lock_irqsave(&obj->ob_lock, &flags);

	if (obj->ob_handles == 0) {
		spin_unlock_irqrestore(&obj->ob_lock, flags);
		return;
	}

	obj->ob_handles--;
	object_cleanup(obj, flags);
}

/* Thin wrappers so callers can take an object's lock without touching
 * its fields directly; the irqsave variants are for contexts where
 * interrupts may be enabled. */
void object_lock(struct object *obj)
{
	spin_lock(&obj->ob_lock);
}

void object_unlock(struct object *obj)
{
	spin_unlock(&obj->ob_lock);
}

void object_lock_irqsave(struct object *obj, unsigned long *flags)
{
	spin_lock_irqsave(&obj->ob_lock, flags);
}

void object_unlock_irqrestore(struct object *obj, unsigned long flags)
{
	spin_unlock_irqrestore(&obj->ob_lock, flags);
}

/* The per-type payload lives immediately after the object header;
 * these helpers convert between the two views. object_header()
 * validates the magic, so a bogus payload pointer yields NULL instead
 * of garbage. */
void *object_data(struct object *obj)
{
	return (char *)obj + sizeof *obj;
}

struct object *object_header(void *p)
{
	struct object *obj = (struct object *)((char *)p - sizeof *obj);

	if (obj->ob_magic != OBJECT_MAGIC) {
		return NULL;
	}

	return obj;
}
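
/* object_data() and object_header() are inverses for a live object, so
 * code holding only a payload pointer can recover the header:
 *
 *     void *data = object_data(obj);
 *     struct object *hdr = object_header(data);   // hdr == obj
 *     if (hdr)
 *         object_ref(hdr);
 */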