2023-02-17 19:36:14 +00:00
|
|
|
#include <socks/object.h>
|
|
|
|
|
#include <socks/queue.h>
|
|
|
|
|
#include <socks/locks.h>
|
|
|
|
|
|
2023-02-25 19:12:09 +00:00
|
|
|
/* True iff @obj's type implements the operation named @opname. */
#define HAS_OP(obj, opname) ((obj)->ob_type->ob_ops.opname)
|
2023-02-17 19:36:14 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/* Global list of all registered object types; guarded by object_types_lock. */
static struct queue object_types;
|
2023-02-17 19:36:14 +00:00
|
|
|
/* Protects object_types against concurrent register/unregister. */
static spin_lock_t object_types_lock = SPIN_LOCK_INIT;
|
|
|
|
|
|
|
|
|
|
/*
 * object_bootstrap - one-time startup initialization of the object subsystem.
 *
 * Initializes the built-in object families (sets, links) and the global
 * namespace root, in that order.  Must run before any object_type_register()
 * or object_create() call.
 *
 * Returns KERN_OK unconditionally (the init helpers report no failures here).
 */
kern_status_t object_bootstrap(void)
{
	init_set_objects();
	init_link_objects();
	init_global_namespace();

	return KERN_OK;
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
kern_status_t object_type_register(struct object_type *p)
|
2023-02-17 19:36:14 +00:00
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
spin_lock_irqsave(&object_types_lock, &flags);
|
|
|
|
|
queue_push_back(&object_types, &p->ob_list);
|
|
|
|
|
spin_unlock_irqrestore(&object_types_lock, flags);
|
|
|
|
|
|
|
|
|
|
p->ob_cache.c_name = p->ob_name;
|
2023-05-06 19:48:14 +01:00
|
|
|
p->ob_cache.c_obj_size = p->ob_size;
|
2023-02-17 19:36:14 +00:00
|
|
|
p->ob_cache.c_page_order = VM_PAGE_16K;
|
2023-03-20 20:41:39 +00:00
|
|
|
|
2023-02-17 19:36:14 +00:00
|
|
|
vm_cache_init(&p->ob_cache);
|
|
|
|
|
p->ob_flags |= OBJTYPE_INIT;
|
2023-03-20 20:41:39 +00:00
|
|
|
|
2023-02-17 19:36:14 +00:00
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
kern_status_t object_type_unregister(struct object_type *p)
|
2023-02-17 19:36:14 +00:00
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
spin_lock_irqsave(&object_types_lock, &flags);
|
|
|
|
|
queue_delete(&object_types, &p->ob_list);
|
|
|
|
|
spin_unlock_irqrestore(&object_types_lock, flags);
|
|
|
|
|
|
|
|
|
|
return KERN_OK;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct object *object_create(struct object_type *type)
|
2023-02-17 19:36:14 +00:00
|
|
|
{
|
|
|
|
|
if (!(type->ob_flags & OBJTYPE_INIT)) {
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2023-03-20 20:41:39 +00:00
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct vm_cache *cache = &type->ob_cache;
|
2023-05-06 22:22:05 +01:00
|
|
|
void *obj_buf = vm_cache_alloc(cache, 0);
|
|
|
|
|
if (!obj_buf) {
|
2023-02-17 19:36:14 +00:00
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2023-05-06 22:22:05 +01:00
|
|
|
memset(obj_buf, 0x00, type->ob_size);
|
|
|
|
|
|
|
|
|
|
struct object *obj = (struct object *)((unsigned char *)obj_buf + type->ob_header_offset);
|
|
|
|
|
|
2023-02-25 19:12:09 +00:00
|
|
|
obj->ob_type = type;
|
2023-02-17 19:36:14 +00:00
|
|
|
obj->ob_lock = SPIN_LOCK_INIT;
|
|
|
|
|
obj->ob_magic = OBJECT_MAGIC;
|
|
|
|
|
obj->ob_refcount = 1;
|
|
|
|
|
obj->ob_handles = 0;
|
2023-03-20 20:41:39 +00:00
|
|
|
|
2023-02-17 19:36:14 +00:00
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct object *object_ref(struct object *obj)
|
2023-02-17 19:36:14 +00:00
|
|
|
{
|
|
|
|
|
obj->ob_refcount++;
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
void object_deref(struct object *obj)
|
2023-02-17 19:36:14 +00:00
|
|
|
{
|
|
|
|
|
unsigned long flags;
|
|
|
|
|
spin_lock_irqsave(&obj->ob_lock, &flags);
|
2023-03-20 20:41:39 +00:00
|
|
|
|
2023-02-17 19:36:14 +00:00
|
|
|
if (obj->ob_refcount == 0) {
|
|
|
|
|
spin_unlock_irqrestore(&obj->ob_lock, flags);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
obj->ob_refcount--;
|
|
|
|
|
|
|
|
|
|
if (obj->ob_refcount > 0) {
|
|
|
|
|
spin_unlock_irqrestore(&obj->ob_lock, flags);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-20 20:41:39 +00:00
|
|
|
if (HAS_OP(obj, destroy)) {
|
|
|
|
|
obj->ob_type->ob_ops.destroy(obj);
|
2023-02-17 19:36:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
vm_cache_free(&obj->ob_type->ob_cache, obj);
|
|
|
|
|
}
|
|
|
|
|
|
2023-06-02 19:34:33 +01:00
|
|
|
void object_lock(struct object *obj)
|
|
|
|
|
{
|
|
|
|
|
spin_lock(&obj->ob_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void object_unlock(struct object *obj)
|
|
|
|
|
{
|
|
|
|
|
spin_unlock(&obj->ob_lock);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void object_lock_irqsave(struct object *obj, unsigned long *flags)
|
2023-02-25 17:57:53 +00:00
|
|
|
{
|
|
|
|
|
spin_lock_irqsave(&obj->ob_lock, flags);
|
|
|
|
|
}
|
|
|
|
|
|
2023-06-02 19:34:33 +01:00
|
|
|
void object_unlock_irqrestore(struct object *obj, unsigned long flags)
|
2023-02-25 17:57:53 +00:00
|
|
|
{
|
|
|
|
|
spin_unlock_irqrestore(&obj->ob_lock, flags);
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
void *object_data(struct object *obj)
|
2023-02-17 19:36:14 +00:00
|
|
|
{
|
|
|
|
|
return (char *)obj + sizeof *obj;
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
struct object *object_header(void *p)
|
2023-02-17 19:36:14 +00:00
|
|
|
{
|
2023-04-12 20:17:11 +01:00
|
|
|
struct object *obj = (struct object *)((char *)p - sizeof *obj);
|
2023-02-17 19:36:14 +00:00
|
|
|
if (obj->ob_magic != OBJECT_MAGIC) {
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
|
}
|
2023-02-25 17:57:53 +00:00
|
|
|
|
2023-05-14 21:11:32 +01:00
|
|
|
/*
 * object_read - read up to @max bytes from @obj into @p.
 * @nr_read: optional out-parameter; receives the transfer count (0 when
 *           the type has no read operation).
 * @flags:   passed through to the type's read op.
 *
 * Delegates to the type's read operation, which updates the byte count in
 * place.  Returns the op's status, or KERN_UNSUPPORTED if the type does
 * not implement read.
 */
kern_status_t object_read(struct object *obj, void *p, size_t max,
			  size_t *nr_read, socks_flags_t flags)
{
	kern_status_t status;
	size_t done = max;

	if (obj->ob_type->ob_ops.read != NULL) {
		/* The op rewrites `done` with the number of bytes read. */
		status = obj->ob_type->ob_ops.read(obj, p, &done, flags);
	} else {
		status = KERN_UNSUPPORTED;
		done = 0;
	}

	if (nr_read != NULL) {
		*nr_read = done;
	}

	return status;
}
|
|
|
|
|
|
|
|
|
|
/*
 * object_write - write up to @max bytes from @p into @obj.
 * @nr_written: optional out-parameter; receives the transfer count (0 when
 *              the type has no write operation).
 * @flags:      passed through to the type's write op.
 *
 * Delegates to the type's write operation, which updates the byte count in
 * place.  The original accepted @nr_written but never stored through it,
 * leaving callers with garbage; this now mirrors object_read() and always
 * reports the count when asked.
 *
 * Returns the op's status, or KERN_UNSUPPORTED if the type does not
 * implement write.
 */
kern_status_t object_write(struct object *obj, const void *p, size_t max,
			   size_t *nr_written, socks_flags_t flags)
{
	kern_status_t status = KERN_UNSUPPORTED;

	if (obj->ob_type->ob_ops.write) {
		status = obj->ob_type->ob_ops.write(obj, p, &max, flags);
	} else {
		max = 0;
	}

	/* Mirror object_read(): always report the transfer count if asked. */
	if (nr_written) {
		*nr_written = max;
	}

	return status;
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
kern_status_t object_get_child_named(struct object *obj, const char *name, struct object **out)
|
2023-02-25 17:57:53 +00:00
|
|
|
{
|
2023-02-25 19:12:09 +00:00
|
|
|
kern_status_t status = KERN_UNSUPPORTED;
|
2023-03-20 20:41:39 +00:00
|
|
|
|
2023-02-25 17:57:53 +00:00
|
|
|
if (HAS_OP(obj, get_named)) {
|
2023-02-25 19:12:09 +00:00
|
|
|
status = obj->ob_type->ob_ops.get_named(obj, name, out);
|
2023-02-25 17:57:53 +00:00
|
|
|
}
|
|
|
|
|
|
2023-02-25 19:12:09 +00:00
|
|
|
return status;
|
2023-02-25 17:57:53 +00:00
|
|
|
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
/*
 * object_get_child_at - look up a child of @obj by index.
 * @at:  position to query; interpretation is up to the type's get_at op.
 * @out: receives the child on success.
 *
 * Returns KERN_UNSUPPORTED when the type has no get_at operation;
 * otherwise the op's status.
 */
kern_status_t object_get_child_at(struct object *obj, size_t at, struct object **out)
{
	struct object_type *type = obj->ob_type;

	if (type->ob_ops.get_at == NULL) {
		return KERN_UNSUPPORTED;
	}

	return type->ob_ops.get_at(obj, at, out);
}
|
|
|
|
|
|
2023-04-12 20:17:11 +01:00
|
|
|
kern_status_t object_query_name(struct object *obj, char name[OBJECT_NAME_MAX])
|
2023-02-25 17:57:53 +00:00
|
|
|
{
|
|
|
|
|
if (HAS_OP(obj, query_name)) {
|
2023-02-25 19:12:09 +00:00
|
|
|
return obj->ob_type->ob_ops.query_name(obj, name);
|
2023-02-25 17:57:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return KERN_UNSUPPORTED;
|
|
|
|
|
}
|