#include "btree.h"
#include "file.h"
#include "interface.h"
#include "mapping.h"

/*
 * NOTE(review): the original system-header include list was garbled in
 * extraction (a run of bare "#include" tokens with the header names
 * stripped, likely eaten angle brackets). The headers below are
 * reconstructed from visible usage in this file — confirm against the
 * original build before merging.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Size of the bounce buffer used to stage file data for the pager. */
#define TEMP_OBJECT_SIZE 0x10000

BTREE_DEFINE_SIMPLE_GET(struct fs_file, unsigned long, f_node, f_id, get_file);
BTREE_DEFINE_SIMPLE_INSERT(struct fs_file, f_node, f_id, put_file);

/*
 * Per-service filesystem context: owns the IPC channel, the VM controller
 * used to service page-in requests for file mappings, and a bounce buffer
 * (ctx_temp_object / ctx_temp_object_buf) that file data is read into
 * before being supplied to the pager.
 */
struct fs_context {
	struct fs_superblock *ctx_sb;    /* mounted filesystem; NULL until mount */
	struct fs_allocator *ctx_alloc;  /* allocator backing all ctx memory */
	struct btree ctx_filelist;       /* cache of open files keyed by f_id */
	kern_handle_t ctx_vm_controller; /* receives page requests for mappings */
	kern_handle_t ctx_channel;       /* IPC channel carrying FS messages */
	kern_handle_t ctx_temp_object;   /* VMO backing the bounce buffer */
	void *ctx_temp_object_buf;       /* local mapping of ctx_temp_object */
	struct fs_vtable ctx_vtable;     /* message dispatch table */
};

/*
 * Allocate and initialize a context: create the VM controller, create the
 * bounce-buffer VMO, and map it into our own address space.
 *
 * Returns the new context, or NULL on allocation/kernel failure.  On
 * failure all partially acquired resources are released (goto-chain
 * cleanup, CERT MEM12-C style).
 */
struct fs_context *
fs_context_create(struct fs_allocator *alloc)
{
	kern_handle_t self, address_space;
	task_self(&self);
	task_get_address_space(self, &address_space);
	kern_handle_close(self);

	struct fs_context *ctx = fs_alloc(alloc, sizeof *ctx);
	if (!ctx) {
		goto err_close_as;
	}
	memset(ctx, 0, sizeof *ctx);

	kern_status_t status = vm_controller_create(&ctx->ctx_vm_controller);
	if (status != KERN_OK) {
		goto err_free_ctx;
	}

	status = vm_object_create(
	    NULL,
	    0,
	    TEMP_OBJECT_SIZE,
	    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_USER,
	    &ctx->ctx_temp_object);
	if (status != KERN_OK) {
		goto err_close_controller;
	}

	virt_addr_t temp_buffer;
	status = address_space_map(
	    address_space,
	    MAP_ADDRESS_ANY,
	    ctx->ctx_temp_object,
	    0,
	    TEMP_OBJECT_SIZE,
	    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_USER,
	    &temp_buffer);
	if (status != KERN_OK) {
		goto err_close_object;
	}

	/*
	 * BUGFIX: the address-space handle was previously leaked on every
	 * path.  NOTE(review): assumes closing the handle does not tear down
	 * the mapping just established — confirm against kernel handle
	 * semantics.
	 */
	kern_handle_close(address_space);

	ctx->ctx_temp_object_buf = (void *)temp_buffer;
	ctx->ctx_alloc = alloc;
	ctx->ctx_vtable.open = fs_msg_open;
	ctx->ctx_vtable.close = fs_msg_close;
	ctx->ctx_vtable.read = fs_msg_read;
	ctx->ctx_vtable.write = fs_msg_write;
	ctx->ctx_vtable.map = fs_msg_map;
	return ctx;

err_close_object:
	kern_handle_close(ctx->ctx_temp_object);
err_close_controller:
	kern_handle_close(ctx->ctx_vm_controller);
err_free_ctx:
	fs_free(alloc, ctx);
err_close_as:
	kern_handle_close(address_space);
	return NULL;
}

/*
 * Tear down a context created by fs_context_create().
 *
 * BUGFIX: the VMO and VM-controller handles were previously leaked.
 * TODO(review): the bounce-buffer mapping (ctx_temp_object_buf) is still
 * leaked — unmapping needs the address-space handle, which is not retained
 * by the context.
 */
void
fs_context_destroy(struct fs_context *ctx)
{
	kern_handle_close(ctx->ctx_temp_object);
	kern_handle_close(ctx->ctx_vm_controller);
	fs_free(ctx->ctx_alloc, ctx);
}

/*
 * Mount a filesystem by invoking the driver-supplied mount callback.
 * The callback owns superblock construction; on success the returned
 * superblock becomes the context's root filesystem.
 */
enum fs_status
fs_context_mount_filesystem(
    struct fs_context *ctx,
    fs_mount_function_t func,
    void *arg,
    enum fs_mount_flags flags)
{
	if (!func) {
		return FS_ERR_INVALID_ARGUMENT;
	}
	struct fs_superblock *sb = NULL;
	enum fs_status status = func(ctx, arg, flags, &sb);
	if (status != FS_SUCCESS) {
		return status;
	}
	/* A "successful" mount that produced no superblock is a driver bug. */
	if (!sb) {
		return FS_ERR_INTERNAL_FAILURE;
	}
	ctx->ctx_sb = sb;
	return FS_SUCCESS;
}

/* Unmounting is not supported yet. */
enum fs_status
fs_context_unmount_filesystem(struct fs_context *ctx)
{
	(void)ctx;
	return FS_ERR_NOT_IMPLEMENTED;
}

/* Install the IPC channel the context serves messages from. */
void
fs_context_set_channel(struct fs_context *ctx, kern_handle_t channel)
{
	ctx->ctx_channel = channel;
}

/* Expose the VM controller so mappings can be registered against it. */
kern_handle_t
fs_context_get_vm_controller(const struct fs_context *ctx)
{
	return ctx->ctx_vm_controller;
}

/*
 * Drain one pending message from the channel, if any, and dispatch it.
 * An empty queue (KERN_NO_ENTRY) is not an error: the wait in
 * fs_context_handle_request may race with another consumer.
 */
static enum fs_status
handle_msg(struct fs_context *ctx)
{
	xpc_msg_t msg;
	kern_status_t status = xpc_msg_recv_nowait(ctx->ctx_channel, &msg);
	if (status == KERN_NO_ENTRY) {
		return FS_SUCCESS;
	}
	if (status != KERN_OK) {
		kern_logf("message recv error %d", status);
		return FS_ERR_INTERNAL_FAILURE;
	}
	switch (msg.msg_header.hdr_interface) {
	case INTERFACE_FS:
		status = fs_context_dispatch_msg(ctx, &msg);
		/* BUGFIX: dispatch status was silently discarded. */
		if (status != KERN_OK) {
			kern_logf("message dispatch error %d", status);
		}
		break;
	default:
		kern_logf(
		    "unknown message protocol %u",
		    msg.msg_header.hdr_interface);
		xpc_msg_reply_error(&msg, KERN_UNSUPPORTED);
		break;
	}
	return FS_SUCCESS;
}

/*
 * Service one page-in request from the VM controller: read the requested
 * file range into the bounce buffer, then supply those pages to the
 * faulting mapping's VMO.
 */
static enum fs_status
handle_page_request(struct fs_context *ctx)
{
	equeue_packet_page_request_t packet;
	/* BUGFIX: recv status was ignored and the packet used regardless. */
	kern_status_t kstatus =
	    vm_controller_recv(ctx->ctx_vm_controller, &packet);
	if (kstatus != KERN_OK) {
		kern_logf("page request recv error %d", kstatus);
		return FS_ERR_INTERNAL_FAILURE;
	}

	/*
	 * NOTE(review): req_vmo is assumed to carry the file_mapping pointer
	 * registered when the mapping was created — confirm against the
	 * mapping setup code.
	 */
	struct file_mapping *mapping = (struct file_mapping *)packet.req_vmo;
	kern_logf(
	    "received page request [%zx-%zx] for file %s",
	    packet.req_offset,
	    packet.req_offset + packet.req_length,
	    mapping->m_file->f_dent->d_name);

	/* The bounce buffer bounds how much we can stage per request. */
	size_t length = packet.req_length;
	if (length > TEMP_OBJECT_SIZE) {
		length = TEMP_OBJECT_SIZE;
	}

	char *dst = ctx->ctx_temp_object_buf;
	xpc_buffer_t buf = XPC_LOCAL_BUFFER_OUT(dst, TEMP_OBJECT_SIZE);
	enum fs_status status = fs_file_read_at(
	    mapping->m_file,
	    &buf,
	    mapping->m_file_offset + packet.req_offset,
	    length);
	if (status != FS_SUCCESS) {
		kern_logf("map-read failed with code %d", status);
		return status;
	}

	/*
	 * BUGFIX: previously supplied packet.req_length pages even though
	 * only `length` bytes were read — oversized requests would supply
	 * stale bounce-buffer contents.  Supply exactly what was read; the
	 * controller will re-request any remainder.
	 */
	vm_controller_supply_pages(
	    ctx->ctx_vm_controller,
	    mapping->m_vmo,
	    packet.req_offset,
	    ctx->ctx_temp_object,
	    0,
	    length);
	return FS_SUCCESS;
}

/*
 * Block until either an IPC message or a page request is pending, then
 * handle one event.  Channel messages take priority when both signals are
 * observed; the unhandled signal remains asserted for the next call.
 */
enum fs_status
fs_context_handle_request(struct fs_context *ctx)
{
	kern_wait_item_t waiters[] = {
		{
		    .w_handle = ctx->ctx_channel,
		    .w_waitfor = CHANNEL_SIGNAL_MSG_RECEIVED,
		},
		{
		    .w_handle = ctx->ctx_vm_controller,
		    .w_waitfor = VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED,
		},
	};
	const size_t nr_waiters = sizeof waiters / sizeof waiters[0];
	kern_status_t kstatus = kern_object_wait(waiters, nr_waiters);
	if (kstatus != KERN_OK) {
		return FS_ERR_INTERNAL_FAILURE;
	}
	if (waiters[0].w_observed & CHANNEL_SIGNAL_MSG_RECEIVED) {
		return handle_msg(ctx);
	}
	if (waiters[1].w_observed & VM_CONTROLLER_SIGNAL_REQUEST_RECEIVED) {
		return handle_page_request(ctx);
	}
	return FS_SUCCESS;
}

/*
 * Look up a file by id in the cache, creating and inserting a
 * zero-initialized entry if absent.  Returns NULL only on allocation
 * failure.
 */
struct fs_file *
fs_context_open_file(struct fs_context *ctx, unsigned long id)
{
	struct fs_file *f = get_file(&ctx->ctx_filelist, id);
	if (!f) {
		f = fs_alloc(ctx->ctx_alloc, sizeof *f);
		if (!f) {
			return NULL;
		}
		memset(f, 0, sizeof *f);
		f->f_id = id;
		put_file(&ctx->ctx_filelist, f);
	}
	return f;
}

/* Cache lookup only: returns NULL if the file has not been opened. */
struct fs_file *
fs_context_get_file(struct fs_context *ctx, unsigned long id)
{
	return get_file(&ctx->ctx_filelist, id);
}

/*
 * Intentionally a no-op: cached files live for the lifetime of the
 * context (see fs_context_open_file).
 */
void
fs_context_close_file(struct fs_context *ctx, struct fs_file *f)
{
	(void)ctx;
	(void)f;
}

/*
 * Copy the leading path component of `in` (up to '/' or NUL) into `out`,
 * NUL-terminating it.  Returns the number of characters copied, which may
 * be less than the component length if it exceeds max - 1 (the caller
 * detects truncation by inspecting in[returned length]).
 */
static size_t
get_first_path_component(const char *in, char *out, size_t max)
{
	/* BUGFIX: guard the size_t underflow of max - 1 when max == 0. */
	if (max == 0) {
		return 0;
	}
	size_t i = 0;
	while (i < max - 1) {
		if (in[i] == '\0' || in[i] == '/') {
			break;
		}
		out[i] = in[i];
		i++;
	}
	out[i] = '\0';
	return i;
}

/*
 * Walk `path` component-by-component from the mounted root, looking each
 * token up in the current directory.  Leading and repeated '/' are
 * skipped; a trailing '/' is tolerated.  On success *out is the dentry of
 * the final component (the root itself for "/" or "").
 */
extern enum fs_status
fs_context_resolve_path(
    struct fs_context *ctx, const char *path, struct fs_dentry **out)
{
	if (!ctx->ctx_sb || !ctx->ctx_sb->s_root) {
		return FS_ERR_NO_ENTRY;
	}
	struct fs_dentry *cur = ctx->ctx_sb->s_root;
	char tok[256];
	while (*path != '\0') {
		while (*path == '/') {
			path++;
		}
		size_t tok_len = get_first_path_component(path, tok, sizeof tok);
		if (!tok_len) {
			break;
		}
		/*
		 * BUGFIX: a component longer than the token buffer used to be
		 * silently truncated and looked up under the wrong name; the
		 * copy stops mid-component exactly when the next character is
		 * neither '/' nor NUL.
		 */
		if (path[tok_len] != '\0' && path[tok_len] != '/') {
			return FS_ERR_INVALID_ARGUMENT;
		}
		/* Only directories can contain further components. */
		if (cur->d_inode->i_mode != FS_INODE_DIR) {
			return FS_ERR_NOT_DIRECTORY;
		}
		struct fs_dentry *next = NULL;
		enum fs_status status = fs_inode_lookup(cur->d_inode, tok, &next);
		if (status != FS_SUCCESS) {
			return status;
		}
		/* Lookup succeeding without a result is a driver bug. */
		if (!next) {
			return FS_ERR_INTERNAL_FAILURE;
		}
		cur = next;
		path += tok_len;
	}
	*out = cur;
	return FS_SUCCESS;
}

/* Dispatch an FS-interface message through the context's vtable. */
kern_status_t
fs_context_dispatch_msg(struct fs_context *ctx, xpc_msg_t *msg)
{
	return fs_dispatch(NULL, msg, &ctx->ctx_vtable, ctx);
}

/* Thin wrappers over the context's allocator. */
void *
fs_context_alloc(struct fs_context *ctx, size_t count)
{
	return fs_alloc(ctx->ctx_alloc, count);
}

void *
fs_context_calloc(struct fs_context *ctx, size_t count, size_t sz)
{
	return fs_calloc(ctx->ctx_alloc, count, sz);
}

void *
fs_context_realloc(struct fs_context *ctx, void *p, size_t count)
{
	return fs_realloc(ctx->ctx_alloc, p, count);
}

void
fs_context_free(struct fs_context *ctx, void *p)
{
	fs_free(ctx->ctx_alloc, p);
}