diff --git a/kernel/bsp.c b/kernel/bsp.c index 20020d8..4ef6569 100644 --- a/kernel/bsp.c +++ b/kernel/bsp.c @@ -102,7 +102,6 @@ static kern_status_t map_executable( 5, bsp->bsp_trailer.bsp_data_size, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_USER); - /* TODO copy .data from executable to memory */ if (!data) { return KERN_NO_MEMORY; } @@ -115,6 +114,19 @@ static kern_status_t map_executable( off_t text_voffset = bsp->bsp_trailer.bsp_text_vaddr; off_t data_voffset = bsp->bsp_trailer.bsp_data_vaddr; +#if 0 + size_t tmp = 0; + status = vm_object_copy( + data, + 0, + bsp->bsp_vmo, + bsp->bsp_trailer.bsp_data_faddr, + bsp->bsp_trailer.bsp_data_size, + &tmp); + + tracek("read %zuB of data from executable", tmp); +#endif + tracek("text_foffset=%06llx, data_foffset=%06llx", text_foffset, data_foffset); diff --git a/vm/vm-object.c b/vm/vm-object.c index ca45f81..4be48a7 100644 --- a/vm/vm-object.c +++ b/vm/vm-object.c @@ -5,14 +5,130 @@ #define VM_OBJECT_CAST(p) \ OBJECT_C_CAST(struct vm_object, vo_base, &vm_object_type, p) +#define PAGE_ALIGN_DOWN(p) ((p) & ~VM_PAGE_MASK) +#define PAGE_ALIGN_UP(p) \ + if ((p) & VM_PAGE_MASK) { \ + (p) &= ~VM_PAGE_MASK; \ + (p) += VM_PAGE_SIZE; \ + } + static struct object_type vm_object_type = { .ob_name = "vm-object", .ob_size = sizeof(struct vm_object), .ob_header_offset = offsetof(struct vm_object, vo_base), }; +struct object_iterator { + struct vm_object *it_obj; + off_t it_offset; + struct vm_page *it_pg; + bool it_alloc; + + void *it_buf; + size_t it_max; +}; + static const enum vm_page_order GLOBAL_PAGE_ORDER = VM_PAGE_4K; +static kern_status_t object_iterator_begin( + struct object_iterator *it, + struct vm_object *obj, + bool alloc) +{ + memset(it, 0x0, sizeof *it); + + it->it_obj = obj; + it->it_alloc = alloc; + + if (alloc) { + it->it_pg = vm_object_alloc_page(obj, 0, VM_PAGE_4K); + + if (!it->it_pg) { + return KERN_NO_MEMORY; + } + } else { + it->it_pg = vm_object_get_page(obj, 0); + } + + if (it->it_pg) { + it->it_buf = 
vm_page_get_vaddr(it->it_pg); + it->it_max = vm_page_get_size_bytes(it->it_pg); + } else { + struct btree_node *n = btree_first(&obj->vo_pages); + struct vm_page *pg + = BTREE_CONTAINER(struct vm_page, p_bnode, n); + it->it_buf = NULL; + it->it_max = pg ? pg->p_vmo_offset : obj->vo_size; + } + + return KERN_OK; +} + +static kern_status_t object_iterator_seek( + struct object_iterator *it, + size_t nr_bytes) +{ + if (nr_bytes < it->it_max) { + it->it_offset += nr_bytes; + it->it_buf = (char *)it->it_buf + nr_bytes; + it->it_max -= nr_bytes; + return KERN_OK; + } + + it->it_offset += nr_bytes; + if (it->it_offset >= it->it_obj->vo_size) { + it->it_buf = NULL; + it->it_max = 0; + return KERN_OK; + } + + if (it->it_alloc) { + it->it_pg = vm_object_alloc_page( + it->it_obj, + it->it_offset, + VM_PAGE_4K); + + if (!it->it_pg) { + return KERN_NO_MEMORY; + } + } else { + it->it_pg = vm_object_get_page(it->it_obj, it->it_offset); + } + + if (it->it_pg) { + it->it_buf = vm_page_get_vaddr(it->it_pg); + it->it_max = vm_page_get_size_bytes(it->it_pg); + } else { + struct btree_node *n = btree_first(&it->it_obj->vo_pages); + struct vm_page *pg + = BTREE_CONTAINER(struct vm_page, p_bnode, n); + while (pg) { + if (pg->p_vmo_offset >= it->it_offset) { + break; + } + + n = btree_next(n); + pg = BTREE_CONTAINER(struct vm_page, p_bnode, n); + } + + it->it_buf = NULL; + it->it_max = pg ? 
pg->p_vmo_offset - it->it_offset
+			  : it->it_obj->vo_size - it->it_offset;
+	}
+
+	return KERN_OK;
+}
+
+kern_status_t vm_object_type_init(void)
+{
+	return object_type_register(&vm_object_type);
+}
+
+struct vm_object *vm_object_cast(struct object *obj)
+{
+	return VM_OBJECT_CAST(obj);
+}
+
 static void put_page(
 	struct vm_object *vmo,
 	struct vm_page *new_page,
@@ -58,11 +174,6 @@ static void put_page(
 	}
 }
 
-kern_status_t vm_object_type_init(void)
-{
-	return object_type_register(&vm_object_type);
-}
-
 enum vm_page_order vm_object_global_page_order(void)
 {
 	return GLOBAL_PAGE_ORDER;
@@ -216,3 +327,444 @@ extern struct vm_page *vm_object_alloc_page(
 
 	return NULL;
 }
+
+#if 0
+/* read data from a vm-object, where [offset, offset+count] is confined to
+ * a single page */
+static kern_status_t read_data_onepage(
+	struct vm_object *vo,
+	void *dst,
+	off_t offset,
+	size_t count,
+	size_t *nr_read)
+{
+	off_t offset_aligned = PAGE_ALIGN_DOWN(offset);
+	off_t page_offset = offset - offset_aligned;
+
+	if (nr_read) {
+		*nr_read = count;
+	}
+
+	struct vm_page *pg = vm_object_get_page(vo, offset_aligned);
+	if (!pg) {
+		memset(dst, 0x0, count);
+		return KERN_OK;
+	}
+
+	const char *page_data = (const char *)vm_page_get_vaddr(pg);
+	const char *src = page_data + page_offset;
+	memcpy(dst, src, count);
+
+	return KERN_OK;
+}
+
+/* where offset is not aligned to a page boundary, read data from offset until
+ * the end of the page */
+static kern_status_t read_data_header(
+	struct vm_object *vo,
+	void **dstp,
+	off_t *offset,
+	size_t *count,
+	size_t *nr_read)
+{
+	void *dst = *dstp;
+	off_t offset_aligned = PAGE_ALIGN_DOWN(*offset);
+	off_t page_offset = *offset - offset_aligned;
+	struct vm_page *pg = vm_object_get_page(vo, offset_aligned);
+
+	size_t to_read = VM_PAGE_SIZE - page_offset;
+
+	if (pg) {
+		const char *src = (const char *)vm_page_get_vaddr(pg);
+		memcpy(dst, src, to_read);
+	} else {
+		memset(dst, 0x0, to_read);
+	}
+
+	*dstp = (char *)dst + to_read;
+	*offset += to_read;
+	
*count -= to_read; + *nr_read += to_read; + + return KERN_OK; +} + +/* read data from a vm-object, where the start and end of the read are aligned + * to page boundaries, and at least one page's worth of data is being read */ +static kern_status_t read_data( + struct vm_object *vo, + void **dstp, + off_t *offset, + size_t *count, + size_t *nr_read) +{ + off_t offset_unaligned = *offset; + + off_t offset_aligned = offset_unaligned; + size_t count_aligned = *count; + offset_aligned = PAGE_ALIGN_DOWN(offset_unaligned); + count_aligned = PAGE_ALIGN_DOWN(*count); + + char *dst = (char *)*dstp + (offset_unaligned - offset_aligned); + struct vm_page *pg = NULL; + + for (size_t i = 0; i < count_aligned; i += VM_PAGE_SIZE) { + pg = vm_object_get_page(vo, offset_aligned + i); + + if (pg) { + const char *src = (const char *)vm_page_get_vaddr(pg); + memcpy(dst + i, src, VM_PAGE_SIZE); + } else { + memset(dst + i, 0x0, VM_PAGE_SIZE); + } + } + + *dstp = dst + count_aligned; + *offset = offset_aligned + count_aligned; + *count = *count - count_aligned; + *nr_read += count_aligned; + + return KERN_OK; +} + +/* where offset+count is not aligned to a page boundary, read data from the + * start of the last page up to offset+count. 
*/ +static kern_status_t read_data_trailer( + struct vm_object *vo, + void **dstp, + off_t *offset, + size_t *count, + size_t *nr_read) +{ + void *dst = *dstp; + off_t limit = *offset + *count; + off_t limit_aligned = PAGE_ALIGN_DOWN(limit); + struct vm_page *pg = vm_object_get_page(vo, limit_aligned); + + size_t to_read = limit - limit_aligned; + + if (pg) { + const char *src = (const char *)vm_page_get_vaddr(pg); + memcpy(dst, src, to_read); + } else { + memset(dst, 0x0, to_read); + } + + *dstp = (char *)dst + to_read; + *offset += to_read; + *count -= to_read; + + return KERN_OK; +} +#endif + +kern_status_t vm_object_read( + struct vm_object *vo, + void *dst, + off_t offset, + size_t count, + size_t *nr_read) +{ + if (offset > vo->vo_size) { + if (nr_read) { + *nr_read = 0; + } + + return KERN_OK; + } + + struct object_iterator it; + kern_status_t status = object_iterator_begin(&it, vo, false); + if (status != KERN_OK) { + return status; + } + + status = object_iterator_seek(&it, offset); + if (status != KERN_OK) { + return status; + } + + size_t r = 0; + char *p = dst; + + while (it.it_max && r < count && status == KERN_OK) { + size_t remaining = count - r; + size_t to_copy = MIN(it.it_max, remaining); + + if (it.it_buf) { + memcpy(p, it.it_buf, to_copy); + } else { + memset(p, 0x0, to_copy); + } + + r += to_copy; + p += to_copy; + + status = object_iterator_seek(&it, to_copy); + } + + if (nr_read) { + *nr_read = r; + } + + return KERN_OK; +} + +#if 0 + +/* write data from a vm-object, where [offset, offset+count] is confined to + * a single page */ +static kern_status_t write_data_onepage( + struct vm_object *vo, + const void *src, + off_t offset, + size_t count, + size_t *nr_written) +{ + off_t offset_aligned = PAGE_ALIGN_DOWN(offset); + off_t page_offset = offset - offset_aligned; + + if (nr_written) { + *nr_written = count; + } + + struct vm_page *pg + = vm_object_alloc_page(vo, offset_aligned, VM_PAGE_4K); + if (!pg) { + return KERN_NO_MEMORY; + } + + char 
*page_data = (char *)vm_page_get_vaddr(pg); + char *dst = page_data + page_offset; + memcpy(dst, src, count); + + return KERN_OK; +} + +/* where offset is not aligned to a page boundary, write data from offset until + * the end of the page */ +static kern_status_t write_data_header( + struct vm_object *vo, + const void **srcp, + off_t *offset, + size_t *count, + size_t *nr_written) +{ + const void *src = *srcp; + off_t offset_aligned = PAGE_ALIGN_DOWN(*offset); + off_t page_offset = *offset - offset_aligned; + struct vm_page *pg + = vm_object_alloc_page(vo, offset_aligned, VM_PAGE_4K); + + if (!pg) { + return KERN_NO_MEMORY; + } + + size_t to_write = VM_PAGE_SIZE - page_offset; + + char *dst = (char *)vm_page_get_vaddr(pg); + memcpy(dst, src, to_write); + + *srcp = (const char *)src + to_write; + *offset += to_write; + *count -= to_write; + + if (nr_written) { + *nr_written += to_write; + } + + return KERN_OK; +} + +/* write data from a vm-object, where the start and end of the write are aligned + * to page boundaries, and at least one page's worth of data is being write */ +static kern_status_t write_data( + struct vm_object *vo, + const void **srcp, + off_t *offset, + size_t *count, + size_t *nr_written) +{ + off_t offset_unaligned = *offset; + + off_t offset_aligned = PAGE_ALIGN_DOWN(offset_unaligned); + size_t count_aligned = PAGE_ALIGN_DOWN(*count); + + const char *src + = (const char *)*srcp + (offset_unaligned - offset_aligned); + struct vm_page *pg = NULL; + + for (size_t i = 0; i < count_aligned; i += VM_PAGE_SIZE) { + pg = vm_object_alloc_page(vo, offset_aligned + i, VM_PAGE_4K); + if (!pg) { + return KERN_NO_MEMORY; + } + + char *dst = (char *)vm_page_get_vaddr(pg); + memcpy(dst, src + i, VM_PAGE_SIZE); + } + + *srcp = src + count_aligned; + *offset = offset_aligned + count_aligned; + *count = *count - count_aligned; + + if (nr_written) { + *nr_written += count_aligned; + } + + return KERN_OK; +} + +/* where offset+count is not aligned to a page 
boundary, write data from the + * start of the last page up to offset+count. */ +static kern_status_t write_data_trailer( + struct vm_object *vo, + const void **srcp, + off_t *offset, + size_t *count, + size_t *nr_written) +{ + const void *src = *srcp; + off_t limit = *offset + *count; + off_t limit_aligned = PAGE_ALIGN_DOWN(limit); + struct vm_page *pg + = vm_object_alloc_page(vo, limit_aligned, VM_PAGE_4K); + if (!pg) { + return KERN_NO_MEMORY; + } + + size_t to_write = limit - limit_aligned; + + char *dst = (char *)vm_page_get_vaddr(pg); + memcpy(dst, src, to_write); + + *srcp = (const char *)src + to_write; + *offset += to_write; + *count -= to_write; + + if (nr_written) { + *nr_written += to_write; + } + + return KERN_OK; +} +#endif + +kern_status_t vm_object_write( + struct vm_object *vo, + const void *src, + off_t offset, + size_t count, + size_t *nr_written) +{ + if (offset > vo->vo_size) { + if (nr_written) { + *nr_written = 0; + } + + return KERN_OK; + } + + struct object_iterator it; + kern_status_t status = object_iterator_begin(&it, vo, true); + if (status != KERN_OK) { + return status; + } + + status = object_iterator_seek(&it, offset); + if (status != KERN_OK) { + return status; + } + + size_t w = 0; + const char *p = src; + + while (it.it_max && w < count && status == KERN_OK) { + size_t remaining = count - w; + size_t to_copy = MIN(it.it_max, remaining); + + memcpy(it.it_buf, p, to_copy); + + w += to_copy; + p += to_copy; + + status = object_iterator_seek(&it, to_copy); + } + + if (nr_written) { + *nr_written = w; + } + + return status; +} + +kern_status_t vm_object_copy( + struct vm_object *dst_object, + off_t dst_offset, + struct vm_object *src_object, + off_t src_offset, + size_t count, + size_t *nr_copied) +{ + if (dst_offset > dst_object->vo_size + || src_offset > src_object->vo_size) { + if (nr_copied) { + *nr_copied = 0; + } + + return KERN_OK; + } + + kern_status_t status; + struct object_iterator src, dst; + status = 
object_iterator_begin(&src, src_object, false);
+	if (status != KERN_OK) {
+		return status;
+	}
+
+	status = object_iterator_begin(&dst, dst_object, true);
+	if (status != KERN_OK) {
+		return status;
+	}
+
+	status = object_iterator_seek(&src, src_offset);
+	if (status != KERN_OK) {
+		return status;
+	}
+
+	status = object_iterator_seek(&dst, dst_offset);
+	if (status != KERN_OK) {
+		return status;
+	}
+
+	size_t copied = 0;
+
+	while (src.it_max && dst.it_max && copied < count) {
+		size_t remaining = count - copied;
+		size_t to_copy = MIN(MIN(src.it_max, dst.it_max), remaining);
+
+		if (src.it_buf) {
+			memcpy(dst.it_buf, src.it_buf, to_copy);
+		} else {
+			memset(dst.it_buf, 0x0, to_copy);
+		}
+
+		copied += to_copy;
+
+		status = object_iterator_seek(&src, to_copy);
+		if (status != KERN_OK) {
+			return status;
+		}
+
+		status = object_iterator_seek(&dst, to_copy);
+		if (status != KERN_OK) {
+			return status;
+		}
+	}
+
+	if (nr_copied) {
+		*nr_copied = copied;
+	}
+
+	return KERN_OK;
+}