dev: implement reading from block devices
Reading from block devices is done using the block cache (bcache). This cache stores sectors from a block device in pages of memory marked as 'cached', which allows them to be reclaimed when memory pressure is high (TODO). While block device drivers implement callbacks for reading/writing at block granularity, the device subsystem uses the block cache to implement reading/writing at byte granularity in a driver-agnostic way. Block drivers can disable the block cache for their devices, but doing so requires that clients communicate with those devices at block granularity. This commit also adds an offset parameter to the device and object read/write functions/callbacks.
This commit is contained in:
287
dev/block.c
287
dev/block.c
@@ -1,4 +1,5 @@
|
||||
#include <socks/device.h>
|
||||
#include <socks/block.h>
|
||||
#include <socks/util.h>
|
||||
#include <socks/printk.h>
|
||||
#include <socks/libc/stdio.h>
|
||||
@@ -24,9 +25,191 @@ struct block_device *block_device_from_generic(struct device *dev)
|
||||
return BLOCK_DEVICE(dev);
|
||||
}
|
||||
|
||||
kern_status_t block_device_read(struct device *dev, void *buf, size_t size, size_t *bytes_read, socks_flags_t flags)
|
||||
static kern_status_t do_read_blocks(struct block_device *blockdev, void *buf, sectors_t offset, size_t nr_sectors, size_t *sectors_read, socks_flags_t flags)
|
||||
{
|
||||
return KERN_UNIMPLEMENTED;
|
||||
struct device *dev = block_device_base(blockdev);
|
||||
struct iovec vec = { .io_buf = buf, .io_len = nr_sectors * blockdev->b_sector_size };
|
||||
kern_status_t status = blockdev->b_ops->read_blocks(dev, offset, &nr_sectors, &vec, 1, flags);
|
||||
*sectors_read = nr_sectors;
|
||||
return status;
|
||||
}
|
||||
|
||||
/* Return (via *bufp) a pointer to an in-cache copy of `sector`, reading it
   from the driver on a cache miss. The caller must hold the cache lock, and
   the returned buffer is only valid while the lock is held.
   Fails with KERN_UNSUPPORTED if the device opted out of caching. */
kern_status_t get_cached_sector(struct block_device *blockdev, sectors_t sector, socks_flags_t flags, void **bufp)
{
    if (blockdev->b_flags & BLOCK_DEVICE_NO_BCACHE) {
        return KERN_UNSUPPORTED;
    }

    struct bcache_sector cached;
    kern_status_t status = bcache_get(&blockdev->b_cache, sector, true, &cached);
    if (status != KERN_OK) {
        return status;
    }

    if (!cached.sect_present) {
        /* cache miss: pull the sector in from the driver.
           TODO read all missing blocks in one go */
        size_t nr_read = 0;

        status = do_read_blocks(blockdev, cached.sect_buf, sector, 1, &nr_read, flags);
        if (status != KERN_OK) {
            return status;
        }

        bcache_mark_present(&cached);
    }

    *bufp = cached.sect_buf;
    return KERN_OK;
}
|
||||
|
||||
/* Read nr_sectors sectors starting at sector `offset` into buf.
   The request is clamped to the device capacity; *sectors_read receives the
   number of sectors actually copied. Reads go through the block cache unless
   the device set BLOCK_DEVICE_NO_BCACHE, in which case the driver is called
   directly. Returns KERN_OK with *sectors_read == 0 for reads at/after EOF. */
kern_status_t block_device_read_blocks(struct device *dev, void *buf, sectors_t offset, size_t nr_sectors, size_t *sectors_read, socks_flags_t flags)
{
    struct block_device *blockdev = BLOCK_DEVICE(dev);
    if (!blockdev) {
        return KERN_INVALID_ARGUMENT;
    }

    if (!blockdev->b_ops || !blockdev->b_ops->read_blocks) {
        return KERN_UNSUPPORTED;
    }

    if (offset >= blockdev->b_capacity) {
        *sectors_read = 0;
        return KERN_OK;
    }

    /* clamp to capacity; written this way so `offset + nr_sectors` can never
       overflow (the old `offset + nr_sectors >= b_capacity` test could wrap). */
    if (nr_sectors > blockdev->b_capacity - offset) {
        nr_sectors = blockdev->b_capacity - offset;
    }

    if (!nr_sectors) {
        *sectors_read = 0;
        return KERN_OK;
    }

    if (blockdev->b_flags & BLOCK_DEVICE_NO_BCACHE) {
        return do_read_blocks(blockdev, buf, offset, nr_sectors, sectors_read, flags);
    }

    bcache_lock(&blockdev->b_cache);

    size_t nr_read = 0;
    kern_status_t status = KERN_OK;

    for (sectors_t i = 0; i < nr_sectors; i++) {
        void *sect_cache_buf;

        status = get_cached_sector(blockdev, offset + i, flags, &sect_cache_buf);
        if (status != KERN_OK) {
            break;
        }

        char *sect_dest_buf = (char *)buf + (i * blockdev->b_sector_size);
        memcpy(sect_dest_buf, sect_cache_buf, blockdev->b_sector_size);
        nr_read++;
    }

    bcache_unlock(&blockdev->b_cache);
    *sectors_read = nr_read;
    return status;
}
|
||||
|
||||
/* Byte-granularity read from a block device.
   For cached devices, arbitrary (offset, size) byte ranges are served by
   copying out of whole cached sectors: an unaligned head chunk, any number of
   full sectors, then an unaligned tail chunk. For BLOCK_DEVICE_NO_BCACHE
   devices both offset and size must be sector-aligned.
   *bytes_read receives the number of bytes actually copied. */
kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, size_t size, size_t *bytes_read, socks_flags_t flags)
{
    struct block_device *blockdev = BLOCK_DEVICE(dev);
    if (!blockdev) {
        return KERN_INVALID_ARGUMENT;
    }

    kern_status_t status = KERN_OK;

    if (blockdev->b_flags & BLOCK_DEVICE_NO_BCACHE) {
        /* no bcache for this device, so the client has to read data at sector granularity. */
        sectors_t sect_offset = offset / blockdev->b_sector_size;
        size_t nr_sectors = size / blockdev->b_sector_size;
        if ((sect_offset * blockdev->b_sector_size != offset) || (nr_sectors * blockdev->b_sector_size != size)) {
            /* args are not sector-aligned */
            return KERN_INVALID_ARGUMENT;
        }

        size_t sectors_read = 0;

        status = block_device_read_blocks(dev, buf, sect_offset, nr_sectors, &sectors_read, flags);
        *bytes_read = sectors_read * blockdev->b_sector_size;

        return status;
    }

    /* BUGFIX: clamp the byte range to the device size. Previously the cached
       path had no bounds check, so a read past the end of the device asked
       the driver for out-of-range sectors. */
    size_t dev_size = blockdev->b_capacity * blockdev->b_sector_size;
    if (offset >= dev_size) {
        *bytes_read = 0;
        return KERN_OK;
    }
    if (size > dev_size - offset) {
        size = dev_size - offset;
    }
    if (!size) {
        *bytes_read = 0;
        return KERN_OK;
    }

    bcache_lock(&blockdev->b_cache);

    char *dest = buf;

    sectors_t first_sect = offset / blockdev->b_sector_size;
    sectors_t last_sect = (offset + size) / blockdev->b_sector_size;
    size_t nr_read = 0;

    if (first_sect * blockdev->b_sector_size < offset) {
        /* non-sector sized chunk at the start of the buffer. */
        void *sector_cachebuf;

        status = get_cached_sector(blockdev, first_sect, flags, &sector_cachebuf);
        if (status != KERN_OK) {
            goto out;
        }

        unsigned int in_sect_offset = (offset - (first_sect * blockdev->b_sector_size));
        unsigned int in_sect_size = MIN(blockdev->b_sector_size - in_sect_offset, size);

        char *p = (char *)sector_cachebuf + in_sect_offset;
        memcpy(dest, p, in_sect_size);

        dest += in_sect_size;
        nr_read += in_sect_size;
        first_sect++;
    }

    /* whole sectors in the middle of the range */
    for (sectors_t i = first_sect; i < last_sect; i++) {
        void *sector_cachebuf;

        status = get_cached_sector(blockdev, i, flags, &sector_cachebuf);
        if (status != KERN_OK) {
            goto out;
        }

        memcpy(dest, sector_cachebuf, blockdev->b_sector_size);
        dest += blockdev->b_sector_size;
        nr_read += blockdev->b_sector_size;
    }

    if (last_sect * blockdev->b_sector_size < offset + size && nr_read < size) {
        /* non-sector sized chunk at the end of the buffer. */
        void *sector_cachebuf;

        status = get_cached_sector(blockdev, last_sect, flags, &sector_cachebuf);
        if (status != KERN_OK) {
            goto out;
        }

        unsigned int in_sect_size = (offset + size) - (last_sect * blockdev->b_sector_size);
        memcpy(dest, sector_cachebuf, in_sect_size);
        nr_read += in_sect_size;
    }

out:
    /* single unlock/report exit; replaces three duplicated error paths */
    bcache_unlock(&blockdev->b_cache);
    *bytes_read = nr_read;
    return status;
}
|
||||
|
||||
static kern_status_t generate_name(struct block_device *dev, char out[DEV_NAME_MAX])
|
||||
@@ -37,13 +220,21 @@ static kern_status_t generate_name(struct block_device *dev, char out[DEV_NAME_M
|
||||
|
||||
kern_status_t block_device_register(struct device *dev)
|
||||
{
|
||||
struct block_device *blockdev = &dev->blk;
|
||||
|
||||
if (!(blockdev->b_flags & BLOCK_DEVICE_NO_BCACHE)) {
|
||||
kern_status_t status = bcache_init(&blockdev->b_cache, blockdev->b_sector_size);
|
||||
if (status != KERN_OK) {
|
||||
return status;
|
||||
}
|
||||
}
|
||||
|
||||
unsigned long flags;
|
||||
spin_lock_irqsave(&block_device_ids_lock, &flags);
|
||||
unsigned int id = bitmap_lowest_clear(block_device_ids, BLOCK_DEVICE_MAX);
|
||||
bitmap_set(block_device_ids, id);
|
||||
spin_unlock_irqrestore(&block_device_ids_lock, flags);
|
||||
|
||||
struct block_device *blockdev = &dev->blk;
|
||||
blockdev->b_id = id;
|
||||
|
||||
char name[DEV_NAME_MAX];
|
||||
@@ -52,7 +243,7 @@ kern_status_t block_device_register(struct device *dev)
|
||||
snprintf(path, sizeof path, "/dev/block/%s", name);
|
||||
|
||||
char size_string[32];
|
||||
data_size_to_string(blockdev->sector_size * blockdev->capacity, size_string, sizeof size_string);
|
||||
data_size_to_string(blockdev->b_sector_size * blockdev->b_capacity, size_string, sizeof size_string);
|
||||
|
||||
printk("dev: found %s %s block device '%s'", size_string, dev->dev_owner->drv_name, dev->dev_model_name);
|
||||
|
||||
@@ -63,3 +254,91 @@ struct device_type_ops block_type_ops = {
|
||||
.register_device = block_device_register,
|
||||
.read = block_device_read,
|
||||
};
|
||||
|
||||
static BTREE_DEFINE_SIMPLE_GET(struct vm_page, sectors_t, p_bnode, p_blockid, get_block_page)
|
||||
static BTREE_DEFINE_SIMPLE_INSERT(struct vm_page, p_bnode, p_blockid, put_block_page)
|
||||
|
||||
struct bcache *bcache_create(unsigned int block_size)
|
||||
{
|
||||
struct bcache *out = kmalloc(sizeof *out, VM_NORMAL);
|
||||
if (!out) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (bcache_init(out, block_size) != KERN_OK) {
|
||||
kfree(out);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
/* Tear down and free a heap-allocated bcache created by bcache_create().
   Do not use on an embedded bcache (use bcache_deinit() for those). */
void bcache_destroy(struct bcache *cache)
{
    bcache_deinit(cache);
    kfree(cache);
}
|
||||
|
||||
kern_status_t bcache_init(struct bcache *cache, unsigned int block_size)
|
||||
{
|
||||
memset(cache, 0x0, sizeof *cache);
|
||||
cache->b_sector_size = block_size;
|
||||
cache->b_sectors_per_page = VM_PAGE_SIZE / block_size;
|
||||
cache->b_lock = SPIN_LOCK_INIT;
|
||||
|
||||
return KERN_OK;
|
||||
}
|
||||
|
||||
void bcache_deinit(struct bcache *cache)
|
||||
{
|
||||
struct btree_node *first_node = btree_first(&cache->b_pagetree);
|
||||
if (!first_node) {
|
||||
return;
|
||||
}
|
||||
|
||||
struct vm_page *cur = BTREE_CONTAINER(struct vm_page, p_bnode, first_node);
|
||||
|
||||
while (cur) {
|
||||
struct btree_node *next_node = btree_next(&cur->p_bnode);
|
||||
struct vm_page *next = BTREE_CONTAINER(struct vm_page, p_bnode, next_node);
|
||||
|
||||
cur->p_flags &= ~(VM_PAGE_CACHE);
|
||||
btree_delete(&cache->b_pagetree, &cur->p_bnode);
|
||||
vm_page_free(cur);
|
||||
cur = next;
|
||||
}
|
||||
}
|
||||
|
||||
/* Look up the cached page slot for sector `at`, allocating (and inserting) a
   fresh all-missing page when `create` is set. Fills *out with the page, the
   sector's index within it, its buffer address, and whether the sector
   currently holds valid data. Caller must hold the cache lock. */
kern_status_t bcache_get(struct bcache *cache, sectors_t at, bool create, struct bcache_sector *out)
{
    /* split the sector number into (page key, index within page) */
    unsigned int page_index = at % cache->b_sectors_per_page;
    at /= cache->b_sectors_per_page;

    struct vm_page *page = get_block_page(&cache->b_pagetree, at);
    if (!page) {
        if (!create) {
            return KERN_NO_ENTRY;
        }

        page = vm_page_alloc(VM_PAGE_MIN_ORDER, VM_NORMAL);
        if (!page) {
            return KERN_NO_MEMORY;
        }

        page->p_flags |= VM_PAGE_CACHE;
        page->p_bcache = cache;
        bitmap_zero(page->p_blockbits, VM_MAX_SECTORS_PER_PAGE);
        page->p_blockid = at;

        /* BUGFIX: the freshly allocated page was never inserted into the
           page tree, so every subsequent lookup missed and each miss leaked
           a page. put_block_page was defined but never called. */
        put_block_page(&cache->b_pagetree, page);
    }

    out->sect_page = page;
    out->sect_index = page_index;
    out->sect_buf = vm_page_get_vaddr(page);
    out->sect_present = bitmap_check(page->p_blockbits, page_index);

    return KERN_OK;
}
|
||||
|
||||
void bcache_mark_present(struct bcache_sector *sect)
|
||||
{
|
||||
bitmap_set(sect->sect_page->p_blockbits, sect->sect_index);
|
||||
}
|
||||
|
||||
@@ -18,25 +18,25 @@ struct char_device *char_device_from_generic(struct device *dev)
|
||||
return CHAR_DEVICE(dev);
|
||||
}
|
||||
|
||||
/* Forward a byte-granularity read to the character driver's read callback,
   or fail with KERN_UNSUPPORTED when the driver provides none. */
static kern_status_t char_device_read(struct device *dev, void *buf, size_t offset, size_t size, size_t *bytes_read, socks_flags_t flags)
{
    struct char_device *cdev = CHAR_DEVICE(dev);

    if (!cdev->c_ops || !cdev->c_ops->read) {
        return KERN_UNSUPPORTED;
    }

    return cdev->c_ops->read(dev, buf, offset, size, bytes_read, flags);
}
|
||||
|
||||
/* Forward a byte-granularity write to the character driver's write callback,
   or fail with KERN_UNSUPPORTED when the driver provides none. */
static kern_status_t char_device_write(struct device *dev, const void *buf, size_t offset, size_t size, size_t *bytes_read, socks_flags_t flags)
{
    struct char_device *cdev = CHAR_DEVICE(dev);

    if (!cdev->c_ops || !cdev->c_ops->write) {
        return KERN_UNSUPPORTED;
    }

    return cdev->c_ops->write(dev, buf, offset, size, bytes_read, flags);
}
|
||||
20
dev/core.c
20
dev/core.c
@@ -11,8 +11,8 @@ static struct object *dev_folder = NULL;
|
||||
static struct device *__root_device = NULL;
|
||||
static struct device *__misc_device = NULL;
|
||||
static kern_status_t device_object_destroy(struct object *);
|
||||
static kern_status_t device_object_read(struct object *obj, void *, size_t *, socks_flags_t);
|
||||
static kern_status_t device_object_write(struct object *obj, const void *, size_t *, socks_flags_t);
|
||||
static kern_status_t device_object_read(struct object *obj, void *, size_t, size_t *, socks_flags_t);
|
||||
static kern_status_t device_object_write(struct object *obj, const void *, size_t, size_t *, socks_flags_t);
|
||||
static kern_status_t device_object_query_name(struct object *, char out[OBJECT_NAME_MAX]);
|
||||
static kern_status_t device_object_get_child_at(struct object *, size_t, struct object **);
|
||||
static kern_status_t device_object_get_child_named(struct object *, const char *, struct object **);
|
||||
@@ -123,23 +123,23 @@ struct device *generic_device_create(void)
|
||||
return dev;
|
||||
}
|
||||
|
||||
/* Dispatch a byte-granularity read to the device-type-specific handler.
   Returns KERN_UNSUPPORTED when the device type registers no read op. */
kern_status_t device_read(struct device *dev, void *buf, size_t offset, size_t size, size_t *bytes_read, socks_flags_t flags)
{
    struct device_type_ops *ops = type_ops[dev->dev_type];

    if (!ops || !ops->read) {
        return KERN_UNSUPPORTED;
    }

    return ops->read(dev, buf, offset, size, bytes_read, flags);
}
|
||||
|
||||
/* Dispatch a byte-granularity write to the device-type-specific handler.
   Returns KERN_UNSUPPORTED when the device type registers no write op. */
kern_status_t device_write(struct device *dev, const void *buf, size_t offset, size_t size, size_t *bytes_written, socks_flags_t flags)
{
    struct device_type_ops *ops = type_ops[dev->dev_type];

    if (!ops || !ops->write) {
        return KERN_UNSUPPORTED;
    }

    return ops->write(dev, buf, offset, size, bytes_written, flags);
}
||||
@@ -150,16 +150,16 @@ struct device *cast_to_device(struct object *obj)
|
||||
return DEVICE_CAST(obj);
|
||||
}
|
||||
|
||||
/* Object-layer read shim: adapts the object read protocol (count is in/out)
   to device_read(). */
static kern_status_t device_object_read(struct object *obj, void *p, size_t offset, size_t *count, socks_flags_t flags)
{
    struct device *dev = DEVICE_CAST(obj);
    /* BUGFIX: offset and size were swapped — the call passed *count as the
       offset and the caller's offset as the size, so every object read went
       to the wrong position with the wrong length. */
    return device_read(dev, p, offset, *count, count, flags);
}
|
||||
|
||||
/* Object-layer write shim: adapts the object write protocol (count is in/out)
   to device_write(). */
static kern_status_t device_object_write(struct object *obj, const void *p, size_t offset, size_t *count, socks_flags_t flags)
{
    struct device *dev = DEVICE_CAST(obj);
    /* BUGFIX: offset and size were swapped — the call passed *count as the
       offset and the caller's offset as the size, so every object write went
       to the wrong position with the wrong length. */
    return device_write(dev, p, offset, *count, count, flags);
}
|
||||
|
||||
static kern_status_t device_object_destroy(struct object *obj)
|
||||
|
||||
@@ -60,7 +60,8 @@ kern_status_t input_device_report_event(struct input_device *dev, const struct i
|
||||
return r == sizeof *ev ? KERN_OK : KERN_WOULD_BLOCK;
|
||||
}
|
||||
|
||||
kern_status_t input_device_read(struct device *dev, void *buf, size_t size, size_t *bytes_read, socks_flags_t flags)
|
||||
kern_status_t input_device_read(struct device *dev, void *buf, size_t offset,
|
||||
size_t size, size_t *bytes_read, socks_flags_t flags)
|
||||
{
|
||||
if (dev->dev_type != DEV_TYPE_INPUT || (size % sizeof (struct input_event)) != 0) {
|
||||
return KERN_INVALID_ARGUMENT;
|
||||
|
||||
46
include/socks/block.h
Normal file
46
include/socks/block.h
Normal file
@@ -0,0 +1,46 @@
|
||||
#ifndef SOCKS_BLOCK_H_
#define SOCKS_BLOCK_H_

#include <socks/types.h>
#include <socks/btree.h>
#include <socks/locks.h>
#include <socks/status.h>
#include <stdbool.h>

/* per-device behaviour flags */
enum block_device_flags {
    /* device opts out of the block cache: clients must then do all I/O at
       sector granularity */
    BLOCK_DEVICE_NO_BCACHE = 0x01u,
};

/* per-device sector cache: sectors are stored in whole vm pages, looked up
   through a btree keyed by page-sized groups of sectors */
struct bcache {
    unsigned int b_sector_size;      /* bytes per sector */
    unsigned int b_sectors_per_page; /* VM_PAGE_SIZE / b_sector_size */
    struct btree b_pagetree;         /* tree of cached vm_pages */
    spin_lock_t b_lock;              /* guards the tree and cached data */
};

/* handle to one sector's slot within a cached page, as returned by
   bcache_get() */
struct bcache_sector {
    struct vm_page *sect_page; /* backing page */
    unsigned int sect_index;   /* sector index within the page */
    void *sect_buf;            /* address of the sector's bytes */
    bool sect_present;         /* true if the sector holds valid data */
};

extern struct bcache *bcache_create(unsigned int block_size);
extern void bcache_destroy(struct bcache *cache);

extern kern_status_t bcache_init(struct bcache *cache, unsigned int block_size);
extern void bcache_deinit(struct bcache *cache);

/* lock/unlock the cache; hold the lock around bcache_get() and for as long
   as a returned sect_buf is used */
static inline void bcache_lock(struct bcache *cache)
{
    spin_lock(&cache->b_lock);
}
static inline void bcache_unlock(struct bcache *cache)
{
    spin_unlock(&cache->b_lock);
}

extern kern_status_t bcache_get(struct bcache *cache, sectors_t at, bool create, struct bcache_sector *out);
extern void bcache_mark_present(struct bcache_sector *sect);

#endif
|
||||
@@ -6,6 +6,7 @@
|
||||
#include <socks/status.h>
|
||||
#include <socks/bitmap.h>
|
||||
#include <socks/object.h>
|
||||
#include <socks/block.h>
|
||||
#include <socks/fb.h>
|
||||
#include <socks/ringbuffer.h>
|
||||
|
||||
@@ -49,14 +50,14 @@ struct iovec {
|
||||
};
|
||||
|
||||
struct device_type_ops {
|
||||
kern_status_t(*read)(struct device *, void *, size_t, size_t *, socks_flags_t);
|
||||
kern_status_t(*write)(struct device *, const void *, size_t, size_t *, socks_flags_t);
|
||||
kern_status_t(*read)(struct device *, void *, size_t, size_t, size_t *, socks_flags_t);
|
||||
kern_status_t(*write)(struct device *, const void *, size_t, size_t, size_t *, socks_flags_t);
|
||||
kern_status_t(*register_device)(struct device *);
|
||||
};
|
||||
|
||||
struct block_device_ops {
|
||||
kern_status_t(*read_blocks)(struct device *, sectors_t, size_t, struct iovec *, size_t, socks_flags_t);
|
||||
kern_status_t(*write_blocks)(struct device *, sectors_t, size_t, struct iovec *, size_t, socks_flags_t);
|
||||
kern_status_t(*read_blocks)(struct device *, sectors_t, size_t *, struct iovec *, size_t, socks_flags_t);
|
||||
kern_status_t(*write_blocks)(struct device *, sectors_t, size_t *, struct iovec *, size_t, socks_flags_t);
|
||||
kern_status_t(*ioctl)(struct device *, unsigned int, void *);
|
||||
};
|
||||
|
||||
@@ -68,8 +69,8 @@ struct net_device_ops {
|
||||
};
|
||||
|
||||
struct char_device_ops {
|
||||
kern_status_t(*read)(struct device *, void *, size_t, size_t *, socks_flags_t);
|
||||
kern_status_t(*write)(struct device *, const void *, size_t, size_t *, socks_flags_t);
|
||||
kern_status_t(*read)(struct device *, void *, size_t, size_t, size_t *, socks_flags_t);
|
||||
kern_status_t(*write)(struct device *, const void *, size_t, size_t, size_t *, socks_flags_t);
|
||||
};
|
||||
|
||||
struct input_device_ops {
|
||||
@@ -86,9 +87,11 @@ struct framebuffer_device_ops {
|
||||
|
||||
struct block_device {
|
||||
struct block_device_ops *b_ops;
|
||||
struct bcache b_cache;
|
||||
enum block_device_flags b_flags;
|
||||
unsigned int b_id;
|
||||
unsigned int sector_size;
|
||||
sectors_t capacity;
|
||||
unsigned int b_sector_size;
|
||||
sectors_t b_capacity;
|
||||
};
|
||||
|
||||
struct char_device {
|
||||
@@ -195,8 +198,8 @@ static inline void device_unlock_irqrestore(struct device *dev, unsigned long fl
|
||||
object_unlock_irqrestore(&dev->dev_base, flags);
|
||||
}
|
||||
|
||||
extern kern_status_t device_read(struct device *dev, void *buf, size_t size, size_t *bytes_read, socks_flags_t flags);
|
||||
extern kern_status_t device_write(struct device *dev, const void *buf, size_t size, size_t *bytes_written, socks_flags_t flags);
|
||||
extern kern_status_t device_read(struct device *dev, void *buf, size_t offset, size_t size, size_t *bytes_read, socks_flags_t flags);
|
||||
extern kern_status_t device_write(struct device *dev, const void *buf, size_t offset, size_t size, size_t *bytes_written, socks_flags_t flags);
|
||||
|
||||
extern struct device *cast_to_device(struct object *obj);
|
||||
|
||||
@@ -286,7 +289,8 @@ static inline void device_deref(struct device *dev)
|
||||
}
|
||||
|
||||
extern kern_status_t input_device_report_event(struct input_device *dev, const struct input_event *ev, bool noblock);
|
||||
extern kern_status_t input_device_read(struct device *dev, void *buf, size_t size, size_t *bytes_read, socks_flags_t flags);
|
||||
extern kern_status_t input_device_read(struct device *dev, void *buf, size_t offset,
|
||||
size_t size, size_t *bytes_read, socks_flags_t flags);
|
||||
extern kern_status_t input_device_add_hook(struct device *dev, struct input_event_hook *hook);
|
||||
extern kern_status_t input_device_remove_hook(struct device *dev, struct input_event_hook *hook);
|
||||
|
||||
|
||||
@@ -32,8 +32,8 @@ enum object_type_flags {
|
||||
struct object_ops {
|
||||
kern_status_t(*open)(struct object *obj);
|
||||
kern_status_t(*close)(struct object *obj);
|
||||
kern_status_t(*read)(struct object *obj, void *p, size_t *r, socks_flags_t flags);
|
||||
kern_status_t(*write)(struct object *obj, const void *p, size_t *w, socks_flags_t flags);
|
||||
kern_status_t(*read)(struct object *obj, void *p, size_t off, size_t *r, socks_flags_t flags);
|
||||
kern_status_t(*write)(struct object *obj, const void *p, size_t off, size_t *w, socks_flags_t flags);
|
||||
kern_status_t(*destroy)(struct object *obj);
|
||||
kern_status_t(*query_name)(struct object *obj, char out[OBJECT_NAME_MAX]);
|
||||
kern_status_t(*parse)(struct object *obj, const char *path, struct object **out);
|
||||
@@ -92,8 +92,8 @@ static inline kern_status_t object_get(const char *path, struct object **out)
|
||||
{
|
||||
return object_namespace_get_object(global_namespace(), path, out);
|
||||
}
|
||||
extern kern_status_t object_read(struct object *obj, void *p, size_t max, size_t *nr_read, socks_flags_t flags);
|
||||
extern kern_status_t object_write(struct object *obj, const void *p, size_t max, size_t *nr_written, socks_flags_t flags);
|
||||
extern kern_status_t object_read(struct object *obj, void *p, size_t offset, size_t max, size_t *nr_read, socks_flags_t flags);
|
||||
extern kern_status_t object_write(struct object *obj, const void *p, size_t offset, size_t max, size_t *nr_written, socks_flags_t flags);
|
||||
extern kern_status_t object_get_child_named(struct object *obj, const char *name, struct object **out);
|
||||
extern kern_status_t object_get_child_at(struct object *obj, size_t at, struct object **out);
|
||||
extern kern_status_t object_query_name(struct object *obj, char name[OBJECT_NAME_MAX]);
|
||||
|
||||
@@ -131,8 +131,8 @@ static inline struct driver *tty_driver_base(struct tty_driver *drv)
|
||||
return &drv->tty_base;
|
||||
}
|
||||
|
||||
extern kern_status_t tty_read(struct device *tty, void *buf, size_t max, size_t *nr_read, socks_flags_t flags);
|
||||
extern kern_status_t tty_write(struct device *tty, const void *buf, size_t len, size_t *nr_written, socks_flags_t flags);
|
||||
extern kern_status_t tty_read(struct device *tty, void *buf, size_t offset, size_t max, size_t *nr_read, socks_flags_t flags);
|
||||
extern kern_status_t tty_write(struct device *tty, const void *buf, size_t offset, size_t len, size_t *nr_written, socks_flags_t flags);
|
||||
extern kern_status_t tty_report_event(struct device *tty, const struct input_event *ev);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
||||
@@ -5,6 +5,8 @@
|
||||
#include <socks/types.h>
|
||||
#include <socks/status.h>
|
||||
#include <socks/queue.h>
|
||||
#include <socks/btree.h>
|
||||
#include <socks/bitmap.h>
|
||||
#include <socks/locks.h>
|
||||
#include <socks/machine/vm.h>
|
||||
|
||||
@@ -12,6 +14,8 @@
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct bcache;
|
||||
|
||||
/* maximum number of NUMA nodes */
|
||||
#define VM_MAX_NODES 64
|
||||
/* maximum number of memory zones per node */
|
||||
@@ -21,6 +25,11 @@ extern "C" {
|
||||
/* maximum number of sparse memory sectors */
|
||||
#define VM_MAX_SECTORS 1024
|
||||
|
||||
/* maximum number of disk sectors that can be stored in a single
|
||||
page. AKA the number of bits in the sector bitmap.
|
||||
used by the block cache */
|
||||
#define VM_MAX_SECTORS_PER_PAGE 32
|
||||
|
||||
#define VM_CHECK_ALIGN(p, mask) ((((p) & (mask)) == (p)) ? 1 : 0)
|
||||
|
||||
#define VM_CACHE_INITIALISED(c) ((c)->c_obj_count != 0)
|
||||
@@ -108,6 +117,8 @@ enum vm_page_flags {
|
||||
VM_PAGE_HEAD = 0x04u,
|
||||
/* page is part of a huge-page */
|
||||
VM_PAGE_HUGE = 0x08u,
|
||||
/* page is holding cached data from secondary storage, and can be freed if necessary (and not dirty). */
|
||||
VM_PAGE_CACHE = 0x10u,
|
||||
};
|
||||
|
||||
enum vm_memory_region_status {
|
||||
@@ -210,18 +221,40 @@ struct vm_page {
|
||||
|
||||
uint32_t p_flags;
|
||||
|
||||
/* multi-purpose list.
|
||||
/* owner-specific pointer */
|
||||
union {
|
||||
struct vm_slab *p_slab;
|
||||
struct bcache *p_bcache;
|
||||
void *p_priv0;
|
||||
};
|
||||
|
||||
/* multi-purpose list/tree entry.
|
||||
the owner of the page can decide what to do with this.
|
||||
some examples:
|
||||
- the buddy allocator uses this to maintain its per-zone free-page lists.
|
||||
- the block cache uses this to maintain a tree of pages keyed by block number.
|
||||
*/
|
||||
struct queue_entry p_list;
|
||||
|
||||
/* owner-specific data */
|
||||
union {
|
||||
struct vm_slab *p_slab;
|
||||
struct queue_entry p_list;
|
||||
struct btree_node p_bnode;
|
||||
|
||||
/* btree_node contains three pointers, so provide three pointer-sized integers for
|
||||
use if p_bnode isn't needed. */
|
||||
uintptr_t priv1[3];
|
||||
};
|
||||
|
||||
union {
|
||||
/* used by bcache when sector size is < page size. bitmap of present/missing sectors */
|
||||
DECLARE_BITMAP(p_blockbits, VM_MAX_SECTORS_PER_PAGE);
|
||||
uint32_t p_priv2;
|
||||
};
|
||||
|
||||
union {
|
||||
/* sector address, used by bcache */
|
||||
sectors_t p_blockid;
|
||||
|
||||
uint32_t p_priv3[2];
|
||||
};
|
||||
} __attribute__((aligned(2 * sizeof(unsigned long))));
|
||||
|
||||
/* represents a sector of memory, containing its own array of vm_pages.
|
||||
|
||||
22
init/main.c
22
init/main.c
@@ -29,7 +29,7 @@ void print_kernel_banner(void)
|
||||
/* Park the init thread forever, sleeping so it does not busy-spin.
   (Removed the commented-out debug printk left behind by an earlier edit.) */
static void hang(void)
{
    while (1) {
        milli_sleep(2000);
    }
}
||||
@@ -110,5 +110,25 @@ void kernel_init(uintptr_t arg)
|
||||
tty_connect_foreground_input_device(cast_to_device(kbd));
|
||||
}
|
||||
|
||||
struct object *disk;
|
||||
status = object_get("/dev/block/disk0", &disk);
|
||||
if (status == KERN_OK) {
|
||||
unsigned char buf[32] = {0};
|
||||
struct device *disk_dev = cast_to_device(disk);
|
||||
size_t nread = 0;
|
||||
|
||||
status = device_read(disk_dev, buf, 1, 32, &nread, 0);
|
||||
if (status == KERN_OK) {
|
||||
printk("read %zu bytes from /dev/block/disk0:", nread);
|
||||
for (int i = 0; i < sizeof buf; i++) {
|
||||
printk("%02xh", buf[i]);
|
||||
}
|
||||
} else {
|
||||
printk("failed to read from block device (%s)", kern_status_string(status));
|
||||
}
|
||||
} else {
|
||||
printk("cannot open block device (%s)", kern_status_string(status));
|
||||
}
|
||||
|
||||
hang();
|
||||
}
|
||||
|
||||
@@ -33,7 +33,7 @@ static void tty_console_write(struct console *con, const char *s, unsigned int l
|
||||
{
|
||||
if (foreground) {
|
||||
size_t nr_written;
|
||||
tty_write(foreground, s, len, &nr_written, 0);
|
||||
tty_write(foreground, s, 0, len, &nr_written, 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -355,7 +355,7 @@ static void convert_ev_to_chars(struct device *tty, const struct input_event *ev
|
||||
|
||||
ringbuffer_write(&ttydev->tty_input, data_len, data, S_NOBLOCK);
|
||||
size_t nr_written;
|
||||
tty_write(tty, echo, echo_len, &nr_written, S_NOBLOCK);
|
||||
tty_write(tty, echo, 0, echo_len, &nr_written, S_NOBLOCK);
|
||||
}
|
||||
|
||||
static kern_status_t canonical_read(struct device *tty, void *buf, size_t max, size_t *nr_read, socks_flags_t flags)
|
||||
|
||||
@@ -73,7 +73,7 @@ static void putchar(struct device *tty, int c)
|
||||
}
|
||||
}
|
||||
|
||||
kern_status_t tty_read(struct device *tty, void *buf, size_t max, size_t *nr_read, socks_flags_t flags)
|
||||
kern_status_t tty_read(struct device *tty, void *buf, size_t offset, size_t max, size_t *nr_read, socks_flags_t flags)
|
||||
{
|
||||
kern_status_t status = KERN_UNSUPPORTED;
|
||||
struct tty_device *ttydev = TTY_DEVICE(tty);
|
||||
@@ -85,7 +85,7 @@ kern_status_t tty_read(struct device *tty, void *buf, size_t max, size_t *nr_rea
|
||||
return status;
|
||||
}
|
||||
|
||||
kern_status_t tty_write(struct device *tty, const void *buf, size_t len, size_t *nr_written, socks_flags_t flags)
|
||||
kern_status_t tty_write(struct device *tty, const void *buf, size_t offset, size_t len, size_t *nr_written, socks_flags_t flags)
|
||||
{
|
||||
size_t r = 0;
|
||||
const char *s = buf;
|
||||
|
||||
12
obj/object.c
12
obj/object.c
@@ -132,13 +132,13 @@ struct object *object_header(void *p)
|
||||
return obj;
|
||||
}
|
||||
|
||||
kern_status_t object_read(struct object *obj, void *p, size_t max,
|
||||
size_t *nr_read, socks_flags_t flags)
|
||||
kern_status_t object_read(struct object *obj, void *p, size_t offset,
|
||||
size_t max, size_t *nr_read, socks_flags_t flags)
|
||||
{
|
||||
kern_status_t status = KERN_UNSUPPORTED;
|
||||
|
||||
if (obj->ob_type->ob_ops.read) {
|
||||
status = obj->ob_type->ob_ops.read(obj, p, &max, flags);
|
||||
status = obj->ob_type->ob_ops.read(obj, p, offset, &max, flags);
|
||||
} else {
|
||||
max = 0;
|
||||
}
|
||||
@@ -150,13 +150,13 @@ kern_status_t object_read(struct object *obj, void *p, size_t max,
|
||||
return status;
|
||||
}
|
||||
|
||||
kern_status_t object_write(struct object *obj, const void *p, size_t max,
|
||||
size_t *nr_written, socks_flags_t flags)
|
||||
kern_status_t object_write(struct object *obj, const void *p, size_t offset,
|
||||
size_t max, size_t *nr_written, socks_flags_t flags)
|
||||
{
|
||||
kern_status_t status = KERN_UNSUPPORTED;
|
||||
|
||||
if (obj->ob_type->ob_ops.write) {
|
||||
status = obj->ob_type->ob_ops.write(obj, p, &max, flags);
|
||||
status = obj->ob_type->ob_ops.write(obj, p, offset, &max, flags);
|
||||
}
|
||||
|
||||
return status;
|
||||
|
||||
Reference in New Issue
Block a user