dev: removed internal spinlock from bcache
bcaches must now have an explicit external lock to protect them from concurrent access (i.e. a lock belonging to their parent block device)
This commit is contained in:
11
dev/block.c
11
dev/block.c
@@ -91,8 +91,6 @@ kern_status_t block_device_read_blocks(struct device *dev, void *buf, sectors_t
|
|||||||
return do_read_blocks(blockdev, buf, offset, nr_sectors, sectors_read, flags);
|
return do_read_blocks(blockdev, buf, offset, nr_sectors, sectors_read, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
bcache_lock(&blockdev->b_cache);
|
|
||||||
|
|
||||||
size_t nr_read = 0;
|
size_t nr_read = 0;
|
||||||
|
|
||||||
kern_status_t status = KERN_OK;
|
kern_status_t status = KERN_OK;
|
||||||
@@ -101,7 +99,6 @@ kern_status_t block_device_read_blocks(struct device *dev, void *buf, sectors_t
|
|||||||
void *sect_cache_buf;
|
void *sect_cache_buf;
|
||||||
status = get_cached_sector(blockdev, sect, flags, &sect_cache_buf);
|
status = get_cached_sector(blockdev, sect, flags, &sect_cache_buf);
|
||||||
if (status != KERN_OK) {
|
if (status != KERN_OK) {
|
||||||
bcache_unlock(&blockdev->b_cache);
|
|
||||||
*sectors_read = nr_read;
|
*sectors_read = nr_read;
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
@@ -111,7 +108,6 @@ kern_status_t block_device_read_blocks(struct device *dev, void *buf, sectors_t
|
|||||||
nr_read++;
|
nr_read++;
|
||||||
}
|
}
|
||||||
|
|
||||||
bcache_unlock(&blockdev->b_cache);
|
|
||||||
*sectors_read = nr_read;
|
*sectors_read = nr_read;
|
||||||
return KERN_OK;
|
return KERN_OK;
|
||||||
}
|
}
|
||||||
@@ -142,8 +138,6 @@ kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, si
|
|||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
bcache_lock(&blockdev->b_cache);
|
|
||||||
|
|
||||||
char *dest = buf;
|
char *dest = buf;
|
||||||
|
|
||||||
sectors_t first_sect = offset / blockdev->b_sector_size;
|
sectors_t first_sect = offset / blockdev->b_sector_size;
|
||||||
@@ -156,7 +150,6 @@ kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, si
|
|||||||
status = get_cached_sector(blockdev, first_sect, flags, &sector_cachebuf);
|
status = get_cached_sector(blockdev, first_sect, flags, &sector_cachebuf);
|
||||||
|
|
||||||
if (status != KERN_OK) {
|
if (status != KERN_OK) {
|
||||||
bcache_unlock(&blockdev->b_cache);
|
|
||||||
*bytes_read = nr_read;
|
*bytes_read = nr_read;
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
@@ -177,7 +170,6 @@ kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, si
|
|||||||
status = get_cached_sector(blockdev, i, flags, &sector_cachebuf);
|
status = get_cached_sector(blockdev, i, flags, &sector_cachebuf);
|
||||||
|
|
||||||
if (status != KERN_OK) {
|
if (status != KERN_OK) {
|
||||||
bcache_unlock(&blockdev->b_cache);
|
|
||||||
*bytes_read = nr_read;
|
*bytes_read = nr_read;
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
@@ -195,7 +187,6 @@ kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, si
|
|||||||
status = get_cached_sector(blockdev, last_sect, flags, &sector_cachebuf);
|
status = get_cached_sector(blockdev, last_sect, flags, &sector_cachebuf);
|
||||||
|
|
||||||
if (status != KERN_OK) {
|
if (status != KERN_OK) {
|
||||||
bcache_unlock(&blockdev->b_cache);
|
|
||||||
*bytes_read = nr_read;
|
*bytes_read = nr_read;
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
@@ -207,7 +198,6 @@ kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, si
|
|||||||
nr_read += in_sect_size;
|
nr_read += in_sect_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
bcache_unlock(&blockdev->b_cache);
|
|
||||||
*bytes_read = nr_read;
|
*bytes_read = nr_read;
|
||||||
return KERN_OK;
|
return KERN_OK;
|
||||||
}
|
}
|
||||||
@@ -284,7 +274,6 @@ kern_status_t bcache_init(struct bcache *cache, unsigned int block_size)
|
|||||||
memset(cache, 0x0, sizeof *cache);
|
memset(cache, 0x0, sizeof *cache);
|
||||||
cache->b_sector_size = block_size;
|
cache->b_sector_size = block_size;
|
||||||
cache->b_sectors_per_page = VM_PAGE_SIZE / block_size;
|
cache->b_sectors_per_page = VM_PAGE_SIZE / block_size;
|
||||||
cache->b_lock = SPIN_LOCK_INIT;
|
|
||||||
|
|
||||||
return KERN_OK;
|
return KERN_OK;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,7 +15,6 @@ struct bcache {
|
|||||||
unsigned int b_sector_size;
|
unsigned int b_sector_size;
|
||||||
unsigned int b_sectors_per_page;
|
unsigned int b_sectors_per_page;
|
||||||
struct btree b_pagetree;
|
struct btree b_pagetree;
|
||||||
spin_lock_t b_lock;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
struct bcache_sector {
|
struct bcache_sector {
|
||||||
@@ -31,15 +30,6 @@ extern void bcache_destroy(struct bcache *cache);
|
|||||||
extern kern_status_t bcache_init(struct bcache *cache, unsigned int block_size);
|
extern kern_status_t bcache_init(struct bcache *cache, unsigned int block_size);
|
||||||
extern void bcache_deinit(struct bcache *cache);
|
extern void bcache_deinit(struct bcache *cache);
|
||||||
|
|
||||||
static inline void bcache_lock(struct bcache *cache)
|
|
||||||
{
|
|
||||||
spin_lock(&cache->b_lock);
|
|
||||||
}
|
|
||||||
static inline void bcache_unlock(struct bcache *cache)
|
|
||||||
{
|
|
||||||
spin_unlock(&cache->b_lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
extern kern_status_t bcache_get(struct bcache *cache, sectors_t at, bool create, struct bcache_sector *out);
|
extern kern_status_t bcache_get(struct bcache *cache, sectors_t at, bool create, struct bcache_sector *out);
|
||||||
extern void bcache_mark_present(struct bcache_sector *sect);
|
extern void bcache_mark_present(struct bcache_sector *sect);
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user