dev: removed internal spinlock from bcache

bcaches must now have an explicit external lock to protect them from
concurrent access (i.e., a lock belonging to their parent block device).
This commit is contained in:
2023-07-11 21:28:02 +01:00
parent c0f380ddca
commit e9e73bc027
2 changed files with 0 additions and 21 deletions

View File

@@ -91,8 +91,6 @@ kern_status_t block_device_read_blocks(struct device *dev, void *buf, sectors_t
return do_read_blocks(blockdev, buf, offset, nr_sectors, sectors_read, flags);
}
bcache_lock(&blockdev->b_cache);
size_t nr_read = 0;
kern_status_t status = KERN_OK;
@@ -101,7 +99,6 @@ kern_status_t block_device_read_blocks(struct device *dev, void *buf, sectors_t
void *sect_cache_buf;
status = get_cached_sector(blockdev, sect, flags, &sect_cache_buf);
if (status != KERN_OK) {
bcache_unlock(&blockdev->b_cache);
*sectors_read = nr_read;
return status;
}
@@ -111,7 +108,6 @@ kern_status_t block_device_read_blocks(struct device *dev, void *buf, sectors_t
nr_read++;
}
bcache_unlock(&blockdev->b_cache);
*sectors_read = nr_read;
return KERN_OK;
}
@@ -142,8 +138,6 @@ kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, si
return status;
}
bcache_lock(&blockdev->b_cache);
char *dest = buf;
sectors_t first_sect = offset / blockdev->b_sector_size;
@@ -156,7 +150,6 @@ kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, si
status = get_cached_sector(blockdev, first_sect, flags, &sector_cachebuf);
if (status != KERN_OK) {
bcache_unlock(&blockdev->b_cache);
*bytes_read = nr_read;
return status;
}
@@ -177,7 +170,6 @@ kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, si
status = get_cached_sector(blockdev, i, flags, &sector_cachebuf);
if (status != KERN_OK) {
bcache_unlock(&blockdev->b_cache);
*bytes_read = nr_read;
return status;
}
@@ -195,7 +187,6 @@ kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, si
status = get_cached_sector(blockdev, last_sect, flags, &sector_cachebuf);
if (status != KERN_OK) {
bcache_unlock(&blockdev->b_cache);
*bytes_read = nr_read;
return status;
}
@@ -207,7 +198,6 @@ kern_status_t block_device_read(struct device *dev, void *buf, size_t offset, si
nr_read += in_sect_size;
}
bcache_unlock(&blockdev->b_cache);
*bytes_read = nr_read;
return KERN_OK;
}
@@ -284,7 +274,6 @@ kern_status_t bcache_init(struct bcache *cache, unsigned int block_size)
memset(cache, 0x0, sizeof *cache);
cache->b_sector_size = block_size;
cache->b_sectors_per_page = VM_PAGE_SIZE / block_size;
cache->b_lock = SPIN_LOCK_INIT;
return KERN_OK;
}