/* Per-tag cluster I/O context: read/write of a tag's clusters on top of an
 * ec3 image ioctx. */
#include "tag.h"

#include "image.h"
#include "pipeline.h"
#include "status.h"

#include <blue/io/file.h>

#include <stdlib.h>
#include <string.h>
static enum ec3_status init_seq_write_temp_files(struct ec3_tag_ioctx *tag)
|
|
{
|
|
b_status status = b_file_open_temp(
|
|
B_FILE_READ_WRITE | B_FILE_BINARY,
|
|
&tag->io_f_data);
|
|
|
|
if (!B_OK(status)) {
|
|
return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
|
|
}
|
|
|
|
status = b_file_open_temp(
|
|
B_FILE_READ_WRITE | B_FILE_BINARY,
|
|
&tag->io_f_cluster_table);
|
|
|
|
if (!B_OK(status)) {
|
|
return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
|
|
}
|
|
|
|
cluster_table_init(&tag->io_cluster_table, tag->io_f_cluster_table, 0);
|
|
cluster_table_init_empty_table(&tag->io_cluster_table);
|
|
|
|
return EC3_SUCCESS;
|
|
}
|
|
|
|
enum ec3_status ec3_tag_ioctx_create(
|
|
struct ec3_tag_ioctx **out,
|
|
size_t cluster_size,
|
|
enum ec3_tag_ioctx_mode mode)
|
|
{
|
|
if ((mode & EC3_TAG_IO_READ) && (mode & EC3_TAG_IO_SEQUENTIAL)) {
|
|
return EC3_ERR_INVALID_VALUE;
|
|
}
|
|
|
|
struct ec3_tag_ioctx *ioctx = malloc(sizeof *ioctx);
|
|
if (!ioctx) {
|
|
return EC3_ERR_NO_MEMORY;
|
|
}
|
|
|
|
memset(ioctx, 0x0, sizeof *ioctx);
|
|
|
|
ioctx->io_mode = mode;
|
|
|
|
ioctx->io_cluster_buf = malloc(cluster_size);
|
|
if (!ioctx->io_cluster_buf) {
|
|
free(ioctx);
|
|
return EC3_ERR_NO_MEMORY;
|
|
}
|
|
|
|
enum ec3_status status = EC3_SUCCESS;
|
|
|
|
if (mode & EC3_TAG_IO_SEQUENTIAL) {
|
|
status = init_seq_write_temp_files(ioctx);
|
|
} else {
|
|
status = cluster_cache_init(
|
|
&ioctx->io_cache,
|
|
(mode & EC3_TAG_IO_WRITE) == EC3_TAG_IO_WRITE,
|
|
cluster_size);
|
|
}
|
|
|
|
if (status != EC3_SUCCESS) {
|
|
ec3_tag_ioctx_close(ioctx);
|
|
return status;
|
|
}
|
|
|
|
*out = ioctx;
|
|
return EC3_SUCCESS;
|
|
}
|
|
|
|
enum ec3_status ec3_tag_ioctx_close(struct ec3_tag_ioctx *tag)
|
|
{
|
|
enum ec3_tag_ioctx_mode mode = tag->io_mode;
|
|
tag->io_mode |= EC3_TAG_IO_CLOSED;
|
|
|
|
if (tag->io_f_image) {
|
|
b_file_release(tag->io_f_image);
|
|
tag->io_f_image = NULL;
|
|
}
|
|
|
|
if (tag->io_cluster_buf) {
|
|
free(tag->io_cluster_buf);
|
|
tag->io_cluster_buf = NULL;
|
|
}
|
|
|
|
if (mode & EC3_TAG_IO_WRITE) {
|
|
/* leave the rest of the data for the image ioctx to use */
|
|
return EC3_SUCCESS;
|
|
}
|
|
|
|
if (tag->io_pipeline) {
|
|
ec3_pipeline_destroy(tag->io_pipeline);
|
|
tag->io_pipeline = NULL;
|
|
}
|
|
|
|
if (tag->io_f_data) {
|
|
b_file_release(tag->io_f_data);
|
|
tag->io_f_data = NULL;
|
|
}
|
|
|
|
if (tag->io_f_cluster_table) {
|
|
b_file_release(tag->io_f_cluster_table);
|
|
tag->io_f_cluster_table = NULL;
|
|
}
|
|
|
|
cluster_cache_finish(&tag->io_cache);
|
|
cluster_table_finish(&tag->io_cluster_table);
|
|
b_btree_delete(&tag->io_parent->io_opened_tags, &tag->io_node);
|
|
free(tag);
|
|
|
|
return EC3_SUCCESS;
|
|
}
|
|
|
|
/*
 * Read logical cluster `cluster_id` of this tag into `buf`, decoding it
 * through the tag's pipeline. `*nr_read` receives the decoded size.
 *
 * Only valid on random-access read contexts: write-only and sequential
 * contexts get EC3_ERR_NOT_SUPPORTED.
 */
enum ec3_status ec3_tag_ioctx_read_cluster(
	struct ec3_tag_ioctx *tag,
	size_t cluster_id,
	void *buf,
	size_t *nr_read)
{
	if (!(tag->io_mode & EC3_TAG_IO_READ)) {
		return EC3_ERR_NOT_SUPPORTED;
	}

	if (tag->io_mode & EC3_TAG_IO_SEQUENTIAL) {
		return EC3_ERR_NOT_SUPPORTED;
	}

	/* fast path: serve the cluster straight from the cache.
	 * NOTE(review): any non-success status from the cache is treated as a
	 * miss rather than propagated — confirm that is the cache's contract. */
	enum ec3_status status
		= cluster_cache_get(&tag->io_cache, cluster_id, buf, nr_read);
	if (status == EC3_SUCCESS) {
		return EC3_SUCCESS;
	}

	/* map the tag-relative logical cluster to its physical id in the
	 * parent image */
	size_t physical_cluster_id = 0;
	status = ec3_image_ioctx_cluster_logical_to_physical(
		tag->io_parent,
		tag->io_tag_info,
		cluster_id,
		&physical_cluster_id);

	if (status != EC3_SUCCESS) {
		return status;
	}

	/* the cluster isn't in the cache, and needs to be read from the main
	 * image */
	struct cluster cluster_info;
	status = cluster_table_get(
		&tag->io_cluster_table,
		physical_cluster_id,
		&cluster_info);

	if (status != EC3_SUCCESS) {
		return status;
	}

	/* an encoded length beyond the image's cluster size indicates a
	 * corrupt image and would overrun io_cluster_buf below (presumably
	 * allocated at the image cluster size — see create) */
	if (cluster_info.c_len > tag->io_parent->io_header.img_cluster_size) {
		return EC3_ERR_BAD_FORMAT;
	}

	/* c_base is relative to the start of the image's cluster-data area */
	size_t offset = tag->io_cluster_data_offset + cluster_info.c_base;
	size_t r;
	enum b_status status2 = b_file_read(
		tag->io_f_image,
		offset,
		cluster_info.c_len,
		tag->io_cluster_buf,
		&r);

	if (!B_OK(status2)) {
		return ec3_status_from_b_status(status2, EC3_ERR_IO_FAILURE);
	}

	/* decode the raw on-disk bytes into the caller's buffer; nr_read is
	 * set to the decoded size by the pipeline */
	status = ec3_pipeline_decode_cluster(
		tag->io_pipeline,
		tag->io_cluster_buf,
		cluster_info.c_len,
		buf,
		tag->io_parent->io_header.img_cluster_size,
		nr_read);

	if (status != EC3_SUCCESS) {
		return status;
	}

	/* best effort: populate the cache for later reads; the result is
	 * intentionally ignored */
	cluster_cache_put(&tag->io_cache, cluster_id, buf, *nr_read);
	return EC3_SUCCESS;
}
enum ec3_status ec3_tag_ioctx_write_cluster(
|
|
struct ec3_tag_ioctx *tag,
|
|
size_t cluster_id,
|
|
void *buf,
|
|
size_t len,
|
|
size_t *nr_written)
|
|
{
|
|
if (!(tag->io_mode & EC3_TAG_IO_WRITE)) {
|
|
return EC3_ERR_NOT_SUPPORTED;
|
|
}
|
|
|
|
enum ec3_status status = EC3_SUCCESS;
|
|
|
|
if (!(tag->io_mode & EC3_TAG_IO_SEQUENTIAL)) {
|
|
status = cluster_cache_put(
|
|
tag->io_cluster_buf,
|
|
cluster_id,
|
|
buf,
|
|
len);
|
|
|
|
if (status != EC3_SUCCESS) {
|
|
return status;
|
|
}
|
|
|
|
*nr_written = len;
|
|
return EC3_SUCCESS;
|
|
}
|
|
|
|
if (cluster_id != tag->io_seq_cluster_id) {
|
|
/* can only write to each cluster once, sequentially, in this
|
|
* mode */
|
|
return EC3_ERR_NOT_SUPPORTED;
|
|
}
|
|
|
|
size_t offset = 0;
|
|
b_file_size(tag->io_f_data, &offset);
|
|
|
|
size_t encoded_len = 0;
|
|
ec3_pipeline_encode_cluster(
|
|
tag->io_pipeline,
|
|
buf,
|
|
len,
|
|
tag->io_cluster_buf,
|
|
tag->io_parent->io_header.img_cluster_size,
|
|
&encoded_len);
|
|
|
|
b_status status2 = b_file_write(
|
|
tag->io_f_data,
|
|
offset,
|
|
encoded_len,
|
|
tag->io_cluster_buf,
|
|
nr_written);
|
|
|
|
if (!B_OK(status2)) {
|
|
return ec3_status_from_b_status(status2, EC3_ERR_IO_FAILURE);
|
|
}
|
|
|
|
struct cluster cluster = {0};
|
|
cluster.c_id = cluster_id;
|
|
cluster.c_base = offset;
|
|
cluster.c_len = *nr_written;
|
|
|
|
cluster_table_put(&tag->io_cluster_table, &cluster);
|
|
|
|
tag->io_seq_cluster_id++;
|
|
|
|
return EC3_SUCCESS;
|
|
}
|
|
|
|
enum ec3_status ec3_tag_ioctx_get_nr_clusters(
|
|
struct ec3_tag_ioctx *tag,
|
|
size_t *out_nr_clusters)
|
|
{
|
|
size_t nr_clusters = 0;
|
|
|
|
const struct ec3_extent_info *extents
|
|
= ec3_image_ioctx_get_extent_info(tag->io_parent);
|
|
size_t nr_extents = tag->io_parent->io_header.img_nr_extents;
|
|
|
|
for (size_t i = 0; i < nr_extents; i++) {
|
|
const struct ec3_extent_info *x = &extents[i];
|
|
if (x->ex_owner != tag->io_tag_info->tag_ident) {
|
|
continue;
|
|
}
|
|
|
|
if (x->ex_logical_cluster + x->ex_count > nr_clusters) {
|
|
nr_clusters = x->ex_logical_cluster + x->ex_count;
|
|
}
|
|
}
|
|
|
|
size_t highest_cached_cluster;
|
|
|
|
enum ec3_status status = cluster_cache_get_highest_cluster_id(
|
|
&tag->io_cache,
|
|
&highest_cached_cluster);
|
|
|
|
if (status == EC3_SUCCESS && highest_cached_cluster >= nr_clusters) {
|
|
nr_clusters = highest_cached_cluster + 1;
|
|
}
|
|
|
|
*out_nr_clusters = nr_clusters;
|
|
return EC3_SUCCESS;
|
|
}
|