/*
 * ec3/src/image.c — EC3 image container: open/close an image and
 * access its tag, extent, and cluster tables.
 */
#include "image.h"
#include "bin.h"
#include "pipeline.h"
#include "shadow-image.h"
#include "status.h"
#include <blue/io/directory.h>
#include <blue/io/file.h>
#include <blue/io/path.h>
#include <blue/object/buffer.h>
#include <stdlib.h>
#include <string.h>
/* Generate get_opened_tag(): look up a struct ec3_tag_ioctx in an image's
 * io_opened_tags btree by its 64-bit tag identifier (keyed on
 * io_tag_info.tag_ident, linked through io_node). */
B_BTREE_DEFINE_SIMPLE_GET(
	struct ec3_tag_ioctx,
	uint64_t,
	io_node,
	io_tag_info.tag_ident,
	get_opened_tag)
/* Generate put_opened_tag(): insert a struct ec3_tag_ioctx into the
 * io_opened_tags btree under the same key. */
B_BTREE_DEFINE_SIMPLE_INSERT(
	struct ec3_tag_ioctx,
	io_node,
	io_tag_info.tag_ident,
	put_opened_tag)
/*
 * Decode an on-disk (big-endian) image header into host-order
 * struct ec3_image_info. The cluster data region starts immediately
 * after the fixed-size header, so that offset is computed rather than
 * read. Always succeeds; no field validation is performed here.
 */
static enum ec3_status decode_header(
	const struct ec3_header *hdr,
	struct ec3_image_info *info)
{
	/* layout / versioning */
	info->img_version = b_i16_btoh(hdr->h_version);
	info->img_cluster_size
		= ec3_cluster_size_id_to_bytes(b_i16_btoh(hdr->h_cluster_size));
	/* table locations */
	info->img_tag_table_offset = b_i64_btoh(hdr->h_tag_table_offset);
	info->img_extent_table_offset = b_i64_btoh(hdr->h_extent_table_offset);
	info->img_cluster_table_offset
		= b_i64_btoh(hdr->h_cluster_table_offset);
	info->img_cluster_data_offset = sizeof(struct ec3_header);
	/* table sizes */
	info->img_nr_tags = b_i32_btoh(hdr->h_tag_count);
	info->img_nr_extents = b_i32_btoh(hdr->h_extent_count);
	info->img_nr_cluster_groups = b_i32_btoh(hdr->h_cluster_group_count);
	/* encoding functions and application identity */
	info->img_encryption_function = b_i16_btoh(hdr->h_encryption);
	info->img_compression_function = b_i16_btoh(hdr->h_compression);
	info->img_id = b_i64_btoh(hdr->h_app_magic);
	return EC3_SUCCESS;
}
/* Decode one on-disk (big-endian) tag table entry into host order. */
static void decode_tag(
	const struct ec3_tag_table_entry *entry,
	struct ec3_tag_info *info)
{
	info->tag_ident = b_i64_btoh(entry->tag_ident);
	info->tag_total_length = b_i64_btoh(entry->tag_length);
	info->tag_type = b_i32_btoh(entry->tag_type);
	info->tag_flags = b_i32_btoh(entry->tag_flags);
	info->tag_checksum = b_i32_btoh(entry->tag_checksum);
}
/* Decode one on-disk (big-endian) extent record into host order. */
static void decode_extent(
	const struct ec3_extent *ext,
	struct ec3_extent_info *info)
{
	info->ex_owner = b_i64_btoh(ext->ex_owner);
	info->ex_count = b_i32_btoh(ext->ex_count);
	info->ex_logical_cluster = b_i32_btoh(ext->ex_logical_cluster);
	info->ex_physical_cluster = b_i32_btoh(ext->ex_physical_cluster);
}
/*
 * Read and decode the image header at offset 0 of the image file.
 *
 * Returns EC3_ERR_BAD_FORMAT on a short read (file smaller than a
 * header) — the original silently decoded a partially-filled struct,
 * unlike read_tag_table()/read_extent_table() which both check
 * nr_read against the expected size.
 */
static enum ec3_status read_header(
	b_file *image_file,
	struct ec3_image_info *out_info)
{
	struct ec3_header header;
	size_t nr_read;
	enum b_status status
		= b_file_read(image_file, 0, sizeof header, &header, &nr_read);
	if (!B_OK(status)) {
		return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
	}
	if (nr_read != sizeof header) {
		/* truncated image: never decode a partial header */
		return EC3_ERR_BAD_FORMAT;
	}
	return decode_header(&header, out_info);
}
/*
 * Load the tag table: img_nr_tags fixed-size entries packed
 * consecutively at img_tag_table_offset, each decoded into the
 * caller-supplied buffer (resized to hold them all).
 */
static enum ec3_status read_tag_table(
	b_file *image_file,
	const struct ec3_image_info *image_info,
	b_buffer *out_table)
{
	b_buffer_resize(out_table, image_info->img_nr_tags);
	size_t pos = image_info->img_tag_table_offset;
	for (size_t idx = 0; idx < image_info->img_nr_tags; idx++) {
		struct ec3_tag_table_entry raw;
		size_t got;
		enum b_status rc = b_file_read(
			image_file,
			pos,
			sizeof raw,
			&raw,
			&got);
		if (!B_OK(rc)) {
			return ec3_status_from_b_status(
				rc,
				EC3_ERR_IO_FAILURE);
		}
		if (got != sizeof raw) {
			/* table runs past end of file */
			return EC3_ERR_BAD_FORMAT;
		}
		decode_tag(&raw, b_buffer_get(out_table, idx));
		pos += sizeof raw;
	}
	return EC3_SUCCESS;
}
/*
 * Load the extent table: img_nr_extents fixed-size records packed
 * consecutively at img_extent_table_offset, each decoded into the
 * caller-supplied buffer (resized to hold them all).
 */
static enum ec3_status read_extent_table(
	b_file *image_file,
	const struct ec3_image_info *image_info,
	b_buffer *out_table)
{
	b_buffer_resize(out_table, image_info->img_nr_extents);
	size_t pos = image_info->img_extent_table_offset;
	for (size_t idx = 0; idx < image_info->img_nr_extents; idx++) {
		struct ec3_extent raw;
		size_t got;
		enum b_status rc = b_file_read(
			image_file,
			pos,
			sizeof raw,
			&raw,
			&got);
		if (!B_OK(rc)) {
			return ec3_status_from_b_status(
				rc,
				EC3_ERR_IO_FAILURE);
		}
		if (got != sizeof raw) {
			/* table runs past end of file */
			return EC3_ERR_BAD_FORMAT;
		}
		decode_extent(&raw, b_buffer_get(out_table, idx));
		pos += sizeof raw;
	}
	return EC3_SUCCESS;
}
/*
 * Allocate a zeroed image ioctx with empty tag and extent buffers.
 *
 * Improvements over the original: calloc replaces malloc+memset, and
 * the two b_buffer_create() results are checked (the original would
 * hand back an ioctx with NULL tables on allocation failure, crashing
 * later in read_tag_table/read_extent_table).
 */
static enum ec3_status create_ioctx(struct ec3_image_ioctx **out)
{
	struct ec3_image_ioctx *ioctx = calloc(1, sizeof *ioctx);
	if (!ioctx) {
		return EC3_ERR_NO_MEMORY;
	}
	ioctx->io_tag_table = b_buffer_create(sizeof(struct ec3_tag_info));
	ioctx->io_extent_table
		= b_buffer_create(sizeof(struct ec3_extent_info));
	if (!ioctx->io_tag_table || !ioctx->io_extent_table) {
		/* NOTE(review): assumes b_buffer_create returns NULL on
		 * failure rather than aborting — confirm; the check is a
		 * no-op if it can never fail. */
		if (ioctx->io_tag_table) {
			b_buffer_release(ioctx->io_tag_table);
		}
		if (ioctx->io_extent_table) {
			b_buffer_release(ioctx->io_extent_table);
		}
		free(ioctx);
		return EC3_ERR_NO_MEMORY;
	}
	*out = ioctx;
	return EC3_SUCCESS;
}
/*
 * Open an existing image read-only: open the backing file, then load
 * the header, tag table and extent table into a fresh ioctx and attach
 * the cluster table.
 *
 * Fix: the original released only the file on error paths after
 * create_ioctx() succeeded, leaking the ioctx and its two buffers.
 * A goto cleanup chain now tears everything down.
 */
static enum ec3_status open_image_ro(
	b_path *image_path,
	enum ec3_image_ioctx_mode mode,
	struct ec3_image_ioctx **out)
{
	b_file_mode io_mode = B_FILE_READ_ONLY | B_FILE_BINARY;
	b_file *image_file;
	b_status status = b_file_open(
		B_DIRECTORY_ROOT,
		image_path,
		io_mode,
		&image_file);
	if (!B_OK(status)) {
		return EC3_ERR_NO_ENTRY;
	}
	struct ec3_image_ioctx *ioctx;
	enum ec3_status status2 = create_ioctx(&ioctx);
	if (status2 != EC3_SUCCESS) {
		b_file_release(image_file);
		return status2;
	}
	status2 = read_header(image_file, &ioctx->io_header);
	if (status2 != EC3_SUCCESS) {
		goto fail;
	}
	status2 = read_tag_table(
		image_file,
		&ioctx->io_header,
		ioctx->io_tag_table);
	if (status2 != EC3_SUCCESS) {
		goto fail;
	}
	status2 = read_extent_table(
		image_file,
		&ioctx->io_header,
		ioctx->io_extent_table);
	if (status2 != EC3_SUCCESS) {
		goto fail;
	}
	cluster_table_init(
		&ioctx->io_cluster_table,
		image_file,
		ioctx->io_header.img_cluster_table_offset);
	ioctx->io_mode = mode;
	ioctx->io_main = image_file;
	*out = ioctx;
	return EC3_SUCCESS;
fail:
	/* io_opened_tags is still empty here, so releasing the buffers
	 * and the struct is a full teardown */
	b_buffer_release(ioctx->io_tag_table);
	b_buffer_release(ioctx->io_extent_table);
	free(ioctx);
	b_file_release(image_file);
	return status2;
}
/*
 * Open an existing image read-write. Identical to open_image_ro()
 * except for the file-open mode.
 *
 * Fix: same ioctx/buffer leak on post-create_ioctx error paths as in
 * open_image_ro(); cleaned up via a goto chain.
 */
static enum ec3_status open_image_rw(
	b_path *image_path,
	enum ec3_image_ioctx_mode mode,
	struct ec3_image_ioctx **out)
{
	b_file_mode io_mode = B_FILE_READ_WRITE | B_FILE_BINARY;
	b_file *image_file;
	b_status status = b_file_open(
		B_DIRECTORY_ROOT,
		image_path,
		io_mode,
		&image_file);
	if (!B_OK(status)) {
		return EC3_ERR_NO_ENTRY;
	}
	struct ec3_image_ioctx *ioctx;
	enum ec3_status status2 = create_ioctx(&ioctx);
	if (status2 != EC3_SUCCESS) {
		b_file_release(image_file);
		return status2;
	}
	status2 = read_header(image_file, &ioctx->io_header);
	if (status2 != EC3_SUCCESS) {
		goto fail;
	}
	status2 = read_tag_table(
		image_file,
		&ioctx->io_header,
		ioctx->io_tag_table);
	if (status2 != EC3_SUCCESS) {
		goto fail;
	}
	status2 = read_extent_table(
		image_file,
		&ioctx->io_header,
		ioctx->io_extent_table);
	if (status2 != EC3_SUCCESS) {
		goto fail;
	}
	cluster_table_init(
		&ioctx->io_cluster_table,
		image_file,
		ioctx->io_header.img_cluster_table_offset);
	ioctx->io_mode = mode;
	ioctx->io_main = image_file;
	*out = ioctx;
	return EC3_SUCCESS;
fail:
	/* io_opened_tags is still empty here, so releasing the buffers
	 * and the struct is a full teardown */
	b_buffer_release(ioctx->io_tag_table);
	b_buffer_release(ioctx->io_extent_table);
	free(ioctx);
	b_file_release(image_file);
	return status2;
}
/*
 * Create a brand-new image file and an ioctx for it. The header is
 * populated in memory from the caller's parameters; nothing is written
 * to disk here (the on-disk image is produced when the ioctx is
 * closed).
 */
static enum ec3_status open_image_create(
	b_path *image_path,
	enum ec3_image_ioctx_mode mode,
	const struct ec3_parameters *param,
	struct ec3_image_ioctx **out)
{
	b_file *backing;
	b_file_mode open_mode
		= B_FILE_READ_WRITE | B_FILE_CREATE | B_FILE_BINARY;
	b_status rc = b_file_open(
		B_DIRECTORY_ROOT,
		image_path,
		open_mode,
		&backing);
	if (!B_OK(rc)) {
		return EC3_ERR_NO_ENTRY;
	}
	struct ec3_image_ioctx *ioctx;
	enum ec3_status rc2 = create_ioctx(&ioctx);
	if (rc2 != EC3_SUCCESS) {
		b_file_release(backing);
		return rc2;
	}
	ioctx->io_main = backing;
	ioctx->io_mode = mode;
	/* seed the in-memory header from the creation parameters */
	struct ec3_image_info *hdr = &ioctx->io_header;
	hdr->img_version = EC3_VERSION_1_0;
	hdr->img_id = param->p_ident;
	hdr->img_cluster_size
		= ec3_cluster_size_id_to_bytes(param->p_cluster_size);
	hdr->img_encryption_function = param->p_encryption_func;
	hdr->img_compression_function = param->p_compression_func;
	*out = ioctx;
	return EC3_SUCCESS;
}
enum ec3_status ec3_image_ioctx_open(
const char *path,
const struct ec3_parameters *param,
enum ec3_image_ioctx_mode mode,
struct ec3_image_ioctx **out)
{
if (mode == 0) {
return EC3_ERR_INVALID_VALUE;
}
b_path *image_path = b_path_create_from_cstr(path);
if (mode & EC3_IMAGE_IO_TRUNCATE) {
b_path_unlink(image_path);
mode &= ~EC3_IMAGE_IO_TRUNCATE;
}
b_file_info image_info;
enum b_status status = b_path_stat(image_path, &image_info);
2025-02-23 22:00:50 +00:00
enum ec3_status status2 = EC3_ERR_NO_ENTRY;
if ((status == B_ERR_NO_ENTRY) && (mode & EC3_IMAGE_IO_WRITE)) {
2025-02-23 22:00:50 +00:00
status2 = open_image_create(image_path, mode, param, out);
}
if ((status == B_SUCCESS) && (mode & EC3_IMAGE_IO_WRITE)) {
2025-02-23 22:00:50 +00:00
status2 = open_image_rw(image_path, mode, out);
}
if ((status == B_SUCCESS) && (mode & EC3_IMAGE_IO_READ)) {
2025-02-23 22:00:50 +00:00
status2 = open_image_ro(image_path, mode, out);
}
2025-02-23 22:00:50 +00:00
b_path_release(image_path);
return status2;
}
/*
 * Free an image ioctx and everything it owns: the main file handle,
 * the tag/extent buffers, and any tag ioctxs still registered in the
 * io_opened_tags btree (each tag's file handles are released and the
 * tag struct freed directly, bypassing ec3_tag_ioctx_close).
 */
static void destroy_image_ioctx(struct ec3_image_ioctx *image)
{
	if (image->io_main) {
		b_file_release(image->io_main);
	}
	if (image->io_tag_table) {
		b_buffer_release(image->io_tag_table);
	}
	if (image->io_extent_table) {
		b_buffer_release(image->io_extent_table);
	}
	/* drain the opened-tags tree; erase detaches the node so the tag
	 * can be freed while iteration continues */
	b_btree_iterator it;
	b_btree_iterator_begin(&image->io_opened_tags, &it);
	while (b_btree_iterator_is_valid(&it)) {
		struct ec3_tag_ioctx *tag
			= b_unbox(struct ec3_tag_ioctx, it.node, io_node);
		b_btree_iterator_erase(&it);
		/* each handle is optional depending on the tag's open mode */
		if (tag->io_f_data) {
			b_file_release(tag->io_f_data);
		}
		if (tag->io_f_image) {
			b_file_release(tag->io_f_image);
		}
		if (tag->io_f_cluster_table) {
			b_file_release(tag->io_f_cluster_table);
		}
		free(tag);
	}
	free(image);
}
/*
 * Close an image ioctx.
 *
 * Read-only contexts are simply torn down. Write contexts are flushed
 * through a shadow image in three passes over the tag table (see the
 * per-loop comments), then every still-open tag ioctx is closed, the
 * shadow replaces the original, and the ioctx is destroyed.
 *
 * On a failed tag write the shadow is cancelled and the ioctx is left
 * intact so the caller may retry or abandon it.
 *
 * Fix over the original: a stray scrape/blame artifact line inside the
 * body (which broke compilation) has been removed; logic is unchanged.
 */
enum ec3_status ec3_image_ioctx_close(struct ec3_image_ioctx *image)
{
	if (!(image->io_mode & EC3_IMAGE_IO_WRITE)) {
		destroy_image_ioctx(image);
		return EC3_SUCCESS;
	}
	enum ec3_status status = EC3_SUCCESS;
	struct shadow_image shadow;
	status = shadow_image_init(image, &shadow);
	if (status != EC3_SUCCESS) {
		return status;
	}
	const struct ec3_tag_info *tags = ec3_image_ioctx_get_tag_info(image);
	size_t nr_tags = image->io_header.img_nr_tags;
	/* first set of tags to write are those that haven't been opened,
	 * or were opened read-only. we can read clusters directly from
	 * the source image to the dest without re-encoding them */
	for (size_t i = 0; i < nr_tags; i++) {
		const struct ec3_tag_info *tag = &tags[i];
		const struct ec3_tag_ioctx *tag_io = get_opened_tag(
			&image->io_opened_tags,
			tag->tag_ident);
		if (tag_io) {
			/* this tag has been written to. handle it later */
			continue;
		}
		status = shadow_image_write_tag(image, tag, NULL, &shadow);
		if (status != EC3_SUCCESS) {
			shadow_image_cancel(&shadow);
			return status;
		}
	}
	/* next, write the set of tags that have been opened
	 * non-sequential read-write. the modified clusters for these
	 * tags is stored unencoded in an on-disk cache. the unmodified clusters
	 * are read from the original image. each modified cluster needs to be
	 * read into memory, encoded, and written to the dest image
	 */
	for (size_t i = 0; i < nr_tags; i++) {
		const struct ec3_tag_info *tag = &tags[i];
		struct ec3_tag_ioctx *tag_io = get_opened_tag(
			&image->io_opened_tags,
			tag->tag_ident);
		if (!tag_io || !(tag_io->io_mode & EC3_TAG_IO_WRITE)) {
			/* this tag has not been written to, and was handled
			 * earlier */
			continue;
		}
		if (tag_io->io_mode & EC3_TAG_IO_SEQUENTIAL) {
			/* this tag is sequential write-only, and will be
			 * handled later */
			continue;
		}
		status = shadow_image_write_tag(image, tag, tag_io, &shadow);
		if (status != EC3_SUCCESS) {
			shadow_image_cancel(&shadow);
			return status;
		}
	}
	/* finally, write the set of tags that have been opened
	 * sequential write-only. the clusters for these tags
	 * have been written as a single continuous stream of
	 * encoded clusters to a temporary file on-disk, along
	 * with a corresponding cluster table. append the
	 * encoded cluster data directly to the image, and merge
	 * the tag's cluster table with that of the dest image
	 */
	for (size_t i = 0; i < nr_tags; i++) {
		const struct ec3_tag_info *tag = &tags[i];
		struct ec3_tag_ioctx *tag_io = get_opened_tag(
			&image->io_opened_tags,
			tag->tag_ident);
		if (!tag_io || !(tag_io->io_mode & EC3_TAG_IO_WRITE)) {
			/* this tag has not been written to, and was handled
			 * earlier */
			continue;
		}
		if (!(tag_io->io_mode & EC3_TAG_IO_SEQUENTIAL)) {
			/* this tag is not sequential write-only, and was
			 * handled earlier */
			continue;
		}
		status = shadow_image_write_tag(image, tag, tag_io, &shadow);
		if (status != EC3_SUCCESS) {
			shadow_image_cancel(&shadow);
			return status;
		}
	}
	/* every tag has been flushed into the shadow: close the remaining
	 * tag ioctxs before finalizing */
	b_btree_iterator it;
	b_btree_iterator_begin(&image->io_opened_tags, &it);
	while (b_btree_iterator_is_valid(&it)) {
		struct ec3_tag_ioctx *tag
			= b_unbox(struct ec3_tag_ioctx, it.node, io_node);
		b_btree_iterator_erase(&it);
		/* disable write access so that ec3_tag_ioctx_close will
		 * actually destroy the ioctx */
		tag->io_mode &= ~EC3_TAG_IO_WRITE;
		ec3_tag_ioctx_close(tag);
	}
	shadow_image_finish(image, &shadow);
	destroy_image_ioctx(image);
	return EC3_SUCCESS;
}
/* Expose the decoded header; it doubles as the public image descriptor. */
const struct ec3_image_info *ec3_image_ioctx_get_info(
	struct ec3_image_ioctx *image)
{
	const struct ec3_image_info *info = &image->io_header;
	return info;
}
/*
 * Return the (index+1)-th tag of the given type, in tag-table order,
 * or NULL if there are not that many tags of that type.
 */
static const struct ec3_tag_info *get_tag_info_by_type(
	struct ec3_image_ioctx *image,
	uint32_t type,
	unsigned int index)
{
	const struct ec3_tag_info *table
		= ec3_image_ioctx_get_tag_info(image);
	size_t count = image->io_header.img_nr_tags;
	unsigned int remaining = index;
	for (size_t i = 0; i < count; i++) {
		if (table[i].tag_type != type) {
			continue;
		}
		if (remaining == 0) {
			return &table[i];
		}
		remaining--;
	}
	return NULL;
}
/* Linear scan of the tag table for an exact identifier match. */
static const struct ec3_tag_info *get_tag_info_by_id(
	struct ec3_image_ioctx *image,
	uint64_t id)
{
	const struct ec3_tag_info *table
		= ec3_image_ioctx_get_tag_info(image);
	size_t count = image->io_header.img_nr_tags;
	const struct ec3_tag_info *found = NULL;
	for (size_t i = 0; i < count && !found; i++) {
		if (table[i].tag_ident == id) {
			found = &table[i];
		}
	}
	return found;
}
/* First element of the in-memory tag table (contiguous array of
 * img_nr_tags entries). */
const struct ec3_tag_info *ec3_image_ioctx_get_tag_info(
	struct ec3_image_ioctx *image)
{
	const struct ec3_tag_info *first = b_buffer_ptr(image->io_tag_table);
	return first;
}
/* Public wrapper around the internal identifier lookup. */
const struct ec3_tag_info *ec3_image_ioctx_get_tag_info_by_id(
	struct ec3_image_ioctx *image,
	uint64_t id)
{
	const struct ec3_tag_info *info = get_tag_info_by_id(image, id);
	return info;
}
/* First element of the in-memory extent table (contiguous array of
 * img_nr_extents records). */
const struct ec3_extent_info *ec3_image_ioctx_get_extent_info(
	struct ec3_image_ioctx *image)
{
	const struct ec3_extent_info *first
		= b_buffer_ptr(image->io_extent_table);
	return first;
}
/*
 * Allocate a fresh tag identifier: one past the highest identifier
 * currently in the tag table that is below 4096 (idents >= 4096 are
 * outside the auto-allocation range).
 *
 * Fix: the original compared with strict '>', so an existing ident
 * equal to the running candidate never bumped it — e.g. a table with
 * idents {1, 2} returned 2, colliding with an existing tag and making
 * ec3_image_ioctx_create_tag fail with EC3_ERR_NAME_EXISTS.
 */
static uint64_t allocate_tag_id(struct ec3_image_ioctx *image)
{
	uint64_t id = 0;
	const struct ec3_tag_info *tags = ec3_image_ioctx_get_tag_info(image);
	size_t nr_tags = image->io_header.img_nr_tags;
	for (size_t i = 0; i < nr_tags; i++) {
		/* >= so an ident equal to the candidate pushes past itself */
		if (tags[i].tag_ident < 4096 && tags[i].tag_ident >= id) {
			id = tags[i].tag_ident + 1;
		}
	}
	return id;
}
static enum ec3_status init_pipeline(
struct ec3_image_ioctx *image,
struct ec3_pipeline **pipeline)
{
struct ec3_pipeline_stage_args stages[2] = {0};
if (image->io_header.img_compression_function != EC3_COMPRESSION_NONE) {
stages[0].type = ec3_get_pipeline_stage_for_compression_func(
image->io_header.img_compression_function);
}
if (image->io_header.img_encryption_function != EC3_ENCRYPTION_NONE) {
stages[1].type = ec3_get_pipeline_stage_for_encryption_func(
image->io_header.img_encryption_function);
}
return ec3_pipeline_create(
stages,
sizeof stages / sizeof stages[0],
image->io_header.img_cluster_size,
pipeline);
}
/*
 * Read-only tag setup: point the tag's cluster table at the image's
 * main file. tag_info is currently unused.
 */
static enum ec3_status init_tag_ioctx_ro(
	struct ec3_image_ioctx *image,
	const struct ec3_tag_info *tag_info,
	struct ec3_tag_ioctx *tag)
{
	(void)tag_info;
	cluster_table_init(
		&tag->io_cluster_table,
		image->io_main,
		image->io_header.img_cluster_table_offset);
	return EC3_SUCCESS;
}
/*
 * Read-write tag setup. Currently a no-op placeholder: no extra state
 * is needed beyond what ec3_tag_ioctx_create() already set up.
 */
static enum ec3_status init_tag_ioctx_rw(
	struct ec3_image_ioctx *image,
	const struct ec3_tag_info *tag_info,
	struct ec3_tag_ioctx *tag)
{
	return EC3_SUCCESS;
}
/*
 * Newly-created tag setup. Currently a no-op placeholder: no extra
 * state is needed beyond what ec3_tag_ioctx_create() already set up.
 */
static enum ec3_status init_tag_ioctx_create(
	struct ec3_image_ioctx *image,
	const struct ec3_tag_info *tag_info,
	struct ec3_tag_ioctx *tag)
{
	return EC3_SUCCESS;
}
/*
 * Open the (index+1)-th tag of the given type for reading/writing.
 *
 * Validates the mode against the image's open mode, rejects duplicate
 * opens, builds the tag ioctx and its encode pipeline, and registers
 * the tag in io_opened_tags.
 *
 * Fix: the original never inserted the new tag ioctx into
 * io_opened_tags (only ec3_image_ioctx_create_tag did), so the
 * duplicate-open check above could never trigger for tags opened this
 * way, and ec3_image_ioctx_close()'s write-back loops treated them as
 * unopened — written data would be silently dropped.
 */
enum ec3_status ec3_image_ioctx_open_tag_by_type(
	struct ec3_image_ioctx *image,
	uint32_t type,
	unsigned int index,
	enum ec3_tag_ioctx_mode mode,
	struct ec3_tag_ioctx **out)
{
	if ((mode & EC3_TAG_IO_WRITE)
		&& !(image->io_mode & EC3_IMAGE_IO_WRITE)) {
		return EC3_ERR_NOT_SUPPORTED;
	}
	if (!(mode & (EC3_TAG_IO_READ | EC3_TAG_IO_WRITE))) {
		return EC3_ERR_INVALID_VALUE;
	}
	const struct ec3_tag_info *tag_info
		= get_tag_info_by_type(image, type, index);
	if (!tag_info) {
		return EC3_ERR_NO_ENTRY;
	}
	struct ec3_tag_ioctx *tag_ioctx
		= get_opened_tag(&image->io_opened_tags, tag_info->tag_ident);
	if (tag_ioctx) {
		/* already open */
		return EC3_ERR_BAD_STATE;
	}
	enum ec3_status status = ec3_tag_ioctx_create(
		&tag_ioctx,
		image->io_header.img_cluster_size,
		mode);
	if (status != EC3_SUCCESS) {
		return status;
	}
	status = init_pipeline(image, &tag_ioctx->io_pipeline);
	if (status != EC3_SUCCESS) {
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	tag_ioctx->io_mode = mode;
	tag_ioctx->io_parent = image;
	tag_ioctx->io_tag_info = *tag_info;
	tag_ioctx->io_f_image = b_file_retain(image->io_main);
	if (mode & EC3_TAG_IO_WRITE) {
		status = init_tag_ioctx_rw(image, tag_info, tag_ioctx);
	} else {
		status = init_tag_ioctx_ro(image, tag_info, tag_ioctx);
	}
	if (status != EC3_SUCCESS) {
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	/* register with the image so close() flushes/releases this tag */
	put_opened_tag(&image->io_opened_tags, tag_ioctx);
	*out = tag_ioctx;
	return EC3_SUCCESS;
}
/*
 * Open an existing tag by its identifier for reading/writing.
 *
 * Same flow as ec3_image_ioctx_open_tag_by_type, with an identifier
 * lookup instead of a type/index lookup.
 *
 * Fix: the original never inserted the new tag ioctx into
 * io_opened_tags, so duplicate opens were not detected and
 * ec3_image_ioctx_close() could not flush or release tags opened this
 * way (written data would be silently dropped).
 */
enum ec3_status ec3_image_ioctx_open_tag_by_id(
	struct ec3_image_ioctx *image,
	uint64_t id,
	enum ec3_tag_ioctx_mode mode,
	struct ec3_tag_ioctx **out)
{
	if ((mode & EC3_TAG_IO_WRITE)
		&& !(image->io_mode & EC3_IMAGE_IO_WRITE)) {
		return EC3_ERR_NOT_SUPPORTED;
	}
	if (!(mode & (EC3_TAG_IO_READ | EC3_TAG_IO_WRITE))) {
		return EC3_ERR_INVALID_VALUE;
	}
	const struct ec3_tag_info *tag_info = get_tag_info_by_id(image, id);
	if (!tag_info) {
		return EC3_ERR_NO_ENTRY;
	}
	struct ec3_tag_ioctx *tag_ioctx
		= get_opened_tag(&image->io_opened_tags, tag_info->tag_ident);
	if (tag_ioctx) {
		/* already open */
		return EC3_ERR_BAD_STATE;
	}
	enum ec3_status status = ec3_tag_ioctx_create(
		&tag_ioctx,
		image->io_header.img_cluster_size,
		mode);
	if (status != EC3_SUCCESS) {
		return status;
	}
	status = init_pipeline(image, &tag_ioctx->io_pipeline);
	if (status != EC3_SUCCESS) {
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	tag_ioctx->io_mode = mode;
	tag_ioctx->io_parent = image;
	tag_ioctx->io_tag_info = *tag_info;
	tag_ioctx->io_f_image = b_file_retain(image->io_main);
	if (mode & EC3_TAG_IO_WRITE) {
		status = init_tag_ioctx_rw(image, tag_info, tag_ioctx);
	} else {
		status = init_tag_ioctx_ro(image, tag_info, tag_ioctx);
	}
	if (status != EC3_SUCCESS) {
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	/* register with the image so close() flushes/releases this tag */
	put_opened_tag(&image->io_opened_tags, tag_ioctx);
	*out = tag_ioctx;
	return EC3_SUCCESS;
}
/*
 * Create a brand-new tag in a writable image and open it. Write mode
 * is forced on. Pass id == 0 to auto-allocate an identifier.
 *
 * Fix: the new tag-table entry is now fully zeroed before being
 * populated — the original only set tag_ident and tag_type, leaving
 * tag_flags/tag_checksum/tag_total_length with whatever
 * b_buffer_push_back left there (garbage unless it zeroes, which is
 * not established here). The memset is a no-op if push_back already
 * zeroes.
 */
enum ec3_status ec3_image_ioctx_create_tag(
	struct ec3_image_ioctx *image,
	uint32_t type,
	uint64_t id,
	enum ec3_tag_ioctx_mode mode,
	struct ec3_tag_ioctx **out)
{
	mode |= EC3_TAG_IO_WRITE;
	if (!(image->io_mode & EC3_IMAGE_IO_WRITE)) {
		return EC3_ERR_NOT_SUPPORTED;
	}
	if (id == 0) {
		id = allocate_tag_id(image);
	}
	struct ec3_tag_info *tag_info
		= (struct ec3_tag_info *)get_tag_info_by_id(image, id);
	if (tag_info) {
		return EC3_ERR_NAME_EXISTS;
	}
	struct ec3_tag_ioctx *tag_ioctx
		= get_opened_tag(&image->io_opened_tags, id);
	if (tag_ioctx) {
		return EC3_ERR_BAD_STATE;
	}
	enum ec3_status status = ec3_tag_ioctx_create(
		&tag_ioctx,
		image->io_header.img_cluster_size,
		mode);
	if (status != EC3_SUCCESS) {
		return status;
	}
	status = init_pipeline(image, &tag_ioctx->io_pipeline);
	if (status != EC3_SUCCESS) {
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	/* append a fresh, fully-initialized table entry */
	b_buffer_push_back(image->io_tag_table, 1, (void **)&tag_info);
	memset(tag_info, 0, sizeof *tag_info);
	tag_info->tag_ident = id;
	tag_info->tag_type = type;
	tag_ioctx->io_mode = mode;
	tag_ioctx->io_parent = image;
	tag_ioctx->io_tag_info = *tag_info;
	tag_ioctx->io_f_image = b_file_retain(image->io_main);
	status = init_tag_ioctx_create(image, tag_info, tag_ioctx);
	if (status != EC3_SUCCESS) {
		/* roll back the table entry before tearing down the ioctx */
		b_buffer_pop_back(image->io_tag_table, 1);
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	image->io_header.img_nr_tags++;
	put_opened_tag(&image->io_opened_tags, tag_ioctx);
	*out = tag_ioctx;
	return EC3_SUCCESS;
}
/*
 * Map a tag-relative logical cluster number to a physical cluster in
 * the image by scanning the extent table for an extent owned by the
 * tag whose half-open range [ex_logical_cluster, ex_logical_cluster +
 * ex_count) covers it. Returns EC3_ERR_OUT_OF_BOUNDS when no extent
 * matches.
 */
enum ec3_status ec3_image_ioctx_cluster_logical_to_physical(
	struct ec3_image_ioctx *image,
	const struct ec3_tag_info *tag,
	size_t logical_cluster,
	size_t *out_physical_cluster)
{
	const struct ec3_extent_info *table
		= b_buffer_ptr(image->io_extent_table);
	size_t count = image->io_header.img_nr_extents;
	for (size_t i = 0; i < count; i++) {
		const struct ec3_extent_info *ext = &table[i];
		if (ext->ex_owner != tag->tag_ident) {
			continue;
		}
		size_t first = ext->ex_logical_cluster;
		size_t end = first + ext->ex_count; /* exclusive */
		if (logical_cluster >= first && logical_cluster < end) {
			*out_physical_cluster = ext->ex_physical_cluster
				+ (logical_cluster - first);
			return EC3_SUCCESS;
		}
	}
	return EC3_ERR_OUT_OF_BOUNDS;
}
/*
 * Report how many logical clusters a tag spans: the highest
 * logical-cluster end across its extents, extended by any
 * higher-numbered cluster sitting in the tag's write cache when the
 * tag is currently open.
 */
enum ec3_status ec3_image_ioctx_get_tag_nr_clusters(
	struct ec3_image_ioctx *image,
	const struct ec3_tag_info *tag,
	size_t *out_nr_clusters)
{
	const struct ec3_extent_info *table
		= ec3_image_ioctx_get_extent_info(image);
	size_t count = image->io_header.img_nr_extents;
	size_t span = 0;
	for (size_t i = 0; i < count; i++) {
		if (table[i].ex_owner != tag->tag_ident) {
			continue;
		}
		size_t end = table[i].ex_logical_cluster + table[i].ex_count;
		if (end > span) {
			span = end;
		}
	}
	struct ec3_tag_ioctx *tag_ioctx
		= get_opened_tag(&image->io_opened_tags, tag->tag_ident);
	if (tag_ioctx) {
		/* an open tag may have cached clusters past the extents */
		size_t highest;
		enum ec3_status rc = cluster_cache_get_highest_cluster_id(
			&tag_ioctx->io_cache,
			&highest);
		if (rc == EC3_SUCCESS && highest >= span) {
			span = highest + 1;
		}
	}
	*out_nr_clusters = span;
	return EC3_SUCCESS;
}