move cluster table management from ec3_reader/writer to ec3_pipeline

This commit is contained in:
2025-02-15 12:36:37 +00:00
parent 6ffbb791fe
commit de99b88097
17 changed files with 491 additions and 127 deletions

View File

@@ -5,6 +5,7 @@ static enum ec3_status encrypt(
const void *src,
size_t len,
void *dest,
size_t dest_max,
size_t *nr_written)
{
return EC3_SUCCESS;
@@ -13,7 +14,9 @@ static enum ec3_status encrypt(
static enum ec3_status decrypt(
struct ec3_pipeline_stage *stage,
const void *src,
size_t len,
void *dest,
size_t dest_max,
size_t *nr_read)
{
return EC3_SUCCESS;
@@ -22,6 +25,6 @@ static enum ec3_status decrypt(
const struct ec3_pipeline_stage_type pipeline_aes256 = {
.t_id = EC3_PIPELINE_AES256,
.t_flags = EC3_PIPELINE_F_NONE,
.t_data_in = decrypt,
.t_data_out = encrypt,
.t_cluster_in = decrypt,
.t_cluster_out = encrypt,
};

View File

@@ -537,6 +537,10 @@ int b_tree_get(struct b_tree *tree, b_tree_node_entry *to_get)
}
}
if (next_id == B_TREE_INVALID_PTR) {
break;
}
err = node_read(tree, next_id, next);
if (err != 0) {
return err;

View File

@@ -118,7 +118,7 @@ struct ec3_tag_table_entry {
b_i32 tag_checksum;
b_i32 tag_reserved1;
b_i64 tag_ident;
b_i64 tag_reserved2;
b_i64 tag_length;
};
/* Extents serve two purposes:
@@ -206,7 +206,6 @@ struct ec3_vnode_group {
b_i32 g_child_offsets[EC3_VNODES_PER_GROUP_4K + 1];
} g_4k;
#if 0
struct {
struct ec3_vnode g_vnodes[EC3_VNODES_PER_GROUP_8K];
b_i32 g_child_offsets[EC3_VNODES_PER_GROUP_8K + 1];
@@ -226,7 +225,6 @@ struct ec3_vnode_group {
struct ec3_vnode g_vnodes[EC3_VNODES_PER_GROUP_64K];
b_i32 g_child_offsets[EC3_VNODES_PER_GROUP_64K + 1];
} g_64k;
#endif
};
};
@@ -247,7 +245,6 @@ struct ec3_vnode_chunk_group {
b_i32 g_child_offsets[EC3_VNCH_PER_GROUP_4K + 1];
} g_4k;
#if 0
struct {
struct ec3_vnode_chunk g_links[EC3_VNCH_PER_GROUP_8K];
b_i32 g_child_offsets[EC3_VNCH_PER_GROUP_8K + 1];
@@ -267,7 +264,6 @@ struct ec3_vnode_chunk_group {
struct ec3_vnode_chunk g_links[EC3_VNCH_PER_GROUP_64K];
b_i32 g_child_offsets[EC3_VNCH_PER_GROUP_64K + 1];
} g_64k;
#endif
};
};

View File

@@ -18,7 +18,8 @@ static int node_init(struct ec3_cluster_group *n)
static int tree_get_node(struct b_tree *p, unsigned long id, b_tree_node *n)
{
struct cluster_table *table = (struct cluster_table *)p;
size_t offset = (id * sizeof(struct ec3_cluster_group));
size_t offset
= table->t_offset + (id * sizeof(struct ec3_cluster_group));
fseek(table->t_storage, offset, SEEK_SET);
size_t r
= fread(n,
@@ -34,7 +35,8 @@ static int tree_put_node(
const b_tree_node *n)
{
struct cluster_table *table = (struct cluster_table *)p;
size_t offset = (id * sizeof(struct ec3_cluster_group));
size_t offset
= table->t_offset + (id * sizeof(struct ec3_cluster_group));
fseek(table->t_storage, offset, SEEK_SET);
size_t r = fwrite(
n,
@@ -147,11 +149,15 @@ static const struct b_tree_ops cluster_table_ops = {
.entry_compare = entry_compare,
};
void cluster_table_init(struct cluster_table *table, FILE *storage)
void cluster_table_init(
struct cluster_table *table,
FILE *storage,
size_t offset)
{
memset(table, 0x0, sizeof *table);
table->t_storage = storage;
table->t_offset = offset;
b_tree_init(
&table->t_base,

View File

@@ -9,6 +9,7 @@ struct cluster_table {
struct b_tree t_base;
size_t t_nr_groups;
FILE *t_storage;
size_t t_offset;
};
struct cluster {
@@ -19,7 +20,10 @@ struct cluster {
unsigned int c_flags;
};
extern void cluster_table_init(struct cluster_table *table, FILE *storage);
extern void cluster_table_init(
struct cluster_table *table,
FILE *storage,
size_t offset);
extern void cluster_table_finish(struct cluster_table *table);
extern int cluster_table_get(

View File

@@ -5,6 +5,7 @@ enum command_id {
CMD_ROOT = 0,
CMD_CREATE,
CMD_WRAP,
CMD_UNWRAP,
CMD_CAPTURE,
CMD_SHELL,
CMD_EXTRACT,

View File

@@ -1,39 +0,0 @@
#include "pipeline.h"
#include <stdio.h>
/*
 * File-target output stage: append `len` bytes from `src` to the FILE
 * carried in the stage argument.  `dest` is not written by this stage.
 * Reports EC3_ERR_IO_FAILURE on a short write; `*nr_written` always
 * receives the number of bytes fwrite actually accepted.
 */
static enum ec3_status write(
	struct ec3_pipeline_stage *stage,
	const void *src,
	size_t len,
	void *dest,
	size_t *nr_written)
{
	FILE *fp = stage->s_arg;
	size_t written = fwrite(src, 1, len, fp);

	*nr_written = written;
	return written == len ? EC3_SUCCESS : EC3_ERR_IO_FAILURE;
}
/*
 * File-target input stage (stub): fetches the stage's FILE argument but
 * performs no actual read and always reports success.  `*nr_read` is
 * left untouched by this function.
 */
static enum ec3_status read(
struct ec3_pipeline_stage *stage,
const void *src,
void *dest,
size_t *nr_read)
{
/* NOTE(review): fp is fetched but unused — reading was never implemented. */
FILE *fp = stage->s_arg;
return EC3_SUCCESS;
}
/*
 * Stage-type descriptor for the FILE-backed terminal stage of a
 * pipeline: `read` on the input path, `write` on the output path.
 */
const struct ec3_pipeline_stage_type pipeline_file = {
.t_id = EC3_PIPELINE_FILE,
.t_flags = EC3_PIPELINE_F_NONE,
.t_data_in = read,
.t_data_out = write,
};

View File

@@ -1,4 +1,5 @@
#include "pipeline.h"
#include "bin.h"
#include <stdlib.h>
@@ -6,12 +7,10 @@
extern const struct ec3_pipeline_stage_type pipeline_zstd;
extern const struct ec3_pipeline_stage_type pipeline_aes256;
extern const struct ec3_pipeline_stage_type pipeline_file;
static const struct ec3_pipeline_stage_type *stage_types[] = {
[EC3_PIPELINE_ZSTD] = &pipeline_zstd,
[EC3_PIPELINE_AES256] = &pipeline_aes256,
[EC3_PIPELINE_FILE] = &pipeline_file,
};
static const size_t nr_stage_types = sizeof stage_types / sizeof stage_types[0];
@@ -58,6 +57,10 @@ extern enum ec3_status ec3_pipeline_create(
struct ec3_pipeline_stage_args stages[],
size_t nr_stages,
size_t cluster_size,
FILE *target,
size_t target_base_offset,
FILE *cluster_table,
size_t cluster_table_base_offset,
struct ec3_pipeline **out)
{
enum ec3_status status = EC3_SUCCESS;
@@ -100,6 +103,15 @@ extern enum ec3_status ec3_pipeline_create(
b_queue_push_back(&pipeline->p_stages, &stage->s_entry);
}
cluster_table_init(
&pipeline->p_cluster_table,
cluster_table,
cluster_table_base_offset);
pipeline->p_target = target;
pipeline->p_target_base_offset = target_base_offset;
pipeline->p_cluster_size = cluster_size;
*out = pipeline;
return status;
}
@@ -108,11 +120,11 @@ void ec3_pipeline_destroy(struct ec3_pipeline *p)
{
}
enum ec3_status ec3_pipeline_data_out(
enum ec3_status ec3_pipeline_write_cluster(
struct ec3_pipeline *pipeline,
void *p,
size_t len,
size_t *nr_written)
size_t *bytes_written)
{
b_queue_entry *cur = b_queue_first(&pipeline->p_stages);
enum ec3_status status = EC3_SUCCESS;
@@ -121,6 +133,8 @@ enum ec3_status ec3_pipeline_data_out(
size_t stage_in_size = len;
size_t stage_out_size = 0;
size_t dest_max = pipeline->p_cluster_size;
while (cur) {
struct ec3_pipeline_stage *stage
= b_unbox(struct ec3_pipeline_stage, cur, s_entry);
@@ -132,11 +146,12 @@ enum ec3_status ec3_pipeline_data_out(
dest = src;
}
status = stage->s_type->t_data_out(
status = stage->s_type->t_cluster_out(
stage,
src,
stage_in_size,
dest,
dest_max,
&stage_out_size);
if (status != EC3_SUCCESS) {
@@ -148,20 +163,112 @@ enum ec3_status ec3_pipeline_data_out(
cur = b_queue_next(cur);
}
if (nr_written) {
*nr_written = stage_out_size;
stage_out_size = fwrite(src, 1, stage_in_size, pipeline->p_target);
if (bytes_written) {
*bytes_written = stage_out_size;
}
struct cluster cluster = {
.c_id = pipeline->p_next_cluster_id++,
.c_base = pipeline->p_data_offset,
.c_len = stage_out_size,
.c_flags = pipeline->p_cluster_flags,
.c_checksum = 0,
};
pipeline->p_data_offset += stage_out_size;
cluster_table_put(&pipeline->p_cluster_table, &cluster);
return EC3_SUCCESS;
}
enum ec3_status ec3_pipeline_data_in(
enum ec3_status ec3_pipeline_seek(struct ec3_pipeline *pipeline, size_t pos)
{
fseek(pipeline->p_target,
pipeline->p_target_base_offset + pos,
SEEK_SET);
return EC3_SUCCESS;
}
enum ec3_status ec3_pipeline_read_cluster(
struct ec3_pipeline *pipeline,
void *p,
size_t max,
size_t cluster_id,
size_t *nr_read)
{
return EC3_ERR_NOT_SUPPORTED;
if (!pipeline->p_read_buf) {
pipeline->p_read_buf = malloc(pipeline->p_cluster_size);
if (!pipeline->p_read_buf) {
return EC3_ERR_NO_MEMORY;
}
}
struct cluster cluster = {0};
enum ec3_status status = cluster_table_get(
&pipeline->p_cluster_table,
cluster_id,
&cluster);
if (status != EC3_SUCCESS) {
return status;
}
b_queue_entry *cur = b_queue_first(&pipeline->p_stages);
void *src = pipeline->p_read_buf;
fseek(pipeline->p_target,
pipeline->p_target_base_offset + cluster.c_base,
SEEK_SET);
size_t stage_in_size
= fread(pipeline->p_read_buf,
1,
cluster.c_len,
pipeline->p_target);
size_t stage_out_size = 0;
if (stage_in_size != cluster.c_len) {
return EC3_ERR_END_OF_FILE;
}
while (cur) {
struct ec3_pipeline_stage *stage
= b_unbox(struct ec3_pipeline_stage, cur, s_entry);
void *dest;
if (stage->s_type->t_flags & EC3_PIPELINE_F_BUFFERED) {
dest = stage->s_buf;
} else {
dest = src;
}
status = stage->s_type->t_cluster_in(
stage,
src,
stage_in_size,
dest,
pipeline->p_cluster_size,
&stage_out_size);
if (status != EC3_SUCCESS) {
return status;
}
src = dest;
stage_in_size = stage_out_size;
cur = b_queue_next(cur);
}
memcpy(p, src, stage_in_size);
if (nr_read) {
*nr_read = stage_in_size;
}
return EC3_SUCCESS;
}
size_t ec3_get_cluster_size(unsigned int v)
@@ -189,4 +296,4 @@ enum ec3_pipeline_stage_type_id ec3_get_pipeline_stage_for_encryption_func(
default:
return EC3_PIPELINE_NONE;
}
}
}

View File

@@ -1,16 +1,17 @@
#ifndef PIPELINE_H_
#define PIPELINE_H_
#include "cluster.h"
#include "status.h"
#include <blue/core/queue.h>
#include <stddef.h>
#include <stdio.h>
enum ec3_pipeline_stage_type_id {
EC3_PIPELINE_NONE = 0,
EC3_PIPELINE_AES256,
EC3_PIPELINE_ZSTD,
EC3_PIPELINE_FILE,
};
enum ec3_pipeline_stage_type_flags {
@@ -24,16 +25,19 @@ struct ec3_pipeline_stage_type {
enum ec3_pipeline_stage_type_id t_id;
enum ec3_pipeline_stage_type_flags t_flags;
enum ec3_status (*t_data_in)(
struct ec3_pipeline_stage *,
const void *,
void *,
size_t *);
enum ec3_status (*t_data_out)(
enum ec3_status (*t_cluster_in)(
struct ec3_pipeline_stage *,
const void *,
size_t,
void *,
size_t,
size_t *);
enum ec3_status (*t_cluster_out)(
struct ec3_pipeline_stage *,
const void *,
size_t,
void *,
size_t,
size_t *);
};
@@ -50,6 +54,17 @@ struct ec3_pipeline_stage_args {
};
struct ec3_pipeline {
FILE *p_target;
size_t p_target_base_offset;
size_t p_cluster_size;
unsigned int p_cluster_flags;
struct cluster_table p_cluster_table;
size_t p_next_cluster_id;
size_t p_data_offset;
void *p_read_buf;
b_queue p_stages;
};
@@ -57,22 +72,28 @@ extern enum ec3_status ec3_pipeline_create(
struct ec3_pipeline_stage_args stages[],
size_t nr_stages,
size_t cluster_size,
FILE *target,
size_t target_base_offset,
FILE *cluster_table,
size_t cluster_table_base_offset,
struct ec3_pipeline **out);
extern void ec3_pipeline_destroy(struct ec3_pipeline *p);
extern enum ec3_status ec3_pipeline_data_out(
extern enum ec3_status ec3_pipeline_write_cluster(
struct ec3_pipeline *pipeline,
void *p,
size_t len,
size_t *nr_written);
extern enum ec3_status ec3_pipeline_data_in(
size_t *bytes_written);
extern enum ec3_status ec3_pipeline_read_cluster(
struct ec3_pipeline *pipeline,
void *p,
size_t max,
size_t cluster_id,
size_t *nr_read);
extern size_t ec3_get_cluster_size(unsigned int v);
extern enum ec3_pipeline_stage_id ec3_get_pipeline_stage_for_encryption_func(unsigned int func);
extern enum ec3_pipeline_stage_id ec3_get_pipeline_stage_for_compression_func(unsigned int func);
extern enum ec3_pipeline_stage_type_id
ec3_get_pipeline_stage_for_encryption_func(unsigned int func);
extern enum ec3_pipeline_stage_type_id
ec3_get_pipeline_stage_for_compression_func(unsigned int func);
#endif

View File

@@ -1,3 +1,4 @@
#include "bin.h"
#include "commands.h"
#include "misc.h"
#include "read.h"
@@ -23,6 +24,30 @@ static void tag_type_string(unsigned long in, char out[5])
out[4] = 0;
}
/* Map an EC3 encryption-function id to a printable name. */
static const char *encryption_function_name(unsigned int v)
{
	if (v == EC3_ENCRYPTION_NONE) {
		return "NONE";
	}
	if (v == EC3_ENCRYPTION_AES256) {
		return "AES256";
	}
	return "UNKNOWN";
}
/* Map an EC3 compression-function id to a printable name. */
static const char *compression_function_name(unsigned int v)
{
	if (v == EC3_COMPRESSION_NONE) {
		return "NONE";
	}
	if (v == EC3_COMPRESSION_ZSTD) {
		return "ZSTD";
	}
	return "UNKNOWN";
}
static int query(
const b_command *self,
const b_arglist *opt,
@@ -65,6 +90,12 @@ static int query(
printf(" %-20s: %04x\n", "format version", c_info->c_version);
printf(" %-20s: %s\n", "identifier", container_id);
printf(" %-20s: %u\n", "cluster size", c_info->c_cluster_size);
printf(" %-20s: %s\n",
"compression",
compression_function_name(c_info->c_compression_function));
printf(" %-20s: %s\n",
"encryption",
encryption_function_name(c_info->c_encryption_function));
printf(" %-20s: 0x%zx\n",
"tag table offset",
c_info->c_tag_table_offset);
@@ -95,6 +126,9 @@ static int query(
printf(" (%lx/%s) %s\n", tags[i].tag_type, tag_type, tag_id);
printf(" %-8s: %08lx\n", "flags", tags[i].tag_flags);
printf(" %-8s: %08lx\n", "checksum", tags[i].tag_checksum);
printf(" %-8s: %llu bytes\n",
"size",
tags[i].tag_total_length);
}
printf("\nextents:\n");

View File

@@ -19,6 +19,13 @@ struct ec3_reader {
struct ec3_pipeline *r_pipeline;
};
struct ec3_tag_reader {
struct ec3_reader *tag_parent;
const struct ec3_tag_info *tag_info;
size_t tag_current_cluster;
unsigned char *tag_buf;
};
static void decode_header(const struct ec3_header *in, struct ec3_reader *out)
{
out->c_info.c_version = b_i16_btoh(in->h_version);
@@ -32,6 +39,8 @@ static void decode_header(const struct ec3_header *in, struct ec3_reader *out)
out->c_info.c_nr_tags = b_i32_btoh(in->h_tag_count);
out->c_info.c_nr_extents = b_i32_btoh(in->h_tag_count);
out->c_info.c_nr_cluster_groups = b_i32_btoh(in->h_cluster_group_count);
out->c_info.c_encryption_function = b_i16_btoh(in->h_encryption);
out->c_info.c_compression_function = b_i16_btoh(in->h_compression);
out->c_info.c_id = b_i64_btoh(in->h_app_magic);
}
@@ -43,6 +52,7 @@ static void decode_tag(
out->tag_flags = b_i32_btoh(in->tag_flags);
out->tag_checksum = b_i32_btoh(in->tag_checksum);
out->tag_ident = b_i64_btoh(in->tag_ident);
out->tag_total_length = b_i64_btoh(in->tag_length);
}
static void decode_extent(
@@ -127,16 +137,15 @@ enum ec3_status ec3_reader_create(FILE *inp, struct ec3_reader **out)
unsigned short compression = b_i16_btoh(header.h_compression);
unsigned short encryption = b_i16_btoh(header.h_encryption);
struct ec3_pipeline_stage_args stages[3] = {0};
stages[0].type = EC3_PIPELINE_FILE;
struct ec3_pipeline_stage_args stages[2] = {0};
if (encryption != EC3_ENCRYPTION_NONE) {
stages[1].type = ec3_get_pipeline_stage_for_encryption_func(
stages[0].type = ec3_get_pipeline_stage_for_encryption_func(
encryption);
}
if (compression != EC3_COMPRESSION_NONE) {
stages[2].type = ec3_get_pipeline_stage_for_compression_func(
stages[1].type = ec3_get_pipeline_stage_for_compression_func(
compression);
}
@@ -144,6 +153,10 @@ enum ec3_status ec3_reader_create(FILE *inp, struct ec3_reader **out)
stages,
sizeof stages / sizeof stages[0],
reader->c_info.c_cluster_size,
inp,
sizeof header,
inp,
reader->c_info.c_cluster_table_offset,
&reader->r_pipeline);
if (status != EC3_SUCCESS) {
@@ -215,12 +228,207 @@ const struct ec3_extent_info *ec3_reader_get_extents(
return reader->c_extents;
}
const struct ec3_tag_info *ec3_reader_get_tag_info(
const struct ec3_reader *reader,
unsigned long long ident)
{
for (size_t i = 0; i < reader->c_info.c_nr_tags; i++) {
if (reader->c_tags[i].tag_ident == ident) {
return &reader->c_tags[i];
}
}
return NULL;
}
/*
 * Open a per-tag reader for the tag identified by `tag_ident`.
 *
 * Fix: removed a leftover stub `return EC3_ERR_NOT_SUPPORTED;` at the
 * top of the function that made the entire implementation unreachable.
 *
 * On success `*out` receives a heap-allocated ec3_tag_reader holding a
 * one-cluster scratch buffer; release it with ec3_tag_reader_close.
 * Returns EC3_ERR_NO_ENTRY when the identifier is unknown, or
 * EC3_ERR_NO_MEMORY on allocation failure.
 */
enum ec3_status ec3_reader_open_tag(
	struct ec3_reader *reader,
	uint64_t tag_ident,
	struct ec3_tag_reader **out)
{
	const struct ec3_tag_info *tag_info = NULL;

	for (unsigned int i = 0; i < reader->c_info.c_nr_tags; i++) {
		if (reader->c_tags[i].tag_ident == tag_ident) {
			tag_info = &reader->c_tags[i];
			break;
		}
	}
	if (!tag_info) {
		return EC3_ERR_NO_ENTRY;
	}

	struct ec3_tag_reader *tag_reader = malloc(sizeof *tag_reader);
	if (!tag_reader) {
		return EC3_ERR_NO_MEMORY;
	}
	memset(tag_reader, 0x0, sizeof *tag_reader);
	tag_reader->tag_parent = reader;
	tag_reader->tag_info = tag_info;
	/* (size_t)-1 marks "no cluster cached yet". */
	tag_reader->tag_current_cluster = (size_t)-1;
	tag_reader->tag_buf = malloc(reader->c_info.c_cluster_size);
	if (!tag_reader->tag_buf) {
		free(tag_reader);
		return EC3_ERR_NO_MEMORY;
	}
	*out = tag_reader;
	return EC3_SUCCESS;
}
/*
 * Translate a tag-relative logical cluster index into the container's
 * physical cluster index by scanning the tag's extents.
 * Returns 0 and stores the result in `*physical_cluster` on success,
 * -1 when no extent owned by this tag covers `cluster_id`.
 */
static int logical_cluster_to_physical_cluster(
	struct ec3_tag_reader *tag,
	size_t cluster_id,
	size_t *physical_cluster)
{
	const struct ec3_extent_info *extents = tag->tag_parent->c_extents;
	size_t count = tag->tag_parent->c_info.c_nr_extents;

	for (size_t i = 0; i < count; i++) {
		const struct ec3_extent_info *ex = &extents[i];
		size_t first = ex->ex_logical_cluster;
		size_t end = first + ex->ex_count;

		if (ex->ex_owner != tag->tag_info->tag_ident) {
			continue;
		}
		if (cluster_id >= first && cluster_id < end) {
			*physical_cluster
				= ex->ex_physical_cluster
				+ (cluster_id - first);
			return 0;
		}
	}
	return -1;
}
/*
 * Decode one logical cluster of this tag into tag->tag_buf.
 *
 * Fix: ec3_pipeline_read_cluster takes five arguments
 * (pipeline, buf, max, cluster_id, nr_read) — the previous call
 * omitted the `max` buffer bound.  tag_buf is allocated with the
 * container's cluster size in ec3_reader_open_tag, so that size is
 * passed as the bound.
 *
 * A logical cluster with no extent mapping is treated as end-of-file:
 * `*nr_read` is set to 0 and EC3_SUCCESS is returned.
 */
static enum ec3_status read_cluster(
	struct ec3_tag_reader *tag,
	size_t cluster_id,
	size_t *nr_read)
{
	size_t physical_cluster;

	if (logical_cluster_to_physical_cluster(
		tag,
		cluster_id,
		&physical_cluster)
	    != 0) {
		/* end-of-file */
		*nr_read = 0;
		return EC3_SUCCESS;
	}
	return ec3_pipeline_read_cluster(
		tag->tag_parent->r_pipeline,
		tag->tag_buf,
		tag->tag_parent->c_info.c_cluster_size,
		physical_cluster,
		nr_read);
}
/*
 * Read `nr_clusters` consecutive logical clusters of this tag, starting
 * at `first_cluster`, concatenating the decoded bytes into `buf`.
 * `*bytes_read` receives the total number of bytes copied.
 *
 * Fixes versus the previous revision:
 *  - `first_cluster + nr_clusters - 1` underflowed size_t when
 *    nr_clusters == 0, turning the loop into a near-infinite scan; the
 *    loop now counts clusters directly, so nr_clusters == 0 reads
 *    nothing and succeeds;
 *  - removed an unreachable `#if 0` block after the return that
 *    referenced undeclared identifiers (last_cluster, offset, count).
 *
 * NOTE(review): `buf` must be large enough for nr_clusters full
 * clusters — no capacity parameter is taken; confirm all callers.
 */
enum ec3_status ec3_tag_reader_read(
	struct ec3_tag_reader *tag,
	size_t first_cluster,
	size_t nr_clusters,
	void *buf,
	size_t *bytes_read)
{
	unsigned char *p = buf;
	enum ec3_status status = EC3_SUCCESS;
	size_t nr_read_from_cluster = 0;
	size_t nr_read = 0;

	for (size_t i = 0; i < nr_clusters; i++) {
		status = read_cluster(tag,
				      first_cluster + i,
				      &nr_read_from_cluster);
		if (status != EC3_SUCCESS) {
			return status;
		}
		if (nr_read_from_cluster == 0) {
			/* read_cluster reports end-of-data as 0 bytes. */
			break;
		}
		memcpy(p + nr_read, tag->tag_buf, nr_read_from_cluster);
		nr_read += nr_read_from_cluster;
	}
	*bytes_read = nr_read;
	return EC3_SUCCESS;
}
enum ec3_status ec3_tag_reader_close(struct ec3_tag_reader *tag)

View File

@@ -19,6 +19,9 @@ struct ec3_container_info {
unsigned int c_nr_extents;
unsigned int c_nr_cluster_groups;
unsigned int c_encryption_function;
unsigned int c_compression_function;
uint64_t c_id;
};
@@ -27,6 +30,7 @@ struct ec3_tag_info {
unsigned long tag_flags;
unsigned long tag_checksum;
unsigned long long tag_ident;
unsigned long long tag_total_length;
};
struct ec3_extent_info {
@@ -46,11 +50,21 @@ extern const struct ec3_tag_info *ec3_reader_get_tags(
extern const struct ec3_extent_info *ec3_reader_get_extents(
const struct ec3_reader *reader);
extern const struct ec3_tag_info *ec3_reader_get_tag_info(
const struct ec3_reader *reader,
unsigned long long ident);
extern enum ec3_status ec3_reader_open_tag(
struct ec3_reader *reader,
uint64_t tag_ident,
struct ec3_tag_reader **out);
extern enum ec3_status ec3_tag_reader_read(
struct ec3_tag_reader *tag,
size_t offset,
size_t count,
void *buf,
size_t *nr_read);
extern enum ec3_status ec3_tag_reader_close(struct ec3_tag_reader *tag);
#endif

View File

@@ -11,6 +11,7 @@ enum ec3_status {
EC3_ERR_INVALID_VALUE,
EC3_ERR_NAME_EXISTS,
EC3_ERR_IO_FAILURE,
EC3_ERR_END_OF_FILE,
};
#endif

View File

@@ -42,7 +42,7 @@ static enum ec3_status add_file(
struct ec3_tag_writer *tag = NULL;
enum ec3_status status
= ec3_writer_create_tag(writer, type, id, 0, &tag);
= ec3_writer_create_tag(writer, 0, type, id, 0, &tag);
if (status != EC3_SUCCESS) {
b_err("cannot initialise EC3 tag writer");

View File

@@ -25,7 +25,7 @@ struct ec3_writer {
size_t w_nr_tags;
FILE *w_data;
struct cluster_table w_cluster_table;
FILE *w_cluster_table;
FILE *w_extent_table;
FILE *w_tag_table;
@@ -33,11 +33,13 @@ struct ec3_writer {
};
struct ec3_tag_writer {
size_t w_index;
struct ec3_writer *w_parent;
unsigned long w_type;
uint64_t w_ident;
unsigned long w_flags;
unsigned char *w_buf;
size_t w_total_bytes;
size_t w_ptr;
size_t w_nr_clusters;
b_queue_entry w_entry;
@@ -57,14 +59,18 @@ enum ec3_status ec3_writer_create(
size_t cluster_size = ec3_get_cluster_size(param->p_cluster_size);
FILE *cluster_table = tmpfile();
writer->w_data = param->p_outp;
cluster_table_init(&writer->w_cluster_table, cluster_table);
writer->w_extent_table = tmpfile();
writer->w_tag_table = tmpfile();
writer->w_cluster_table = tmpfile();
struct ec3_pipeline_stage_args stages[3] = {0};
struct ec3_header header = {0};
size_t written = fwrite(&header, sizeof header, 1, writer->w_data);
if (written != 1) {
return EC3_ERR_IO_FAILURE;
}
struct ec3_pipeline_stage_args stages[2] = {0};
if (param->p_compression_func != EC3_COMPRESSION_NONE) {
stages[0].type = ec3_get_pipeline_stage_for_compression_func(
@@ -76,14 +82,15 @@ enum ec3_status ec3_writer_create(
param->p_encryption_func);
}
stages[2].type = EC3_PIPELINE_FILE;
stages[2].arg = writer->w_data;
struct ec3_pipeline *pipeline = NULL;
enum ec3_status status = ec3_pipeline_create(
stages,
sizeof stages / sizeof stages[0],
cluster_size,
writer->w_data,
0,
writer->w_cluster_table,
0,
&pipeline);
if (status != EC3_SUCCESS) {
@@ -92,12 +99,6 @@ enum ec3_status ec3_writer_create(
writer->w_pipeline = pipeline;
struct ec3_header header = {0};
size_t written = fwrite(&header, sizeof header, 1, writer->w_data);
if (written != 1) {
return EC3_ERR_IO_FAILURE;
}
*out = writer;
return EC3_SUCCESS;
}
@@ -167,7 +168,7 @@ void ec3_writer_finish(struct ec3_writer *w)
}
size_t cluster_table_offset = ftell(w->w_data);
status = copy_file(w->w_cluster_table.t_storage, w->w_data);
status = copy_file(w->w_cluster_table, w->w_data);
size_t extent_table_offset = ftell(w->w_data);
status = copy_file(w->w_extent_table, w->w_data);
@@ -187,8 +188,10 @@ void ec3_writer_finish(struct ec3_writer *w)
header.h_tag_count = b_i32_htob(w->w_nr_tags);
header.h_extent_count = b_i32_htob(w->w_nr_extents);
header.h_app_magic = b_i64_htob(w->w_param.p_ident);
header.h_encryption = b_i16_htob(w->w_param.p_encryption_func);
header.h_compression = b_i16_htob(w->w_param.p_compression_func);
header.h_cluster_group_count
= b_i32_htob(w->w_cluster_table.t_nr_groups);
= b_i32_htob(w->w_pipeline->p_cluster_table.t_nr_groups);
fwrite(&header, sizeof header, 1, w->w_data);
}
@@ -215,9 +218,10 @@ static bool is_tag_ident_free(struct ec3_writer *w, uint64_t ident)
enum ec3_status ec3_writer_create_tag(
struct ec3_writer *w,
enum ec3_tag_writer_flags writer_flags,
uint32_t tag_type,
uint64_t tag_ident,
unsigned int flags,
unsigned int tag_flags,
struct ec3_tag_writer **out_writer)
{
struct ec3_tag_writer *tag = malloc(sizeof *tag);
@@ -230,7 +234,7 @@ enum ec3_status ec3_writer_create_tag(
size_t cluster_size = ec3_get_cluster_size(w->w_param.p_cluster_size);
tag->w_parent = w;
tag->w_flags = flags;
tag->w_flags = tag_flags;
tag->w_type = tag_type;
tag->w_ident = tag_ident;
tag->w_buf = malloc(cluster_size);
@@ -240,19 +244,7 @@ enum ec3_status ec3_writer_create_tag(
return EC3_ERR_NO_MEMORY;
}
struct ec3_tag_table_entry entry = {0};
entry.tag_type = b_i32_htob(tag_type);
entry.tag_ident = b_i64_htob(tag_ident);
entry.tag_flags = b_i32_htob(flags);
size_t written = fwrite(&entry, sizeof entry, 1, w->w_tag_table);
if (written != 1) {
free(tag->w_buf);
free(tag);
return EC3_ERR_IO_FAILURE;
}
w->w_nr_tags++;
tag->w_index = w->w_nr_tags++;
*out_writer = tag;
return EC3_SUCCESS;
}
@@ -267,7 +259,7 @@ static enum ec3_status flush_tag_buffer(struct ec3_tag_writer *w)
enum ec3_status status = EC3_SUCCESS;
size_t nr_written = 0;
status = ec3_pipeline_data_out(
status = ec3_pipeline_write_cluster(
container->w_pipeline,
buf,
w->w_ptr,
@@ -277,17 +269,6 @@ static enum ec3_status flush_tag_buffer(struct ec3_tag_writer *w)
return status;
}
struct cluster cluster = {
.c_id = container->w_next_cluster_id++,
.c_base = container->w_data_offset,
.c_len = nr_written,
.c_flags = w->w_flags,
.c_checksum = 0,
};
cluster_table_put(&container->w_cluster_table, &cluster);
container->w_data_offset += nr_written;
w->w_ptr = 0;
w->w_nr_clusters++;
@@ -335,6 +316,7 @@ enum ec3_status ec3_tag_writer_write(
remaining -= to_write;
w->w_ptr += to_write;
w->w_total_bytes += to_write;
if (remaining == 0) {
status = flush_tag_buffer(w);
}
@@ -361,5 +343,18 @@ enum ec3_status ec3_tag_writer_finish(struct ec3_tag_writer *w)
return status;
}
struct ec3_tag_table_entry entry = {0};
entry.tag_type = b_i32_htob(w->w_type);
entry.tag_ident = b_i64_htob(w->w_ident);
entry.tag_flags = b_i32_htob(w->w_flags);
entry.tag_length = b_i64_htob(w->w_total_bytes);
size_t written
= fwrite(&entry, sizeof entry, 1, w->w_parent->w_tag_table);
if (written != 1) {
return EC3_ERR_IO_FAILURE;
}
return status;
}

View File

@@ -19,6 +19,10 @@ struct ec3_parameters {
size_t p_encryption_key_size;
};
enum ec3_tag_writer_flags {
EC3_TAG_WRITER_BUFFERED = 0x01u,
};
struct ec3_writer;
struct ec3_tag_writer;
@@ -30,9 +34,10 @@ extern void ec3_writer_write_image(struct ec3_writer *w, FILE *fp);
extern enum ec3_status ec3_writer_create_tag(
struct ec3_writer *w,
enum ec3_tag_writer_flags writer_flags,
uint32_t tag_type,
uint64_t tag_ident,
unsigned int flags,
unsigned int tag_flags,
struct ec3_tag_writer **out_writer);
extern enum ec3_status ec3_tag_writer_write(

View File

@@ -7,24 +7,28 @@ static enum ec3_status compress(
const void *src,
size_t len,
void *dest,
size_t dest_max,
size_t *nr_written)
{
*nr_written = ZSTD_compress(dest, len, src, len, 10);
*nr_written = ZSTD_compress(dest, dest_max, src, len, 10);
return EC3_SUCCESS;
}
/*
 * zstd input stage: decompress `len` bytes from `src` into `dest`
 * (capacity `dest_max`), reporting the decompressed size via `*nr_read`.
 *
 * NOTE(review): ZSTD_decompress returns an error code on failure
 * (detectable with ZSTD_isError); that value is stored into *nr_read
 * unchecked and EC3_SUCCESS is returned regardless — confirm whether
 * failures should map to an EC3 error status.
 */
static enum ec3_status decompress(
struct ec3_pipeline_stage *stage,
const void *src,
size_t len,
void *dest,
size_t dest_max,
size_t *nr_read)
{
*nr_read = ZSTD_decompress(dest, dest_max, src, len);
return EC3_SUCCESS;
}
const struct ec3_pipeline_stage_type pipeline_zstd = {
.t_id = EC3_PIPELINE_ZSTD,
.t_flags = EC3_PIPELINE_F_BUFFERED,
.t_data_in = decompress,
.t_data_out = compress,
.t_cluster_in = decompress,
.t_cluster_out = compress,
};