Replace ec3_reader and ec3_writer with a unified image and tag I/O interface

This commit is contained in:
2025-02-23 20:52:59 +00:00
parent 2e4ee5c1b6
commit 9aeae388a4
32 changed files with 3458 additions and 264 deletions

View File

@@ -23,6 +23,7 @@
#define EC3_TAG_VOLU 0x564F4C55
#define EC3_TAG_CTAB 0x43544142
#define EC3_TAG_CDAT 0x43444154
#define EC3_TAG_XATR 0x58415452
#define EC3_TAG_STAB 0x53544142
#define EC3_TAG_MFST 0x4D465354
@@ -159,7 +160,6 @@ struct ec3_chunk_group {
b_i32 g_child_offsets[EC3_CHUNKS_PER_GROUP_4K + 1];
} g_4k;
#if 0
struct {
struct ec3_chunk g_chunks[EC3_CHUNKS_PER_GROUP_8K];
b_i32 g_child_offsets[EC3_CHUNKS_PER_GROUP_8K + 1];
@@ -179,7 +179,6 @@ struct ec3_chunk_group {
struct ec3_chunk g_chunks[EC3_CHUNKS_PER_GROUP_64K];
b_i32 g_child_offsets[EC3_CHUNKS_PER_GROUP_64K + 1];
} g_64k;
#endif
};
};

View File

@@ -1,6 +1,18 @@
#if 0
#include "bin.h"
#include "chunk-table.h"
#include "commands.h"
#include "misc.h"
#include "pipeline.h"
#include "status.h"
#include "string-table.h"
#include "write.h"
#include <blue/cmd.h>
#include <blue/io/directory.h>
#include <blue/term/print.h>
#include <errno.h>
#include <stdio.h>
#define BUFFER_SIZE 65536
@@ -8,7 +20,11 @@ enum {
OPT_OUTPATH,
OPT_OUTPATH_PATH,
ARG_DIRECTORY,
OPT_IDENT,
OPT_IDENT_VAL,
OPT_DIRECTORY,
OPT_DIRECTORY_PATH,
OPT_TAGGED_DIRECTORY,
OPT_TAGGED_DIRECTORY_TAG,
@@ -17,11 +33,206 @@ enum {
OPT_VERBOSE,
};
/*
 * Record the contents of the directory at 'cpath' into a new VOLU tag
 * with identifier 'id' inside 'writer'.
 *
 * NOTE(review): this is work-in-progress scaffolding (the enclosing file
 * is under #if 0): 'chunk_tab' is accepted but never used, 'key' is
 * computed and discarded, and the loop only prints each path instead of
 * writing directory entries.
 *
 * Returns EC3_SUCCESS, EC3_ERR_NO_ENTRY when the directory cannot be
 * opened, or the error from ec3_writer_create_tag().
 */
static enum ec3_status capture_directory(
struct ec3_writer *writer,
struct chunk_table *chunk_tab,
uint64_t id,
const char *cpath)
{
/* The b_path wrapper is only needed to open the directory. */
b_path *path = b_path_create_from_cstr(cpath);
b_directory *dir;
b_status status = b_directory_open(NULL, path, &dir);
b_path_release(path);
if (!B_OK(status)) {
return EC3_ERR_NO_ENTRY;
}
/* One VOLU tag per captured directory. */
struct ec3_tag_writer *volu;
enum ec3_status s2 = ec3_writer_create_tag(
writer,
EC3_TAG_WRITER_BUFFERED,
EC3_TAG_VOLU,
id,
0,
&volu);
if (s2 != EC3_SUCCESS) {
b_directory_release(dir);
return s2;
}
/* File names are interned in the writer's shared string table. */
struct string_table *stab = ec3_writer_get_strings(writer);
b_directory_iterator it;
b_directory_iterator_begin(dir, &it, B_DIRECTORY_ITERATE_PARENT_FIRST);
while (b_directory_iterator_is_valid(&it)) {
/* debug output only; no entry is written yet */
printf("%s\n", b_path_ptr(it.filepath));
size_t key = string_table_get(stab, it.filename);
b_directory_iterator_next(&it);
}
ec3_tag_writer_finish(volu);
b_directory_release(dir);
return EC3_SUCCESS;
}
static int capture(
const b_command *self,
const b_arglist *opt,
const b_array *args)
{
const char *out_path = NULL;
b_arglist_get_string(opt, OPT_OUTPATH, OPT_OUTPATH_PATH, 0, &out_path);
FILE *outp = fopen(out_path, "wb");
if (!outp) {
b_err("cannot open '%s'", out_path);
b_i("reason: %s", strerror(errno));
return -1;
}
enum ec3_status status = EC3_SUCCESS;
uint64_t ident = 0;
const char *ident_str;
b_arglist_get_string(opt, OPT_IDENT, OPT_IDENT_VAL, 0, &ident_str);
if (ident_str) {
status = ec3_identifier_from_string(ident_str, &ident);
}
if (status != EC3_SUCCESS) {
b_err("'%s' is not a valid container identifier", ident_str);
return -1;
}
struct ec3_writer *writer = NULL;
struct ec3_parameters param = {
.p_outp = outp,
.p_cluster_size = EC3_CLUSTER_16K,
.p_compression_func = EC3_COMPRESSION_ZSTD,
.p_ident = ident,
};
status = ec3_writer_create(&param, &writer);
if (status != EC3_SUCCESS) {
b_err("cannot initialise EC3 writer");
return -1;
}
uint64_t ctab_id, cdat_id;
ec3_identifier_from_string("_CHKTAB0", &ctab_id);
ec3_identifier_from_string("_CHKDAT0", &cdat_id);
struct ec3_tag_writer *ctab, *cdat;
status = ec3_writer_create_tag(
writer,
EC3_TAG_WRITER_BUFFERED,
EC3_TAG_CTAB,
ctab_id,
0,
&ctab);
if (status != EC3_SUCCESS) {
b_err("cannot initialise EC3 writer");
return -1;
}
status = ec3_writer_create_tag(
writer,
EC3_TAG_WRITER_BUFFERED,
EC3_TAG_CDAT,
cdat_id,
0,
&cdat);
if (status != EC3_SUCCESS) {
b_err("cannot initialise EC3 writer");
return -1;
}
struct chunk_table chunk_tab;
#if 0
chunk_table_init_write(
ctab,
cdat,
ec3_get_cluster_size(param.p_cluster_size),
&chunk_tab);
#endif
uint64_t next_auto_id = 0;
b_arglist_iterator it = {0};
b_arglist_foreach_filtered(&it, opt, OPT_DIRECTORY, OPT_DIRECTORY_PATH)
{
printf("%s\n", it.value->val_str);
status = capture_directory(
writer,
&chunk_tab,
next_auto_id,
it.value->val_str);
next_auto_id++;
if (status != EC3_SUCCESS) {
b_err("an error occurred while writing to the "
"container");
return -1;
}
}
for (size_t i = 0;; i++) {
b_arglist_option *option = NULL;
b_status err = b_arglist_get_option(
opt,
OPT_TAGGED_DIRECTORY,
i,
&option);
if (!option) {
break;
}
b_arglist_value *tag = NULL, *path = NULL;
err = b_arglist_option_get_value(
option,
OPT_TAGGED_DIRECTORY_TAG,
0,
&tag);
err = b_arglist_option_get_value(
option,
OPT_TAGGED_DIRECTORY_PATH,
0,
&path);
printf("%s:%s\n", tag->val_str, path->val_str);
uint64_t id = 0;
status = ec3_identifier_from_string(tag->val_str, &id);
if (status != EC3_SUCCESS) {
b_err("'%s' is not a valid tag identifier", id);
return -1;
}
status = capture_directory(
writer,
&chunk_tab,
id,
path->val_str);
if (status != EC3_SUCCESS) {
b_err("an error occurred while writing to the "
"container");
return -1;
}
}
ec3_tag_writer_finish(ctab);
ec3_tag_writer_finish(cdat);
ec3_writer_finish(writer);
fclose(outp);
return 0;
}
@@ -39,6 +250,21 @@ B_COMMAND(CMD_CAPTURE, CMD_ROOT)
B_COMMAND_HELP_OPTION();
B_COMMAND_OPTION(OPT_IDENT)
{
B_OPTION_SHORT_NAME('I');
B_OPTION_LONG_NAME("ident");
B_OPTION_DESC(
"the string or number to use as the container "
"identifier");
B_OPTION_ARG(OPT_IDENT_VAL)
{
B_ARG_NAME("value");
B_ARG_NR_VALUES(1);
}
}
B_COMMAND_OPTION(OPT_OUTPATH)
{
B_OPTION_SHORT_NAME('o');
@@ -52,16 +278,21 @@ B_COMMAND(CMD_CAPTURE, CMD_ROOT)
}
}
B_COMMAND_ARG(ARG_DIRECTORY)
B_COMMAND_OPTION(OPT_DIRECTORY)
{
B_ARG_NAME("directory");
B_ARG_DESC(
B_OPTION_SHORT_NAME('d');
B_OPTION_LONG_NAME("directory");
B_OPTION_DESC(
"a directory to add to the container. a volume "
"will be created "
"within the container to store the specified "
"directory.");
B_ARG_NR_VALUES(B_ARG_1_OR_MORE_VALUES);
B_OPTION_ARG(OPT_DIRECTORY_PATH)
{
B_ARG_NAME("path");
B_ARG_NR_VALUES(1);
}
}
B_COMMAND_OPTION(OPT_TAGGED_DIRECTORY)
@@ -104,7 +335,7 @@ B_COMMAND(CMD_CAPTURE, CMD_ROOT)
B_COMMAND_USAGE()
{
B_COMMAND_USAGE_OPT(OPT_OUTPATH);
B_COMMAND_USAGE_ARG(ARG_DIRECTORY);
B_COMMAND_USAGE_OPT(OPT_DIRECTORY);
}
B_COMMAND_USAGE()
@@ -113,3 +344,4 @@ B_COMMAND(CMD_CAPTURE, CMD_ROOT)
B_COMMAND_USAGE_OPT(OPT_TAGGED_DIRECTORY);
}
}
#endif

280
src/chunk-table.c Normal file
View File

@@ -0,0 +1,280 @@
#include "chunk-table.h"
#include "b-tree.h"
#include "pipeline.h"
#include "read.h"
#include <string.h>
#if 0
struct cache_entry {
b_btree_node e_node;
ec3_chunk_id e_id;
};
/* Ordering callback for the chunk cache B-tree: chunk ids compare as
 * raw byte strings. */
static int compare_cache_node(
	const struct cache_entry *a,
	const struct cache_entry *b)
{
	const struct cache_entry *lhs = a, *rhs = b;
	return memcmp(lhs->e_id, rhs->e_id, sizeof lhs->e_id);
}
B_BTREE_DEFINE_INSERT(
struct cache_entry,
e_node,
e_id,
get_cache_entry,
compare_cache_node)
/*
 * Look up a cached chunk entry by id.  Walks the binary tree using the
 * same byte-wise ordering as compare_cache_node().
 * Returns NULL when the id is not present.
 */
struct cache_entry *cache_entry_get(const b_btree *tree, ec3_chunk_id key)
{
	for (b_btree_node *walk = tree->b_root; walk != NULL;) {
		struct cache_entry *candidate
			= b_unbox(struct cache_entry, walk, e_node);
		int rel = memcmp(key, candidate->e_id, sizeof candidate->e_id);
		if (rel == 0) {
			return candidate;
		}
		walk = rel > 0 ? b_btree_right(walk) : b_btree_left(walk);
	}
	return NULL;
}
/*
 * Zero a chunk group and mark every child slot as unoccupied.
 * The group layout variant is selected by the table's cluster size.
 * Returns 0 on success, -1 for an unrecognised cluster size.
 *
 * NOTE(review): tab_cluster_size is compared against EC3_CLUSTER_* size
 * ids here, but chunk_table_init_* stores whatever the caller passes —
 * capture() passes a byte count; confirm which unit is intended.
 */
static int node_init(struct chunk_table *tab, struct ec3_chunk_group *n)
{
	memset(n, 0x0, sizeof *n);
	b_i32 *children;
	unsigned int nr_chunks;
	switch (tab->tab_cluster_size) {
	case EC3_CLUSTER_4K:
		children = n->g_4k.g_child_offsets;
		nr_chunks = EC3_CHUNKS_PER_GROUP_4K;
		break;
	case EC3_CLUSTER_8K:
		children = n->g_8k.g_child_offsets;
		nr_chunks = EC3_CHUNKS_PER_GROUP_8K;
		break;
	case EC3_CLUSTER_16K:
		children = n->g_16k.g_child_offsets;
		nr_chunks = EC3_CHUNKS_PER_GROUP_16K;
		break;
	case EC3_CLUSTER_32K:
		children = n->g_32k.g_child_offsets;
		nr_chunks = EC3_CHUNKS_PER_GROUP_32K;
		break;
	case EC3_CLUSTER_64K:
		children = n->g_64k.g_child_offsets;
		nr_chunks = EC3_CHUNKS_PER_GROUP_64K;
		break;
	default:
		return -1;
	}
	/* BUG FIX: a group with N entries has N + 1 child slots (the
	 * arrays are declared EC3_CHUNKS_PER_GROUP_* + 1); the old loop
	 * left the last slot zeroed, which decodes as a valid offset 0
	 * rather than EC3_INVALID_OFFSET. */
	for (unsigned int i = 0; i < nr_chunks + 1; i++) {
		children[i] = b_i32_htob(EC3_INVALID_OFFSET);
	}
	return 0;
}
/*
 * b_tree backend: fetch group 'id' from the CTAB tag into 'n'.
 * Only valid for tables opened in read mode.
 *
 * NOTE(review): the return value of ec3_tag_reader_read() is ignored,
 * and 'nr_read' (requested count of 1) is compared against the cluster
 * size in bytes — presumably the reader reports bytes transferred;
 * confirm against the tag-reader API.
 */
static int tree_get_node(struct b_tree *p, unsigned long id, b_tree_node *n)
{
struct chunk_table *table = (struct chunk_table *)p;
if (table->tab_mode != CHUNK_TABLE_READ) {
return -1;
}
size_t nr_read;
ec3_tag_reader_read(table->tab_read.ctab, id, 1, n, &nr_read);
size_t cluster_size = ec3_get_cluster_size(table->tab_cluster_size);
return nr_read == cluster_size ? 0 : -1;
}
/*
 * b_tree backend: write group 'id' back to backing storage.
 *
 * NOTE(review): this references t_offset/t_storage, which are not
 * members of struct chunk_table as declared in chunk-table.h (the whole
 * file is compiled out with #if 0, so this never builds) — it appears
 * to be carried over from cluster-table.c and needs porting to the
 * tag-writer interface.  fseek() failure is also unchecked.
 */
static int tree_put_node(
struct b_tree *p,
unsigned long id,
const b_tree_node *n)
{
struct chunk_table *table = (struct chunk_table *)p;
size_t offset = table->t_offset + (id * sizeof(struct ec3_chunk_group));
fseek(table->t_storage, offset, SEEK_SET);
size_t r = fwrite(
n,
sizeof(struct ec3_chunk_group),
1,
table->t_storage);
return r == 1 ? 0 : -1;
}
/*
 * b_tree backend: append a freshly-initialised group to the end of the
 * backing storage and return its index, or -1 on failure.
 *
 * NOTE(review): like tree_put_node(), this still uses t_storage/t_offset
 * which struct chunk_table no longer declares (file is #if 0'd out).
 */
static long tree_alloc_node(struct b_tree *p)
{
	struct chunk_table *table = (struct chunk_table *)p;
	size_t pos = ftell(table->t_storage);
	fseek(table->t_storage, 0, SEEK_END);
	size_t len = ftell(table->t_storage);
	struct ec3_chunk_group *n
		= (struct ec3_chunk_group *)b_tree_cache_alloc_node(p);
	/* BUG FIX: node_init() takes the owning table as its first
	 * argument; the old single-argument call could not compile. */
	if (node_init(table, n) != 0) {
		return -1;
	}
	/* BUG FIX: write the node contents, not the address of the
	 * local pointer (the old code passed &n, serialising the
	 * pointer value plus adjacent stack bytes).  The write is now
	 * also checked. */
	if (fwrite(n, sizeof *n, 1, table->t_storage) != 1) {
		return -1;
	}
	fseek(table->t_storage, pos, SEEK_SET);
	len /= sizeof *n;
	table->t_nr_groups++;
	return (long)len;
}
/* b_tree backend: number of live chunk entries in a group (stored
 * big-endian on disk). */
static unsigned long node_get_nr_entries(b_tree_node *n)
{
	const struct ec3_chunk_group *group
		= (const struct ec3_chunk_group *)n;
	return b_i16_btoh(group->g_nr_chunks);
}
/* b_tree backend: store the live-entry count of a group (big-endian). */
static void node_set_nr_entries(b_tree_node *n, unsigned long val)
{
	((struct ec3_chunk_group *)n)->g_nr_chunks = b_i16_htob(val);
}
/* b_tree backend: borrow a pointer to the index'th chunk entry. */
static b_tree_node_entry *node_get_entry(b_tree_node *n, unsigned long index)
{
	struct ec3_chunk_group *group = (struct ec3_chunk_group *)n;
	return (b_tree_node_entry *)(group->g_chunks + index);
}
/* b_tree backend: copy 'entry' into slot 'index'. */
static void node_set_entry(
	b_tree_node *n,
	unsigned long index,
	const b_tree_node_entry *entry)
{
	struct ec3_chunk_group *group = (struct ec3_chunk_group *)n;
	/* memmove: the source may alias another slot of the same node
	 * when the tree shifts entries. */
	memmove(group->g_chunks + index, entry, sizeof group->g_chunks[0]);
}
/* b_tree backend: clear slot 'index' back to all-zero bytes. */
static void node_kill_entry(b_tree_node *n, unsigned long index)
{
	struct ec3_chunk_group *group = (struct ec3_chunk_group *)n;
	memset(group->g_chunks + index, 0, sizeof group->g_chunks[0]);
}
/* b_tree backend: decode the child pointer at 'index'; the on-disk
 * sentinel EC3_INVALID_OFFSET maps to B_TREE_INVALID_PTR. */
static unsigned long node_get_child(b_tree_node *n, unsigned long index)
{
	struct ec3_chunk_group *group = (struct ec3_chunk_group *)n;
	unsigned long decoded = b_i32_btoh(group->g_child_offsets[index]);
	if (decoded == EC3_INVALID_OFFSET) {
		return B_TREE_INVALID_PTR;
	}
	return decoded;
}
/* b_tree backend: encode the child pointer at 'index';
 * B_TREE_INVALID_PTR maps to the on-disk sentinel EC3_INVALID_OFFSET. */
static void node_set_child(
	b_tree_node *n,
	unsigned long index,
	unsigned long ptr)
{
	struct ec3_chunk_group *node = (struct ec3_chunk_group *)n;
	/* BUG FIX: child slots are 32-bit (b_i32) on disk; the previous
	 * (uint16_t) cast silently truncated any group index above
	 * 65535, corrupting large tables. */
	unsigned long child = ptr == B_TREE_INVALID_PTR ? EC3_INVALID_OFFSET
							: (uint32_t)ptr;
	node->g_child_offsets[index] = b_i32_htob(child);
}
static int entry_compare(
const b_tree_node_entry *e0,
const b_tree_node_entry *e1)
{
struct ec3_chunk *a = (struct ec3_chunk *)e0,
*b = (struct ec3_chunk *)e1;
unsigned long a_id = b_i32_btoh(a->c_id);
unsigned long b_id = b_i32_btoh(b->c_id);
if (a_id < b_id) {
return -1;
}
if (a_id > b_id) {
return 1;
}
return 0;
}
static const struct b_tree_ops cluster_table_ops = {
.tree_get_node = tree_get_node,
.tree_put_node = tree_put_node,
.tree_alloc_node = tree_alloc_node,
.node_get_nr_entries = node_get_nr_entries,
.node_set_nr_entries = node_set_nr_entries,
.node_get_entry = node_get_entry,
.node_set_entry = node_set_entry,
.node_kill_entry = node_kill_entry,
.node_get_child = node_get_child,
.node_set_child = node_set_child,
.entry_compare = entry_compare,
};
/*
 * Prepare 'tab' for reading chunks out of an existing CTAB/CDAT tag
 * pair.  Never fails at present.
 */
enum ec3_status chunk_table_init_read(
	struct ec3_tag_reader *ctab,
	struct ec3_tag_reader *cdat,
	size_t cluster_size,
	struct chunk_table *tab)
{
	memset(tab, 0x0, sizeof *tab);
	tab->tab_mode = CHUNK_TABLE_READ;
	tab->tab_cluster_size = cluster_size;
	tab->tab_read.ctab = ctab;
	tab->tab_read.cdat = cdat;
	return EC3_SUCCESS;
}
/*
 * Prepare 'tab' for writing chunks into a fresh CTAB/CDAT tag pair.
 * Never fails at present.
 */
enum ec3_status chunk_table_init_write(
	struct ec3_tag_writer *ctab,
	struct ec3_tag_writer *cdat,
	size_t cluster_size,
	struct chunk_table *tab)
{
	memset(tab, 0x0, sizeof *tab);
	tab->tab_mode = CHUNK_TABLE_WRITE;
	tab->tab_cluster_size = cluster_size;
	tab->tab_write.ctab = ctab;
	tab->tab_write.cdat = cdat;
	return EC3_SUCCESS;
}
/* Tear down a chunk table.  Currently a stub: nothing to release yet
 * (the cache tree and any pending writes are not implemented). */
void chunk_table_finish(struct chunk_table *tab)
{
}
/* Fetch the chunk identified by 'id' into 'out_data'/'out_len'.
 * NOTE(review): unimplemented stub — returns EC3_SUCCESS without
 * touching the output parameters; callers must not rely on it yet. */
enum ec3_status chunk_table_get(
struct chunk_table *tab,
ec3_chunk_id id,
void *out_data,
size_t *out_len)
{
return EC3_SUCCESS;
}
/* Store 'data'/'len' as a chunk and report its id through 'out_id'.
 * NOTE(review): unimplemented stub — returns EC3_SUCCESS without
 * writing anything or assigning 'out_id'. */
enum ec3_status chunk_table_put(
struct chunk_table *tab,
const void *data,
size_t len,
ec3_chunk_id out_id)
{
return EC3_SUCCESS;
}
#endif

62
src/chunk-table.h Normal file
View File

@@ -0,0 +1,62 @@
#ifndef CHUNK_TABLE_H_
#define CHUNK_TABLE_H_
#include "bin.h"
#include <blue/core/btree.h>
#include <blue/core/hash.h>
#include <stdio.h>
/* Opaque tag I/O contexts; this header only holds pointers to them. */
struct ec3_tag_reader;
struct ec3_tag_writer;
/* A chunk table instance is bound to exactly one direction of I/O. */
enum chunk_table_mode {
CHUNK_TABLE_READ,
CHUNK_TABLE_WRITE,
};
/*
 * Content-addressed chunk store backed by a pair of tags: CTAB holds
 * the lookup structure, CDAT holds the chunk payloads.  Which union arm
 * is valid is selected by tab_mode.
 *
 * NOTE(review): tab_cluster_size is set from a size_t parameter but the
 * (currently #if 0'd) implementation switches on it with EC3_CLUSTER_*
 * ids — confirm whether it carries a size id or a byte count.
 */
struct chunk_table {
enum chunk_table_mode tab_mode;
unsigned int tab_cluster_size;
/* in-memory cache of chunk groups, keyed by chunk id */
b_btree tab_cache;
union {
struct {
struct ec3_tag_reader *ctab;
struct ec3_tag_reader *cdat;
} tab_read;
struct {
struct ec3_tag_writer *ctab;
struct ec3_tag_writer *cdat;
} tab_write;
};
};
/* Bind 'tab' to an existing CTAB/CDAT pair for reading. */
extern enum ec3_status chunk_table_init_read(
struct ec3_tag_reader *ctab,
struct ec3_tag_reader *cdat,
size_t cluster_size,
struct chunk_table *tab);
/* Bind 'tab' to a fresh CTAB/CDAT pair for writing. */
extern enum ec3_status chunk_table_init_write(
struct ec3_tag_writer *ctab,
struct ec3_tag_writer *cdat,
size_t cluster_size,
struct chunk_table *tab);
/* Release table resources (currently a no-op stub). */
extern void chunk_table_finish(struct chunk_table *tab);
/* Fetch a chunk by id (currently a stub). */
extern enum ec3_status chunk_table_get(
struct chunk_table *tab,
ec3_chunk_id id,
void *out_data,
size_t *out_len);
/* Store a chunk and report its id (currently a stub). */
extern enum ec3_status chunk_table_put(
struct chunk_table *tab,
const void *data,
size_t len,
ec3_chunk_id out_id);
/* NOTE(review): declared but no definition is visible in chunk-table.c. */
extern size_t chunk_table_bytes_per_chunk(struct chunk_table *tab);
#endif

245
src/cluster-cache.c Normal file
View File

@@ -0,0 +1,245 @@
#include "cluster-cache.h"
#include <blue/io/file.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
struct cluster_cache_entry {
size_t e_cluster_id;
size_t e_storage_offset;
size_t e_length;
void *e_data;
b_btree_node e_node;
};
B_BTREE_DEFINE_SIMPLE_GET(
struct cluster_cache_entry,
size_t,
e_node,
e_cluster_id,
get_cluster)
B_BTREE_DEFINE_SIMPLE_INSERT(
struct cluster_cache_entry,
e_node,
e_cluster_id,
put_cluster)
/*
 * Initialise a cluster cache.  When 'use_disk' is set, a temporary file
 * backs clusters whose in-memory buffers have been evicted; otherwise
 * evicted data is lost (see evict_one_cache_data()).
 *
 * Returns EC3_SUCCESS, or an I/O error when the temp file cannot be
 * created.
 */
enum ec3_status cluster_cache_init(
	struct cluster_cache *out,
	bool use_disk,
	size_t cluster_size)
{
	memset(out, 0x0, sizeof *out);
	if (use_disk) {
		enum b_status status = b_file_open_temp(
			B_FILE_READ_WRITE | B_FILE_BINARY,
			&out->c_storage);
		if (!B_OK(status)) {
			return ec3_status_from_b_status(
				status,
				EC3_ERR_IO_FAILURE);
		}
	}
	/* BUG FIX: the in-memory budget belongs in c_memcache_max;
	 * c_memcache_count tracks currently-resident buffers and must
	 * start at zero.  The old code set c_memcache_count = 4 and left
	 * c_memcache_max at 0, so every get/put immediately took the
	 * eviction path (and failed with EC3_ERR_BAD_STATE on an empty
	 * cache). */
	out->c_memcache_max = 4;
	out->c_cluster_size = cluster_size;
	return EC3_SUCCESS;
}
void cluster_cache_finish(struct cluster_cache *cache)
{
if (cache->c_storage) {
b_file_release(cache->c_storage);
}
}
/*
 * Take the memory buffer away from some cached entry so it can be
 * reused: the first entry (in tree order) that still holds a resident
 * buffer is chosen, its data is flushed to the backing temp file, and
 * its buffer is handed to the caller through 'buf'.
 *
 * NOTE(review): when the cache was created without disk backing
 * (c_storage == NULL) the evicted entry's data is silently discarded —
 * a later cluster_cache_get() for it returns EC3_ERR_NO_ENTRY.  Confirm
 * this lossy behaviour is intended for the memory-only configuration.
 *
 * Returns EC3_ERR_BAD_STATE when no entry has a resident buffer, or an
 * I/O error when the flush fails (the entry then keeps its buffer).
 */
static enum ec3_status evict_one_cache_data(
struct cluster_cache *cache,
void **buf)
{
struct cluster_cache_entry *entry = NULL;
b_btree_iterator it;
b_btree_foreach(&it, &cache->c_entries)
{
struct cluster_cache_entry *cur
= b_unbox(struct cluster_cache_entry, it.node, e_node);
if (cur->e_data) {
entry = cur;
break;
}
}
if (!entry) {
return EC3_ERR_BAD_STATE;
}
if (cache->c_storage) {
size_t nr_written;
b_status status = b_file_write(
cache->c_storage,
entry->e_storage_offset,
entry->e_length,
entry->e_data,
&nr_written);
if (!B_OK(status)) {
return ec3_status_from_b_status(
status,
EC3_ERR_IO_FAILURE);
}
if (nr_written != entry->e_length) {
return EC3_ERR_IO_FAILURE;
}
}
/* ownership of the buffer transfers to the caller */
*buf = entry->e_data;
entry->e_data = NULL;
return EC3_SUCCESS;
}
/*
 * Fetch cluster 'cluster_id' into 'out' (which must hold at least
 * c_cluster_size bytes) and report its logical length via
 * 'cluster_size'.  Data is served from the in-memory buffer when
 * resident, otherwise re-read from the backing temp file.
 *
 * Returns EC3_ERR_NO_ENTRY when the cluster was never stored (or was
 * evicted from a memory-only cache), EC3_ERR_NO_MEMORY / I/O errors
 * otherwise.
 */
enum ec3_status cluster_cache_get(
	struct cluster_cache *cache,
	size_t cluster_id,
	void *out,
	size_t *cluster_size)
{
	struct cluster_cache_entry *entry
		= get_cluster(&cache->c_entries, cluster_id);
	if (!entry) {
		return EC3_ERR_NO_ENTRY;
	}
	if (entry->e_data) {
		/* this cluster's data is still cached in memory. */
		memcpy(out, entry->e_data, cache->c_cluster_size);
		*cluster_size = entry->e_length;
		return EC3_SUCCESS;
	}
	if (!cache->c_storage) {
		return EC3_ERR_NO_ENTRY;
	}
	/* The data lives on disk: acquire a buffer — by eviction when
	 * the memory budget is exhausted, by allocation otherwise. */
	enum ec3_status status = EC3_SUCCESS;
	void *buf;
	if (cache->c_memcache_count >= cache->c_memcache_max) {
		status = evict_one_cache_data(cache, &buf);
	} else {
		buf = malloc(cache->c_cluster_size);
		if (buf) {
			cache->c_memcache_count++;
		}
		status = buf ? EC3_SUCCESS : EC3_ERR_NO_MEMORY;
	}
	if (status != EC3_SUCCESS) {
		return status;
	}
	size_t nr_read = 0;
	enum b_status status2 = b_file_read(
		cache->c_storage,
		entry->e_storage_offset,
		entry->e_length,
		buf,
		&nr_read);
	if (!B_OK(status2) || nr_read != entry->e_length) {
		/* BUG FIX: the old code leaked 'buf' (and its budget
		 * slot) on every failed read. */
		free(buf);
		cache->c_memcache_count--;
		return !B_OK(status2)
			? ec3_status_from_b_status(status2, EC3_ERR_IO_FAILURE)
			: EC3_ERR_IO_FAILURE;
	}
	/* BUG FIX: re-attach the buffer to the entry so the data stays
	 * cached; the old code dropped 'buf' here, leaking one buffer
	 * per disk-satisfied miss. */
	entry->e_data = buf;
	memcpy(out, buf, nr_read);
	*cluster_size = nr_read;
	return EC3_SUCCESS;
}
/*
 * Store 'len' bytes (at most c_cluster_size) as cluster 'cluster_id',
 * creating the entry on first use and overwriting in place thereafter.
 *
 * Returns EC3_ERR_INVALID_VALUE for oversized payloads,
 * EC3_ERR_NO_MEMORY on allocation failure, or an eviction error.
 */
enum ec3_status cluster_cache_put(
	struct cluster_cache *cache,
	size_t cluster_id,
	const void *data,
	size_t len)
{
	if (len > cache->c_cluster_size) {
		return EC3_ERR_INVALID_VALUE;
	}
	struct cluster_cache_entry *entry
		= get_cluster(&cache->c_entries, cluster_id);
	if (!entry) {
		entry = malloc(sizeof *entry);
		if (!entry) {
			return EC3_ERR_NO_MEMORY;
		}
		memset(entry, 0x0, sizeof *entry);
		entry->e_cluster_id = cluster_id;
		if (cache->c_storage) {
			/* NOTE(review): on-disk slots are assigned at
			 * the current file size, i.e. packed at the
			 * length of the first write — rewriting a
			 * cluster with a longer payload would spill into
			 * the next slot.  Confirm intended. */
			b_file_size(cache->c_storage, &entry->e_storage_offset);
		}
		put_cluster(&cache->c_entries, entry);
	}
	/* Acquire a buffer: reuse the entry's resident one, evict when
	 * the budget is exhausted, allocate otherwise. */
	void *buf = NULL;
	enum ec3_status status = EC3_SUCCESS;
	if (entry->e_data) {
		buf = entry->e_data;
	} else if (cache->c_memcache_count >= cache->c_memcache_max) {
		status = evict_one_cache_data(cache, &buf);
	} else {
		buf = malloc(cache->c_cluster_size);
		if (buf) {
			cache->c_memcache_count++;
		}
		status = buf ? EC3_SUCCESS : EC3_ERR_NO_MEMORY;
	}
	if (status != EC3_SUCCESS) {
		return status;
	}
	memcpy(buf, data, len);
	entry->e_data = buf;
	/* BUG FIX: keep e_length in sync when an existing cluster is
	 * overwritten with a different payload size; it was previously
	 * only set at entry creation, so reads returned a stale length. */
	entry->e_length = len;
	return EC3_SUCCESS;
}
/*
 * Report the largest cluster id ever stored in the cache (entries are
 * keyed by id, so the tree's last node holds the maximum).
 * Returns EC3_ERR_NO_ENTRY when the cache is empty.
 */
enum ec3_status cluster_cache_get_highest_cluster_id(
	struct cluster_cache *cache,
	size_t *highest_cluster)
{
	b_btree_node *last = b_btree_last(&cache->c_entries);
	if (last == NULL) {
		return EC3_ERR_NO_ENTRY;
	}
	*highest_cluster
		= b_unbox(struct cluster_cache_entry, last, e_node)
			  ->e_cluster_id;
	return EC3_SUCCESS;
}

40
src/cluster-cache.h Normal file
View File

@@ -0,0 +1,40 @@
#ifndef CLUSTER_CACHE_H_
#define CLUSTER_CACHE_H_
#include "status.h"
#include <blue/core/btree.h>
/* BUG FIX: the cluster_cache_init() prototype uses 'bool' but nothing
 * here included it; pre-C23 translation units including this header
 * alone would fail to compile. */
#include <stdbool.h>
#include <stddef.h>
struct b_file;
/*
 * Bounded in-memory cache of fixed-size clusters, optionally spilling
 * evicted buffers to a temporary file (c_storage).
 */
struct cluster_cache {
	/* temp file for evicted data; NULL for a memory-only cache */
	struct b_file *c_storage;
	/* size of every cluster buffer, in bytes */
	size_t c_cluster_size;
	/* number of buffers currently resident in memory */
	size_t c_memcache_count;
	/* resident-buffer budget before eviction kicks in */
	size_t c_memcache_max;
	/* entries keyed by cluster id */
	b_btree c_entries;
};
/* Initialise 'out'; with use_disk, evicted clusters go to a temp file. */
extern enum ec3_status cluster_cache_init(
	struct cluster_cache *out,
	bool use_disk,
	size_t cluster_size);
/* Release all cache resources. */
extern void cluster_cache_finish(struct cluster_cache *cache);
/* Fetch a cluster into 'out' (>= cluster_size bytes); reports length. */
extern enum ec3_status cluster_cache_get(
	struct cluster_cache *cache,
	size_t cluster_id,
	void *out,
	size_t *cluster_size);
/* Store (or overwrite) a cluster of 'len' <= cluster_size bytes. */
extern enum ec3_status cluster_cache_put(
	struct cluster_cache *cache,
	size_t cluster_id,
	const void *data,
	size_t len);
/* Largest cluster id ever stored; EC3_ERR_NO_ENTRY when empty. */
extern enum ec3_status cluster_cache_get_highest_cluster_id(
	struct cluster_cache *cache,
	size_t *highest_cluster);
#endif

View File

@@ -1,9 +1,20 @@
#include "cluster.h"
#include "cluster-table.h"
#include "bin.h"
#include <blue/io/file.h>
#include <string.h>
static const size_t cluster_sizes[] = {
[EC3_CLUSTER_4K] = 0x1000,
[EC3_CLUSTER_8K] = 0x2000,
[EC3_CLUSTER_16K] = 0x4000,
[EC3_CLUSTER_32K] = 0x8000,
[EC3_CLUSTER_64K] = 0x10000,
};
static const size_t nr_cluster_sizes
= sizeof cluster_sizes / sizeof cluster_sizes[0];
static int node_init(struct ec3_cluster_group *n)
{
memset(n, 0x0, sizeof *n);
@@ -20,13 +31,19 @@ static int tree_get_node(struct b_tree *p, unsigned long id, b_tree_node *n)
struct cluster_table *table = (struct cluster_table *)p;
size_t offset
= table->t_offset + (id * sizeof(struct ec3_cluster_group));
fseek(table->t_storage, offset, SEEK_SET);
size_t r
= fread(n,
sizeof(struct ec3_cluster_group),
1,
table->t_storage);
return r == 1 ? 0 : -1;
size_t r;
b_status status = b_file_read(
table->t_storage,
offset,
sizeof(struct ec3_cluster_group),
n,
&r);
if (!B_OK(status) || r != sizeof(struct ec3_cluster_group)) {
return -1;
}
return 0;
}
static int tree_put_node(
@@ -37,29 +54,42 @@ static int tree_put_node(
struct cluster_table *table = (struct cluster_table *)p;
size_t offset
= table->t_offset + (id * sizeof(struct ec3_cluster_group));
fseek(table->t_storage, offset, SEEK_SET);
size_t r = fwrite(
n,
size_t w;
b_status status = b_file_write(
table->t_storage,
offset,
sizeof(struct ec3_cluster_group),
1,
table->t_storage);
return r == 1 ? 0 : -1;
n,
&w);
if (!B_OK(status) || w != sizeof(struct ec3_cluster_group)) {
return -1;
}
return 0;
}
static long tree_alloc_node(struct b_tree *p)
{
struct cluster_table *table = (struct cluster_table *)p;
size_t pos = ftell(table->t_storage);
fseek(table->t_storage, 0, SEEK_END);
size_t len = ftell(table->t_storage);
size_t cursor = 0;
b_file_cursor(table->t_storage, &cursor);
size_t len = 0;
b_file_size(table->t_storage, &len);
struct ec3_cluster_group *n
= (struct ec3_cluster_group *)b_tree_cache_alloc_node(p);
node_init(n);
fwrite(&n, sizeof *n, 1, table->t_storage);
size_t w = 0;
b_file_write(table->t_storage, len, sizeof *n, n, &w);
fseek(table->t_storage, pos, SEEK_SET);
if (w != sizeof *n) {
return -1;
}
b_file_seek(table->t_storage, cursor, B_SEEK_BEGINNING);
len /= sizeof *n;
table->t_nr_groups++;
@@ -158,7 +188,7 @@ static const struct b_tree_ops cluster_table_ops = {
void cluster_table_init(
struct cluster_table *table,
FILE *storage,
b_file *storage,
size_t offset)
{
memset(table, 0x0, sizeof *table);
@@ -172,7 +202,10 @@ void cluster_table_init(
sizeof(struct ec3_cluster_group),
sizeof(struct ec3_cluster),
EC3_CLUSTERS_PER_GROUP + 1);
}
void cluster_table_init_empty_table(struct cluster_table *table)
{
/* allocate root node */
struct ec3_cluster_group *root
= (struct ec3_cluster_group *)b_tree_cache_alloc_node(
@@ -214,7 +247,7 @@ void cluster_decode(const struct ec3_cluster *in, struct cluster *out)
out->c_len = ((size_t)bounds_hi >> 16) & 0xFFFF;
}
int cluster_table_get(
enum ec3_status cluster_table_get(
struct cluster_table *table,
unsigned long id,
struct cluster *out)
@@ -225,33 +258,67 @@ int cluster_table_get(
int err = b_tree_get(&table->t_base, (b_tree_node_entry *)&entry);
if (err != 0) {
return err;
return EC3_ERR_IO_FAILURE;
}
cluster_decode(&entry, out);
return 0;
return EC3_SUCCESS;
}
int cluster_table_put(struct cluster_table *table, const struct cluster *in)
enum ec3_status cluster_table_put(
struct cluster_table *table,
const struct cluster *in)
{
struct ec3_cluster entry = {0};
encode_cluster(in, &entry);
return b_tree_put(&table->t_base, (b_tree_node_entry *)&entry);
int err = b_tree_put(&table->t_base, (b_tree_node_entry *)&entry);
return err == 0 ? EC3_SUCCESS : EC3_ERR_IO_FAILURE;
}
int cluster_table_get_group(
enum ec3_status cluster_table_get_group(
struct cluster_table *table,
unsigned long index,
struct ec3_cluster_group *out)
{
size_t offset
= table->t_offset + (index * sizeof(struct ec3_cluster_group));
fseek(table->t_storage, offset, SEEK_SET);
size_t r
= fread(out,
sizeof(struct ec3_cluster_group),
1,
table->t_storage);
return r == 1 ? 0 : -1;
size_t r;
b_status status = b_file_read(
table->t_storage,
offset,
sizeof(struct ec3_cluster_group),
out,
&r);
if (!B_OK(status)) {
return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
}
if (r != sizeof(struct ec3_cluster_group)) {
return EC3_ERR_BAD_FORMAT;
}
return EC3_SUCCESS;
}
/* Map a cluster-size identifier (EC3_CLUSTER_*) to its size in bytes;
 * returns 0 for an unrecognised identifier. */
size_t ec3_cluster_size_id_to_bytes(unsigned int v)
{
	return v < nr_cluster_sizes ? cluster_sizes[v] : 0;
}
/* Map a byte count back to its cluster-size identifier; returns
 * (unsigned int)-1 when 'bytes' is not a supported cluster size. */
unsigned int ec3_cluster_size_bytes_to_id(size_t bytes)
{
	unsigned int id = 0;
	while (id < nr_cluster_sizes && cluster_sizes[id] != bytes) {
		id++;
	}
	return id < nr_cluster_sizes ? id : (unsigned int)-1;
}

View File

@@ -3,7 +3,7 @@
#include "b-tree.h"
#include <stdio.h>
struct b_file;
struct ec3_cluster;
struct ec3_cluster_group;
@@ -11,7 +11,7 @@ struct ec3_cluster_group;
struct cluster_table {
struct b_tree t_base;
size_t t_nr_groups;
FILE *t_storage;
struct b_file *t_storage;
size_t t_offset;
};
@@ -27,21 +27,26 @@ extern void cluster_decode(const struct ec3_cluster *in, struct cluster *out);
extern void cluster_table_init(
struct cluster_table *table,
FILE *storage,
struct b_file *storage,
size_t offset);
extern void cluster_table_finish(struct cluster_table *table);
extern int cluster_table_get(
extern void cluster_table_init_empty_table(struct cluster_table *table);
extern enum ec3_status cluster_table_get(
struct cluster_table *table,
unsigned long id,
struct cluster *out);
extern int cluster_table_put(
extern enum ec3_status cluster_table_put(
struct cluster_table *table,
const struct cluster *in);
extern int cluster_table_get_group(
extern enum ec3_status cluster_table_get_group(
struct cluster_table *table,
unsigned long index,
struct ec3_cluster_group *out);
extern size_t ec3_cluster_size_id_to_bytes(unsigned int v);
extern unsigned int ec3_cluster_size_bytes_to_id(size_t v);
#endif

View File

@@ -1,3 +1,4 @@
#if 0
#include "b-tree.h"
#include "bin.h"
#include "commands.h"
@@ -331,3 +332,4 @@ B_COMMAND(CMD_CREATE, CMD_ROOT)
B_COMMAND_USAGE_OPT(OPT_OUTPATH);
}
}
#endif

935
src/image.c Normal file
View File

@@ -0,0 +1,935 @@
#include "image.h"
#include "bin.h"
#include "pipeline.h"
#include "shadow-image.h"
#include "status.h"
#include <blue/io/directory.h>
#include <blue/io/file.h>
#include <blue/io/path.h>
#include <blue/object/buffer.h>
#include <stdlib.h>
#include <string.h>
B_BTREE_DEFINE_SIMPLE_GET(
struct ec3_tag_ioctx,
uint64_t,
io_node,
io_tag_info->tag_ident,
get_opened_tag)
B_BTREE_DEFINE_SIMPLE_INSERT(
struct ec3_tag_ioctx,
io_node,
io_tag_info->tag_ident,
put_opened_tag)
/*
 * Decode an on-disk image header (big-endian fields) into the host
 * representation used at runtime.  Always succeeds; no validation of
 * the decoded values is performed here.
 */
static enum ec3_status decode_header(
const struct ec3_header *in,
struct ec3_image_info *out)
{
out->img_version = b_i16_btoh(in->h_version);
/* stored as a size id; expose it as a byte count */
out->img_cluster_size
= ec3_cluster_size_id_to_bytes(b_i16_btoh(in->h_cluster_size));
out->img_tag_table_offset = b_i64_btoh(in->h_tag_table_offset);
out->img_extent_table_offset = b_i64_btoh(in->h_extent_table_offset);
out->img_cluster_table_offset = b_i64_btoh(in->h_cluster_table_offset);
/* cluster data begins immediately after the fixed-size header */
out->img_cluster_data_offset = sizeof(struct ec3_header);
out->img_nr_tags = b_i32_btoh(in->h_tag_count);
out->img_nr_extents = b_i32_btoh(in->h_extent_count);
out->img_nr_cluster_groups = b_i32_btoh(in->h_cluster_group_count);
out->img_encryption_function = b_i16_btoh(in->h_encryption);
out->img_compression_function = b_i16_btoh(in->h_compression);
out->img_id = b_i64_btoh(in->h_app_magic);
return EC3_SUCCESS;
}
/* Decode one on-disk tag-table entry (big-endian) into host form. */
static void decode_tag(
const struct ec3_tag_table_entry *in,
struct ec3_tag_info *out)
{
out->tag_type = b_i32_btoh(in->tag_type);
out->tag_flags = b_i32_btoh(in->tag_flags);
out->tag_checksum = b_i32_btoh(in->tag_checksum);
out->tag_ident = b_i64_btoh(in->tag_ident);
out->tag_total_length = b_i64_btoh(in->tag_length);
}
/* Decode one on-disk extent record (big-endian) into host form. */
static void decode_extent(
const struct ec3_extent *in,
struct ec3_extent_info *out)
{
out->ex_owner = b_i64_btoh(in->ex_owner);
out->ex_logical_cluster = b_i32_btoh(in->ex_logical_cluster);
out->ex_physical_cluster = b_i32_btoh(in->ex_physical_cluster);
out->ex_count = b_i32_btoh(in->ex_count);
}
/*
 * Read and decode the fixed header at offset 0 of the image file.
 * Returns EC3_ERR_BAD_FORMAT for truncated images, or an I/O error.
 */
static enum ec3_status read_header(
	b_file *image_file,
	struct ec3_image_info *out_info)
{
	struct ec3_header header;
	size_t nr_read;
	enum b_status status
		= b_file_read(image_file, 0, sizeof header, &header, &nr_read);
	if (!B_OK(status)) {
		return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
	}
	/* BUG FIX: a short read (e.g. a truncated image file) previously
	 * went undetected and decoded garbage; mirror the length check
	 * done by read_tag_table()/read_extent_table(). */
	if (nr_read != sizeof header) {
		return EC3_ERR_BAD_FORMAT;
	}
	return decode_header(&header, out_info);
}
/*
 * Load the image's tag table: read img_nr_tags consecutive on-disk
 * entries starting at img_tag_table_offset and decode each one into
 * 'out_table' (resized to fit).
 * Returns EC3_ERR_BAD_FORMAT on a short read, or an I/O error.
 */
static enum ec3_status read_tag_table(
	b_file *image_file,
	const struct ec3_image_info *image_info,
	b_buffer *out_table)
{
	b_buffer_resize(out_table, image_info->img_nr_tags);
	for (size_t i = 0; i < image_info->img_nr_tags; i++) {
		struct ec3_tag_table_entry raw;
		size_t got = 0;
		size_t at = image_info->img_tag_table_offset
			+ i * sizeof raw;
		enum b_status rc
			= b_file_read(image_file, at, sizeof raw, &raw, &got);
		if (!B_OK(rc)) {
			return ec3_status_from_b_status(
				rc,
				EC3_ERR_IO_FAILURE);
		}
		if (got != sizeof raw) {
			return EC3_ERR_BAD_FORMAT;
		}
		decode_tag(&raw, b_buffer_get(out_table, i));
	}
	return EC3_SUCCESS;
}
/*
 * Load the image's extent table: read img_nr_extents consecutive
 * on-disk records starting at img_extent_table_offset and decode each
 * into 'out_table' (resized to fit).
 * Returns EC3_ERR_BAD_FORMAT on a short read, or an I/O error.
 */
static enum ec3_status read_extent_table(
	b_file *image_file,
	const struct ec3_image_info *image_info,
	b_buffer *out_table)
{
	b_buffer_resize(out_table, image_info->img_nr_extents);
	for (size_t i = 0; i < image_info->img_nr_extents; i++) {
		struct ec3_extent raw;
		size_t got = 0;
		size_t at = image_info->img_extent_table_offset
			+ i * sizeof raw;
		enum b_status rc
			= b_file_read(image_file, at, sizeof raw, &raw, &got);
		if (!B_OK(rc)) {
			return ec3_status_from_b_status(
				rc,
				EC3_ERR_IO_FAILURE);
		}
		if (got != sizeof raw) {
			return EC3_ERR_BAD_FORMAT;
		}
		decode_extent(&raw, b_buffer_get(out_table, i));
	}
	return EC3_SUCCESS;
}
/*
 * Allocate and zero a new image I/O context together with its tag and
 * extent info buffers.  On failure nothing is leaked.
 */
static enum ec3_status create_ioctx(struct ec3_image_ioctx **out)
{
	struct ec3_image_ioctx *ioctx = malloc(sizeof *ioctx);
	if (!ioctx) {
		return EC3_ERR_NO_MEMORY;
	}
	memset(ioctx, 0x0, sizeof *ioctx);
	ioctx->io_tag_table = b_buffer_create(sizeof(struct ec3_tag_info));
	ioctx->io_extent_table
		= b_buffer_create(sizeof(struct ec3_extent_info));
	/* BUG FIX: buffer creation was previously unchecked; assumes
	 * b_buffer_create() returns NULL on allocation failure —
	 * NOTE(review): confirm against the blue/object API. */
	if (!ioctx->io_tag_table || !ioctx->io_extent_table) {
		if (ioctx->io_tag_table) {
			b_buffer_release(ioctx->io_tag_table);
		}
		if (ioctx->io_extent_table) {
			b_buffer_release(ioctx->io_extent_table);
		}
		free(ioctx);
		return EC3_ERR_NO_MEMORY;
	}
	*out = ioctx;
	return EC3_SUCCESS;
}
/*
 * Open an existing image read-only: parse the header, tag table, and
 * extent table, and bind the cluster table to the file.
 * On any failure every intermediate resource is released.
 */
static enum ec3_status open_image_ro(
	b_path *image_path,
	enum ec3_image_ioctx_mode mode,
	struct ec3_image_ioctx **out)
{
	b_file_mode io_mode = B_FILE_READ_ONLY | B_FILE_BINARY;
	b_file *image_file;
	b_status status = b_file_open(
		B_DIRECTORY_ROOT,
		image_path,
		io_mode,
		&image_file);
	if (!B_OK(status)) {
		return EC3_ERR_NO_ENTRY;
	}
	struct ec3_image_ioctx *ioctx;
	enum ec3_status status2 = create_ioctx(&ioctx);
	if (status2 != EC3_SUCCESS) {
		b_file_release(image_file);
		return status2;
	}
	status2 = read_header(image_file, &ioctx->io_header);
	if (status2 != EC3_SUCCESS) {
		goto err;
	}
	status2 = read_tag_table(
		image_file,
		&ioctx->io_header,
		ioctx->io_tag_table);
	if (status2 != EC3_SUCCESS) {
		goto err;
	}
	status2 = read_extent_table(
		image_file,
		&ioctx->io_header,
		ioctx->io_extent_table);
	if (status2 != EC3_SUCCESS) {
		goto err;
	}
	cluster_table_init(
		&ioctx->io_cluster_table,
		image_file,
		ioctx->io_header.img_cluster_table_offset);
	ioctx->io_mode = mode;
	ioctx->io_main = image_file;
	*out = ioctx;
	return EC3_SUCCESS;
err:
	/* BUG FIX: the ioctx and its tag/extent buffers previously
	 * leaked on every failure after create_ioctx(). */
	b_buffer_release(ioctx->io_tag_table);
	b_buffer_release(ioctx->io_extent_table);
	free(ioctx);
	b_file_release(image_file);
	return status2;
}
/*
 * Open an existing image read-write: identical to open_image_ro()
 * except for the file mode.
 * On any failure every intermediate resource is released.
 */
static enum ec3_status open_image_rw(
	b_path *image_path,
	enum ec3_image_ioctx_mode mode,
	struct ec3_image_ioctx **out)
{
	b_file_mode io_mode = B_FILE_READ_WRITE | B_FILE_BINARY;
	b_file *image_file;
	b_status status = b_file_open(
		B_DIRECTORY_ROOT,
		image_path,
		io_mode,
		&image_file);
	if (!B_OK(status)) {
		return EC3_ERR_NO_ENTRY;
	}
	struct ec3_image_ioctx *ioctx;
	enum ec3_status status2 = create_ioctx(&ioctx);
	if (status2 != EC3_SUCCESS) {
		b_file_release(image_file);
		return status2;
	}
	status2 = read_header(image_file, &ioctx->io_header);
	if (status2 != EC3_SUCCESS) {
		goto err;
	}
	status2 = read_tag_table(
		image_file,
		&ioctx->io_header,
		ioctx->io_tag_table);
	if (status2 != EC3_SUCCESS) {
		goto err;
	}
	status2 = read_extent_table(
		image_file,
		&ioctx->io_header,
		ioctx->io_extent_table);
	if (status2 != EC3_SUCCESS) {
		goto err;
	}
	cluster_table_init(
		&ioctx->io_cluster_table,
		image_file,
		ioctx->io_header.img_cluster_table_offset);
	ioctx->io_mode = mode;
	ioctx->io_main = image_file;
	*out = ioctx;
	return EC3_SUCCESS;
err:
	/* BUG FIX: the ioctx and its tag/extent buffers previously
	 * leaked on every failure after create_ioctx(). */
	b_buffer_release(ioctx->io_tag_table);
	b_buffer_release(ioctx->io_extent_table);
	free(ioctx);
	b_file_release(image_file);
	return status2;
}
/*
 * Create a brand-new image file at 'image_path' and initialise an I/O
 * context for it from 'param'.  The tag and extent tables start empty;
 * header fields are filled in host form (they are encoded when the
 * image is written out).
 */
static enum ec3_status open_image_create(
	b_path *image_path,
	enum ec3_image_ioctx_mode mode,
	const struct ec3_parameters *param,
	struct ec3_image_ioctx **out)
{
	b_file *image_file;
	b_status status = b_file_open(
		B_DIRECTORY_ROOT,
		image_path,
		B_FILE_READ_WRITE | B_FILE_CREATE | B_FILE_BINARY,
		&image_file);
	if (!B_OK(status)) {
		return EC3_ERR_NO_ENTRY;
	}
	struct ec3_image_ioctx *ioctx;
	enum ec3_status status2 = create_ioctx(&ioctx);
	if (status2 != EC3_SUCCESS) {
		b_file_release(image_file);
		return status2;
	}
	ioctx->io_mode = mode;
	ioctx->io_main = image_file;
	/* seed the header from the caller-supplied parameters */
	struct ec3_image_info *hdr = &ioctx->io_header;
	hdr->img_version = EC3_VERSION_1_0;
	hdr->img_id = param->p_ident;
	hdr->img_cluster_size
		= ec3_cluster_size_id_to_bytes(param->p_cluster_size);
	hdr->img_encryption_function = param->p_encryption_func;
	hdr->img_compression_function = param->p_compression_func;
	*out = ioctx;
	return EC3_SUCCESS;
}
enum ec3_status ec3_image_ioctx_open(
const char *path,
const struct ec3_parameters *param,
enum ec3_image_ioctx_mode mode,
struct ec3_image_ioctx **out)
{
if (mode == 0) {
return EC3_ERR_INVALID_VALUE;
}
b_path *image_path = b_path_create_from_cstr(path);
b_file_info image_info;
enum b_status status = b_path_stat(image_path, &image_info);
if ((status == B_ERR_NO_ENTRY) && (mode & EC3_IMAGE_IO_WRITE)) {
return open_image_create(image_path, mode, param, out);
}
if ((status == B_SUCCESS) && (mode & EC3_IMAGE_IO_WRITE)) {
return open_image_rw(image_path, mode, out);
}
if ((status == B_SUCCESS) && (mode & EC3_IMAGE_IO_READ)) {
return open_image_ro(image_path, mode, out);
}
return EC3_ERR_NO_ENTRY;
}
/*
 * Tear down an image ioctx: release the backing file and metadata
 * buffers, then free every tag ioctx still registered in the opened
 * tags btree together with the files it holds open.
 */
static void destroy_image_ioctx(struct ec3_image_ioctx *image)
{
	if (image->io_extent_table) {
		b_buffer_release(image->io_extent_table);
	}
	if (image->io_tag_table) {
		b_buffer_release(image->io_tag_table);
	}
	if (image->io_main) {
		b_file_release(image->io_main);
	}
	b_btree_iterator it;
	b_btree_iterator_begin(&image->io_opened_tags, &it);
	while (b_btree_iterator_is_valid(&it)) {
		struct ec3_tag_ioctx *tag_io
			= b_unbox(struct ec3_tag_ioctx, it.node, io_node);
		/* erase advances the iterator to the next entry */
		b_btree_iterator_erase(&it);
		if (tag_io->io_f_cluster_table) {
			b_file_release(tag_io->io_f_cluster_table);
		}
		if (tag_io->io_f_image) {
			b_file_release(tag_io->io_f_image);
		}
		if (tag_io->io_f_data) {
			b_file_release(tag_io->io_f_data);
		}
		free(tag_io);
	}
	free(image);
}
/*
 * Close an image ioctx. Read-only images are simply torn down.
 * Writable images are committed by building a shadow image: every
 * tag's clusters are written to the shadow in three passes (see the
 * inline comments), then the shadow replaces the original.
 *
 * On any error the shadow is cancelled and the ioctx is left open so
 * the caller can decide how to proceed.
 *
 * Fix: the return value of shadow_image_finish() was silently
 * discarded even though every other shadow call in this function is
 * checked; a failed commit would previously report EC3_SUCCESS.
 */
enum ec3_status ec3_image_ioctx_close(struct ec3_image_ioctx *image)
{
	if (!(image->io_mode & EC3_IMAGE_IO_WRITE)) {
		destroy_image_ioctx(image);
		return EC3_SUCCESS;
	}
	enum ec3_status status = EC3_SUCCESS;
	struct shadow_image shadow;
	status = shadow_image_init(image, &shadow);
	if (status != EC3_SUCCESS) {
		return status;
	}
	const struct ec3_tag_info *tags = ec3_image_ioctx_get_tag_info(image);
	size_t nr_tags = image->io_header.img_nr_tags;
	/* first set of tags to write are those that haven't been opened,
	 * or were opened read-only. we can read clusters directly from
	 * the source image to the dest without re-encoding them */
	for (size_t i = 0; i < nr_tags; i++) {
		const struct ec3_tag_info *tag = &tags[i];
		const struct ec3_tag_ioctx *tag_io = get_opened_tag(
			&image->io_opened_tags,
			tag->tag_ident);
		if (tag_io) {
			/* this tag has been written to. handle it later */
			continue;
		}
		status = shadow_image_write_tag(image, tag, NULL, &shadow);
		if (status != EC3_SUCCESS) {
			shadow_image_cancel(&shadow);
			return status;
		}
	}
	/* next, write the set of tags that have been opened
	 * non-sequential read-write. the modified clusters for these
	 * tags is stored unencoded in an on-disk cache. the unmodified clusters
	 * are read from the original image. each modified cluster needs to be
	 * read into memory, encoded, and written to the dest image
	 */
	for (size_t i = 0; i < nr_tags; i++) {
		const struct ec3_tag_info *tag = &tags[i];
		struct ec3_tag_ioctx *tag_io = get_opened_tag(
			&image->io_opened_tags,
			tag->tag_ident);
		if (!tag_io || !(tag_io->io_mode & EC3_TAG_IO_WRITE)) {
			/* this tag has not been written to, and was handled
			 * earlier */
			continue;
		}
		if (tag_io->io_mode & EC3_TAG_IO_SEQUENTIAL) {
			/* this tag is sequential write-only, and will be
			 * handled later */
			continue;
		}
		status = shadow_image_write_tag(image, tag, tag_io, &shadow);
		if (status != EC3_SUCCESS) {
			shadow_image_cancel(&shadow);
			return status;
		}
	}
	/* finally, write the set of tags that have been opened
	 * sequential write-only. the clusters for these tags
	 * have been written as a single continuous stream of
	 * encoded clusters to a temporary file on-disk, along
	 * with a corresponding cluster table. append the
	 * encoded cluster data directly to the image, and merge
	 * the tag's cluster table with that of the dest image
	 */
	for (size_t i = 0; i < nr_tags; i++) {
		const struct ec3_tag_info *tag = &tags[i];
		struct ec3_tag_ioctx *tag_io = get_opened_tag(
			&image->io_opened_tags,
			tag->tag_ident);
		if (!tag_io || !(tag_io->io_mode & EC3_TAG_IO_WRITE)) {
			/* this tag has not been written to, and was handled
			 * earlier */
			continue;
		}
		if (!(tag_io->io_mode & EC3_TAG_IO_SEQUENTIAL)) {
			/* this tag is not sequential write-only, and was
			 * handled earlier */
			continue;
		}
		status = shadow_image_write_tag(image, tag, tag_io, &shadow);
		if (status != EC3_SUCCESS) {
			shadow_image_cancel(&shadow);
			return status;
		}
	}
	status = shadow_image_finish(image, &shadow);
	if (status != EC3_SUCCESS) {
		/* commit failed; keep the ioctx alive for the caller */
		return status;
	}
	destroy_image_ioctx(image);
	return EC3_SUCCESS;
}
/*
 * Return a read-only view of the image header. The pointer remains
 * valid for the lifetime of the ioctx.
 */
const struct ec3_image_info *ec3_image_ioctx_get_info(
	struct ec3_image_ioctx *image)
{
	return &image->io_header;
}
/*
 * Find the index-th tag of the given type (index 0 is the first
 * match, in tag-table order). Returns NULL when fewer than index+1
 * tags of that type exist.
 */
static const struct ec3_tag_info *get_tag_info_by_type(
	struct ec3_image_ioctx *image,
	uint32_t type,
	unsigned int index)
{
	const struct ec3_tag_info *entries
		= ec3_image_ioctx_get_tag_info(image);
	size_t count = image->io_header.img_nr_tags;
	unsigned int seen = 0;
	for (size_t i = 0; i < count; i++) {
		if (entries[i].tag_type != type) {
			continue;
		}
		if (seen == index) {
			return &entries[i];
		}
		seen++;
	}
	return NULL;
}
static const struct ec3_tag_info *get_tag_info_by_id(
struct ec3_image_ioctx *image,
uint64_t id)
{
const struct ec3_tag_info *tags = ec3_image_ioctx_get_tag_info(image);
size_t nr_tags = image->io_header.img_nr_tags;
for (size_t i = 0; i < nr_tags; i++) {
if (tags[i].tag_ident == id) {
return &tags[i];
}
}
return NULL;
}
/*
 * Return the tag table as a contiguous array of ec3_tag_info; the
 * header's img_nr_tags gives the element count.
 * NOTE(review): the backing buffer grows via b_buffer_push_back in
 * ec3_image_ioctx_create_tag, so this pointer presumably becomes stale
 * after a tag is created -- callers should not cache it across calls.
 */
const struct ec3_tag_info *ec3_image_ioctx_get_tag_info(
	struct ec3_image_ioctx *image)
{
	return b_buffer_ptr(image->io_tag_table);
}
/*
 * Public wrapper around get_tag_info_by_id(); returns NULL when no
 * tag has the given identifier.
 */
const struct ec3_tag_info *ec3_image_ioctx_get_tag_info_by_id(
	struct ec3_image_ioctx *image,
	uint64_t id)
{
	return get_tag_info_by_id(image, id);
}
/*
 * Return the extent table as a contiguous array of ec3_extent_info;
 * the header's img_nr_extents gives the element count.
 */
const struct ec3_extent_info *ec3_image_ioctx_get_extent_info(
	struct ec3_image_ioctx *image)
{
	return b_buffer_ptr(image->io_extent_table);
}
/*
 * Not implemented yet in the unified image io interface; always
 * reports EC3_ERR_NOT_SUPPORTED.
 */
enum ec3_status ec3_image_ioctx_open_string_table(
	struct ec3_image_ioctx *image,
	enum ec3_image_ioctx_mode mode,
	struct string_table **out)
{
	return EC3_ERR_NOT_SUPPORTED;
}
/*
 * Not implemented yet in the unified image io interface; always
 * reports EC3_ERR_NOT_SUPPORTED.
 */
enum ec3_status ec3_image_ioctx_open_chunk_table(
	struct ec3_image_ioctx *image,
	enum ec3_image_ioctx_mode mode,
	struct chunk_table **out)
{
	return EC3_ERR_NOT_SUPPORTED;
}
/*
 * Pick an unused tag identifier below the reserved range (< 4096):
 * one past the highest identifier currently in use.
 *
 * Fix: the old test `tag_ident > id` compared the ident to the
 * running candidate instead of comparing candidates, so an existing
 * ident equal to the current candidate was skipped. With idents
 * {2, 3} in table order the old code returned 3 (already taken), and
 * a lone tag with ident 0 made it return 0. Comparing
 * `tag_ident + 1 > id` (i.e. tag_ident >= id) is order-independent.
 */
static uint64_t allocate_tag_id(struct ec3_image_ioctx *image)
{
	uint64_t id = 0;
	const struct ec3_tag_info *tags = ec3_image_ioctx_get_tag_info(image);
	size_t nr_tags = image->io_header.img_nr_tags;
	for (size_t i = 0; i < nr_tags; i++) {
		if (tags[i].tag_ident < 4096 && tags[i].tag_ident + 1 > id) {
			id = tags[i].tag_ident + 1;
		}
	}
	return id;
}
static enum ec3_status init_pipeline(
struct ec3_image_ioctx *image,
struct ec3_pipeline **pipeline)
{
struct ec3_pipeline_stage_args stages[2] = {0};
if (image->io_header.img_compression_function != EC3_COMPRESSION_NONE) {
stages[0].type = ec3_get_pipeline_stage_for_compression_func(
image->io_header.img_compression_function);
}
if (image->io_header.img_encryption_function != EC3_ENCRYPTION_NONE) {
stages[1].type = ec3_get_pipeline_stage_for_encryption_func(
image->io_header.img_encryption_function);
}
return ec3_pipeline_create(
stages,
sizeof stages / sizeof stages[0],
image->io_header.img_cluster_size,
pipeline);
}
/*
 * Prepare a tag ioctx for read-only access: point its cluster table
 * at the image's on-disk cluster table.
 * NOTE(review): tag_info is currently unused here -- kept for
 * signature symmetry with the rw/create variants.
 */
static enum ec3_status init_tag_ioctx_ro(
	struct ec3_image_ioctx *image,
	const struct ec3_tag_info *tag_info,
	struct ec3_tag_ioctx *tag)
{
	cluster_table_init(
		&tag->io_cluster_table,
		image->io_main,
		image->io_header.img_cluster_table_offset);
	return EC3_SUCCESS;
}
/*
 * Read-write tag setup; currently a no-op placeholder that always
 * succeeds.
 */
static enum ec3_status init_tag_ioctx_rw(
	struct ec3_image_ioctx *image,
	const struct ec3_tag_info *tag_info,
	struct ec3_tag_ioctx *tag)
{
	return EC3_SUCCESS;
}
/*
 * Newly-created tag setup; currently a no-op placeholder that always
 * succeeds.
 */
static enum ec3_status init_tag_ioctx_create(
	struct ec3_image_ioctx *image,
	const struct ec3_tag_info *tag_info,
	struct ec3_tag_ioctx *tag)
{
	return EC3_SUCCESS;
}
/*
 * Open the index-th tag of the given type for reading and/or writing.
 *
 * Fix: the opened tag was never registered in image->io_opened_tags
 * (unlike ec3_image_ioctx_create_tag, which calls put_opened_tag).
 * Registration is what ec3_image_ioctx_close uses to find tags whose
 * modified clusters must be committed, and what the get_opened_tag
 * check below relies on to reject a double open -- without it, writes
 * through a tag opened here were silently dropped at close.
 */
enum ec3_status ec3_image_ioctx_open_tag_by_type(
	struct ec3_image_ioctx *image,
	uint32_t type,
	unsigned int index,
	enum ec3_tag_ioctx_mode mode,
	struct ec3_tag_ioctx **out)
{
	if ((mode & EC3_TAG_IO_WRITE)
	    && !(image->io_mode & EC3_IMAGE_IO_WRITE)) {
		return EC3_ERR_NOT_SUPPORTED;
	}
	if (!(mode & (EC3_TAG_IO_READ | EC3_TAG_IO_WRITE))) {
		return EC3_ERR_INVALID_VALUE;
	}
	const struct ec3_tag_info *tag_info
		= get_tag_info_by_type(image, type, index);
	if (!tag_info) {
		return EC3_ERR_NO_ENTRY;
	}
	struct ec3_tag_ioctx *tag_ioctx
		= get_opened_tag(&image->io_opened_tags, tag_info->tag_ident);
	if (tag_ioctx) {
		/* already open: a tag may be open at most once */
		return EC3_ERR_BAD_STATE;
	}
	enum ec3_status status = ec3_tag_ioctx_create(
		&tag_ioctx,
		image->io_header.img_cluster_size,
		mode);
	if (status != EC3_SUCCESS) {
		return status;
	}
	status = init_pipeline(image, &tag_ioctx->io_pipeline);
	if (status != EC3_SUCCESS) {
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	tag_ioctx->io_mode = mode;
	tag_ioctx->io_parent = image;
	tag_ioctx->io_tag_info = tag_info;
	tag_ioctx->io_f_image = b_file_retain(image->io_main);
	if (mode & EC3_TAG_IO_WRITE) {
		status = init_tag_ioctx_rw(image, tag_info, tag_ioctx);
	} else {
		status = init_tag_ioctx_ro(image, tag_info, tag_ioctx);
	}
	if (status != EC3_SUCCESS) {
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	put_opened_tag(&image->io_opened_tags, tag_ioctx);
	*out = tag_ioctx;
	return EC3_SUCCESS;
}
/*
 * Open the tag with the given identifier for reading and/or writing.
 *
 * Fix: as with open_tag_by_type, the opened tag was never registered
 * via put_opened_tag, so ec3_image_ioctx_close could not find it when
 * committing modified clusters and double opens went undetected.
 */
enum ec3_status ec3_image_ioctx_open_tag_by_id(
	struct ec3_image_ioctx *image,
	uint64_t id,
	enum ec3_tag_ioctx_mode mode,
	struct ec3_tag_ioctx **out)
{
	if ((mode & EC3_TAG_IO_WRITE)
	    && !(image->io_mode & EC3_IMAGE_IO_WRITE)) {
		return EC3_ERR_NOT_SUPPORTED;
	}
	if (!(mode & (EC3_TAG_IO_READ | EC3_TAG_IO_WRITE))) {
		return EC3_ERR_INVALID_VALUE;
	}
	const struct ec3_tag_info *tag_info = get_tag_info_by_id(image, id);
	if (!tag_info) {
		return EC3_ERR_NO_ENTRY;
	}
	struct ec3_tag_ioctx *tag_ioctx
		= get_opened_tag(&image->io_opened_tags, tag_info->tag_ident);
	if (tag_ioctx) {
		/* already open: a tag may be open at most once */
		return EC3_ERR_BAD_STATE;
	}
	enum ec3_status status = ec3_tag_ioctx_create(
		&tag_ioctx,
		image->io_header.img_cluster_size,
		mode);
	if (status != EC3_SUCCESS) {
		return status;
	}
	status = init_pipeline(image, &tag_ioctx->io_pipeline);
	if (status != EC3_SUCCESS) {
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	tag_ioctx->io_mode = mode;
	tag_ioctx->io_parent = image;
	tag_ioctx->io_tag_info = tag_info;
	tag_ioctx->io_f_image = b_file_retain(image->io_main);
	if (mode & EC3_TAG_IO_WRITE) {
		status = init_tag_ioctx_rw(image, tag_info, tag_ioctx);
	} else {
		status = init_tag_ioctx_ro(image, tag_info, tag_ioctx);
	}
	if (status != EC3_SUCCESS) {
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	put_opened_tag(&image->io_opened_tags, tag_ioctx);
	*out = tag_ioctx;
	return EC3_SUCCESS;
}
/*
 * Create a new tag in a writable image and open it for writing.
 * EC3_TAG_IO_WRITE is forced into the mode. When id is 0 an unused
 * identifier is allocated automatically. Fails with NAME_EXISTS if a
 * tag with the ident already exists, or BAD_STATE if one is open.
 */
enum ec3_status ec3_image_ioctx_create_tag(
	struct ec3_image_ioctx *image,
	uint32_t type,
	uint64_t id,
	enum ec3_tag_ioctx_mode mode,
	struct ec3_tag_ioctx **out)
{
	mode |= EC3_TAG_IO_WRITE;
	if (!(image->io_mode & EC3_IMAGE_IO_WRITE)) {
		return EC3_ERR_NOT_SUPPORTED;
	}
	if (id == 0) {
		id = allocate_tag_id(image);
	}
	struct ec3_tag_info *tag_info
		= (struct ec3_tag_info *)get_tag_info_by_id(image, id);
	if (tag_info) {
		return EC3_ERR_NAME_EXISTS;
	}
	struct ec3_tag_ioctx *tag_ioctx
		= get_opened_tag(&image->io_opened_tags, id);
	if (tag_ioctx) {
		return EC3_ERR_BAD_STATE;
	}
	enum ec3_status status = ec3_tag_ioctx_create(
		&tag_ioctx,
		image->io_header.img_cluster_size,
		mode);
	if (status != EC3_SUCCESS) {
		return status;
	}
	status = init_pipeline(image, &tag_ioctx->io_pipeline);
	if (status != EC3_SUCCESS) {
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	/* NOTE(review): push_back may grow/reallocate io_tag_table, which
	 * would invalidate the io_tag_info pointers held by any other
	 * currently-open tag ioctx -- confirm b_buffer's reallocation
	 * behavior. The push_back result is also unchecked here. */
	b_buffer_push_back(image->io_tag_table, 1, (void **)&tag_info);
	tag_info->tag_ident = id;
	tag_info->tag_type = type;
	tag_ioctx->io_mode = mode;
	tag_ioctx->io_parent = image;
	tag_ioctx->io_tag_info = tag_info;
	tag_ioctx->io_f_image = b_file_retain(image->io_main);
	status = init_tag_ioctx_create(image, tag_info, tag_ioctx);
	if (status != EC3_SUCCESS) {
		/* roll back the tag-table entry added above */
		b_buffer_pop_back(image->io_tag_table, 1);
		ec3_tag_ioctx_close(tag_ioctx);
		return status;
	}
	image->io_header.img_nr_tags++;
	put_opened_tag(&image->io_opened_tags, tag_ioctx);
	*out = tag_ioctx;
	return EC3_SUCCESS;
}
/*
 * Translate a tag-relative (logical) cluster number into a physical
 * cluster number by scanning the extent table for an extent owned by
 * the tag that covers the logical cluster. Returns
 * EC3_ERR_OUT_OF_BOUNDS when no extent covers it.
 */
enum ec3_status ec3_image_ioctx_cluster_logical_to_physical(
	struct ec3_image_ioctx *image,
	const struct ec3_tag_info *tag,
	size_t logical_cluster,
	size_t *out_physical_cluster)
{
	const struct ec3_extent_info *extents
		= b_buffer_ptr(image->io_extent_table);
	size_t nr_extents = image->io_header.img_nr_extents;
	for (size_t i = 0; i < nr_extents; i++) {
		const struct ec3_extent_info *ext = &extents[i];
		if (ext->ex_owner != tag->tag_ident) {
			continue;
		}
		size_t first = ext->ex_logical_cluster;
		size_t limit = first + ext->ex_count;
		if (logical_cluster >= first && logical_cluster < limit) {
			*out_physical_cluster
				= ext->ex_physical_cluster
				+ (logical_cluster - first);
			return EC3_SUCCESS;
		}
	}
	return EC3_ERR_OUT_OF_BOUNDS;
}
enum ec3_status ec3_image_ioctx_get_tag_nr_clusters(
struct ec3_image_ioctx *image,
const struct ec3_tag_info *tag,
size_t *out_nr_clusters)
{
size_t nr_clusters = 0;
const struct ec3_extent_info *extents
= ec3_image_ioctx_get_extent_info(image);
size_t nr_extents = image->io_header.img_nr_extents;
for (size_t i = 0; i < nr_extents; i++) {
const struct ec3_extent_info *x = &extents[i];
if (x->ex_owner != tag->tag_ident) {
continue;
}
if (x->ex_logical_cluster + x->ex_count > nr_clusters) {
nr_clusters = x->ex_logical_cluster + x->ex_count;
}
}
struct ec3_tag_ioctx *tag_ioctx
= get_opened_tag(&image->io_opened_tags, tag->tag_ident);
if (!tag_ioctx) {
*out_nr_clusters = nr_clusters;
return EC3_SUCCESS;
}
size_t highest_cached_cluster;
enum ec3_status status = cluster_cache_get_highest_cluster_id(
&tag_ioctx->io_cache,
&highest_cached_cluster);
if (status == EC3_SUCCESS && highest_cached_cluster >= nr_clusters) {
nr_clusters = highest_cached_cluster + 1;
}
*out_nr_clusters = nr_clusters;
return EC3_SUCCESS;
}

139
src/image.h Normal file
View File

@@ -0,0 +1,139 @@
#ifndef IMAGE_H_
#define IMAGE_H_
#include "status.h"
#include "tag.h"
#include <stddef.h>
#include <stdint.h>
struct b_file;
struct b_buffer;
struct cluster;
struct string_table;
struct chunk_table;
enum ec3_tag_ioctx_mode;
/* creation-time parameters for a new image (ec3_image_ioctx_open) */
struct ec3_parameters {
	/* cluster size id, converted via ec3_cluster_size_id_to_bytes() */
	unsigned int p_cluster_size;
	unsigned int p_compression_func;
	unsigned int p_encryption_func;
	/* application-chosen image identifier (header img_id) */
	uint64_t p_ident;
	/* assumes caller retains ownership of the key -- TODO confirm */
	const void *p_encryption_key;
	size_t p_encryption_key_size;
};
/* decoded image header; mirrors the on-disk ec3_header layout */
struct ec3_image_info {
	unsigned short img_version;
	/* cluster size in bytes (converted from the on-disk size id) */
	unsigned short img_cluster_size;
	/* byte offsets of the metadata tables within the image file */
	size_t img_tag_table_offset;
	size_t img_extent_table_offset;
	size_t img_cluster_table_offset;
	size_t img_cluster_data_offset;
	unsigned int img_nr_tags;
	unsigned int img_nr_extents;
	unsigned int img_nr_cluster_groups;
	unsigned int img_encryption_function;
	unsigned int img_compression_function;
	/* application-chosen identifier (p_ident at creation) */
	uint64_t img_id;
};
/* one entry of the image's tag table */
struct ec3_tag_info {
	/* four-byte type code, e.g. EC3_TAG_VOLU */
	unsigned long tag_type;
	unsigned long tag_flags;
	unsigned long tag_checksum;
	/* unique identifier; ids below 4096 are auto-allocatable */
	unsigned long long tag_ident;
	unsigned long long tag_total_length;
};
/* one entry of the image's extent table: maps a run of a tag's
 * logical clusters onto physical clusters */
struct ec3_extent_info {
	/* tag_ident of the owning tag */
	unsigned long long ex_owner;
	unsigned long ex_logical_cluster;
	unsigned long ex_physical_cluster;
	/* number of clusters in this run */
	unsigned long ex_count;
};
/* access mode flags for ec3_image_ioctx_open; at least one bit must
 * be set */
enum ec3_image_ioctx_mode {
	EC3_IMAGE_IO_READ = 0x01u,
	EC3_IMAGE_IO_WRITE = 0x02u,
	EC3_IMAGE_IO_READWRITE = EC3_IMAGE_IO_READ | EC3_IMAGE_IO_WRITE,
};
/* per-open-image state; created by ec3_image_ioctx_open and torn down
 * by ec3_image_ioctx_close */
struct ec3_image_ioctx {
	enum ec3_image_ioctx_mode io_mode;
	struct ec3_image_info io_header;
	/* buffer of struct ec3_tag_info */
	struct b_buffer *io_tag_table;
	/* buffer of struct ec3_extent_info */
	struct b_buffer *io_extent_table;
	/* btree list of struct ec3_tag_ioctx */
	struct b_btree io_opened_tags;
	/* when reading existing images, this will reference the on-disk cluster
	 * table */
	struct cluster_table io_cluster_table;
	/* backing image file */
	struct b_file *io_main;
};
extern enum ec3_status ec3_image_ioctx_open(
const char *path,
const struct ec3_parameters *param,
enum ec3_image_ioctx_mode mode,
struct ec3_image_ioctx **out);
extern enum ec3_status ec3_image_ioctx_close(struct ec3_image_ioctx *image);
extern const struct ec3_image_info *ec3_image_ioctx_get_info(
struct ec3_image_ioctx *image);
extern const struct ec3_tag_info *ec3_image_ioctx_get_tag_info(
struct ec3_image_ioctx *image);
extern const struct ec3_tag_info *ec3_image_ioctx_get_tag_info_by_id(
struct ec3_image_ioctx *image,
uint64_t id);
extern const struct ec3_extent_info *ec3_image_ioctx_get_extent_info(
struct ec3_image_ioctx *image);
extern enum ec3_status ec3_image_ioctx_open_string_table(
struct ec3_image_ioctx *image,
enum ec3_image_ioctx_mode mode,
struct string_table **out);
extern enum ec3_status ec3_image_ioctx_open_chunk_table(
struct ec3_image_ioctx *image,
enum ec3_image_ioctx_mode mode,
struct chunk_table **out);
extern enum ec3_status ec3_image_ioctx_open_tag_by_type(
struct ec3_image_ioctx *image,
uint32_t type,
unsigned int index,
enum ec3_tag_ioctx_mode mode,
struct ec3_tag_ioctx **out);
extern enum ec3_status ec3_image_ioctx_open_tag_by_id(
struct ec3_image_ioctx *image,
uint64_t id,
enum ec3_tag_ioctx_mode mode,
struct ec3_tag_ioctx **out);
extern enum ec3_status ec3_image_ioctx_create_tag(
struct ec3_image_ioctx *image,
uint32_t type,
uint64_t id,
enum ec3_tag_ioctx_mode mode,
struct ec3_tag_ioctx **out);
extern enum ec3_status ec3_image_ioctx_cluster_logical_to_physical(
struct ec3_image_ioctx *image,
const struct ec3_tag_info *tag,
size_t logical_cluster,
size_t *out_physical_cluster);
extern enum ec3_status ec3_image_ioctx_get_tag_nr_clusters(
struct ec3_image_ioctx *image,
const struct ec3_tag_info *tag,
size_t *out_nr_cluster);
#endif

View File

@@ -12,8 +12,6 @@ static enum ec3_status identifier_from_int_string(
uint64_t *out,
int base)
{
/* skip leading '0x' */
s += 2;
char *ep = NULL;
uint64_t v = strtoull(s, &ep, base);
@@ -39,7 +37,7 @@ static bool is_base10_string(const char *s)
enum ec3_status ec3_identifier_from_string(const char *s, uint64_t *out)
{
if (s[0] == '0' && s[1] == 'x') {
return identifier_from_int_string(s, out, 16);
return identifier_from_int_string(s + 2, out, 16);
}
if (is_base10_string(s)) {

View File

@@ -6,6 +6,9 @@
#include <stddef.h>
#include <stdint.h>
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#ifdef __GNUC__
#define PACK(__Declaration__) __Declaration__ __attribute__((__packed__))
#endif

View File

@@ -1,6 +1,7 @@
#include "pipeline.h"
#include "bin.h"
#include "misc.h"
#include <stdlib.h>
#include <string.h>
@@ -14,14 +15,6 @@ static const struct ec3_pipeline_stage_type *stage_types[] = {
};
static const size_t nr_stage_types = sizeof stage_types / sizeof stage_types[0];
static const size_t cluster_sizes[] = {
[EC3_CLUSTER_4K] = 0x1000,
[EC3_CLUSTER_8K] = 0x2000,
[EC3_CLUSTER_16K] = 0x4000,
[EC3_CLUSTER_32K] = 0x8000,
[EC3_CLUSTER_64K] = 0x10000,
};
static enum ec3_status create_pipeline_stage(
const struct ec3_pipeline_stage_type *type,
size_t cluster_size,
@@ -57,10 +50,6 @@ extern enum ec3_status ec3_pipeline_create(
struct ec3_pipeline_stage_args stages[],
size_t nr_stages,
size_t cluster_size,
FILE *target,
size_t target_base_offset,
FILE *cluster_table,
size_t cluster_table_base_offset,
struct ec3_pipeline **out)
{
enum ec3_status status = EC3_SUCCESS;
@@ -103,13 +92,6 @@ extern enum ec3_status ec3_pipeline_create(
b_queue_push_back(&pipeline->p_stages, &stage->s_entry);
}
cluster_table_init(
&pipeline->p_cluster_table,
cluster_table,
cluster_table_base_offset);
pipeline->p_target = target;
pipeline->p_target_base_offset = target_base_offset;
pipeline->p_cluster_size = cluster_size;
*out = pipeline;
@@ -120,17 +102,19 @@ void ec3_pipeline_destroy(struct ec3_pipeline *p)
{
}
enum ec3_status ec3_pipeline_write_cluster(
enum ec3_status ec3_pipeline_encode_cluster(
struct ec3_pipeline *pipeline,
void *p,
size_t len,
size_t *bytes_written)
void *in,
size_t in_len,
void *out,
size_t out_max,
size_t *out_len)
{
b_queue_entry *cur = b_queue_first(&pipeline->p_stages);
enum ec3_status status = EC3_SUCCESS;
void *src = p;
void *src = in;
size_t stage_in_size = len;
size_t stage_in_size = in_len;
size_t stage_out_size = 0;
size_t dest_max = pipeline->p_cluster_size;
@@ -163,76 +147,26 @@ enum ec3_status ec3_pipeline_write_cluster(
cur = b_queue_next(cur);
}
stage_out_size = fwrite(src, 1, stage_in_size, pipeline->p_target);
if (bytes_written) {
*bytes_written = stage_out_size;
}
struct cluster cluster = {
.c_id = pipeline->p_next_cluster_id++,
.c_base = pipeline->p_data_offset,
.c_len = stage_out_size,
.c_flags = pipeline->p_cluster_flags,
.c_checksum = 0,
};
pipeline->p_data_offset += stage_out_size;
cluster_table_put(&pipeline->p_cluster_table, &cluster);
memmove(out, src, MIN(stage_in_size, out_max));
*out_len = MIN(stage_in_size, out_max);
return EC3_SUCCESS;
}
enum ec3_status ec3_pipeline_seek(struct ec3_pipeline *pipeline, size_t pos)
{
fseek(pipeline->p_target,
pipeline->p_target_base_offset + pos,
SEEK_SET);
return EC3_SUCCESS;
}
enum ec3_status ec3_pipeline_read_cluster(
enum ec3_status ec3_pipeline_decode_cluster(
struct ec3_pipeline *pipeline,
void *p,
size_t cluster_id,
size_t *nr_read)
void *in,
size_t in_len,
void *out,
size_t out_max,
size_t *out_len)
{
if (!pipeline->p_read_buf) {
pipeline->p_read_buf = malloc(pipeline->p_cluster_size);
if (!pipeline->p_read_buf) {
return EC3_ERR_NO_MEMORY;
}
}
struct cluster cluster = {0};
enum ec3_status status = cluster_table_get(
&pipeline->p_cluster_table,
cluster_id,
&cluster);
if (status != EC3_SUCCESS) {
return status;
}
b_queue_entry *cur = b_queue_first(&pipeline->p_stages);
void *src = pipeline->p_read_buf;
fseek(pipeline->p_target,
pipeline->p_target_base_offset + cluster.c_base,
SEEK_SET);
size_t stage_in_size
= fread(pipeline->p_read_buf,
1,
cluster.c_len,
pipeline->p_target);
void *src = in;
size_t stage_in_size = in_len;
size_t stage_out_size = 0;
if (stage_in_size != cluster.c_len) {
return EC3_ERR_END_OF_FILE;
}
enum ec3_status status = EC3_SUCCESS;
while (cur) {
struct ec3_pipeline_stage *stage
@@ -262,15 +196,13 @@ enum ec3_status ec3_pipeline_read_cluster(
cur = b_queue_next(cur);
}
memcpy(p, src, stage_in_size);
if (nr_read) {
*nr_read = stage_in_size;
}
memmove(out, src, MIN(stage_in_size, out_max));
*out_len = MIN(stage_out_size, out_max);
return EC3_SUCCESS;
}
#if 0
enum ec3_status ec3_pipeline_copy_all(
struct ec3_pipeline *dest,
struct cluster_table *clusters,
@@ -346,11 +278,7 @@ enum ec3_status ec3_pipeline_copy_all(
free(group);
return EC3_SUCCESS;
}
size_t ec3_get_cluster_size(unsigned int v)
{
return cluster_sizes[v];
}
#endif
enum ec3_pipeline_stage_type_id ec3_get_pipeline_stage_for_compression_func(
unsigned int func)

View File

@@ -1,7 +1,6 @@
#ifndef PIPELINE_H_
#define PIPELINE_H_
#include "cluster.h"
#include "status.h"
#include <blue/core/queue.h>
@@ -54,17 +53,7 @@ struct ec3_pipeline_stage_args {
};
struct ec3_pipeline {
FILE *p_target;
size_t p_target_base_offset;
size_t p_cluster_size;
unsigned int p_cluster_flags;
struct cluster_table p_cluster_table;
size_t p_next_cluster_id;
size_t p_data_offset;
void *p_read_buf;
b_queue p_stages;
};
@@ -72,30 +61,31 @@ extern enum ec3_status ec3_pipeline_create(
struct ec3_pipeline_stage_args stages[],
size_t nr_stages,
size_t cluster_size,
FILE *target,
size_t target_base_offset,
FILE *cluster_table,
size_t cluster_table_base_offset,
struct ec3_pipeline **out);
extern void ec3_pipeline_destroy(struct ec3_pipeline *p);
extern enum ec3_status ec3_pipeline_write_cluster(
extern enum ec3_status ec3_pipeline_encode_cluster(
struct ec3_pipeline *pipeline,
void *p,
size_t len,
size_t *bytes_written);
extern enum ec3_status ec3_pipeline_read_cluster(
void *in,
size_t in_len,
void *out,
size_t out_max,
size_t *out_len);
extern enum ec3_status ec3_pipeline_decode_cluster(
struct ec3_pipeline *pipeline,
void *p,
size_t cluster_id,
size_t *nr_read);
void *in,
size_t in_len,
void *out,
size_t out_max,
size_t *out_len);
#if 0
extern enum ec3_status ec3_pipeline_copy_all(
struct ec3_pipeline *dest,
struct cluster_table *clusters,
FILE *data);
#endif
extern size_t ec3_get_cluster_size(unsigned int v);
extern enum ec3_pipeline_stage_type_id
ec3_get_pipeline_stage_for_encryption_func(unsigned int func);
extern enum ec3_pipeline_stage_type_id

View File

@@ -1,7 +1,7 @@
#include "bin.h"
#include "commands.h"
#include "image.h"
#include "misc.h"
#include "read.h"
#include "status.h"
#include <blue/cmd.h>
@@ -62,14 +62,13 @@ static int query(
&in_path);
FILE *fp = fopen(in_path, "rb");
if (!fp) {
b_err("cannot open container '%s'", in_path);
b_i("reason: %s", strerror(errno));
return -1;
}
struct ec3_reader *reader = NULL;
enum ec3_status status = ec3_reader_create(fp, &reader);
struct ec3_image_ioctx *reader = NULL;
enum ec3_status status = ec3_image_ioctx_open(
in_path,
NULL,
EC3_IMAGE_IO_READ,
&reader);
if (status != EC3_SUCCESS) {
fclose(fp);
@@ -77,43 +76,43 @@ static int query(
return -1;
}
const struct ec3_container_info *c_info
= ec3_reader_get_container_info(reader);
const struct ec3_image_info *img_info
= ec3_image_ioctx_get_info(reader);
char container_id[32];
ec3_identifier_to_string(
c_info->c_id,
img_info->img_id,
container_id,
sizeof container_id);
printf("container details:\n");
printf(" %-20s: %04x\n", "format version", c_info->c_version);
printf(" %-20s: %04x\n", "format version", img_info->img_version);
printf(" %-20s: %s\n", "identifier", container_id);
printf(" %-20s: %u\n", "cluster size", c_info->c_cluster_size);
printf(" %-20s: %u\n", "cluster size", img_info->img_cluster_size);
printf(" %-20s: %s\n",
"compression",
compression_function_name(c_info->c_compression_function));
compression_function_name(img_info->img_compression_function));
printf(" %-20s: %s\n",
"encryption",
encryption_function_name(c_info->c_encryption_function));
encryption_function_name(img_info->img_encryption_function));
printf(" %-20s: 0x%zx\n",
"tag table offset",
c_info->c_tag_table_offset);
img_info->img_tag_table_offset);
printf(" %-20s: 0x%zx\n",
"extent table offset",
c_info->c_extent_table_offset);
img_info->img_extent_table_offset);
printf(" %-20s: 0x%zx\n",
"cluster table offset",
c_info->c_cluster_table_offset);
printf(" %-20s: %u\n", "tag count", c_info->c_nr_tags);
printf(" %-20s: %u\n", "extent count", c_info->c_nr_extents);
img_info->img_cluster_table_offset);
printf(" %-20s: %u\n", "tag count", img_info->img_nr_tags);
printf(" %-20s: %u\n", "extent count", img_info->img_nr_extents);
printf(" %-20s: %u\n",
"cluster group count",
c_info->c_nr_cluster_groups);
img_info->img_nr_cluster_groups);
printf("\ntags:\n");
const struct ec3_tag_info *tags = ec3_reader_get_tags(reader);
for (unsigned int i = 0; i < c_info->c_nr_tags; i++) {
const struct ec3_tag_info *tags = ec3_image_ioctx_get_tag_info(reader);
for (unsigned int i = 0; i < img_info->img_nr_tags; i++) {
char tag_type[5];
tag_type_string(tags[i].tag_type, tag_type);
@@ -132,8 +131,9 @@ static int query(
}
printf("\nextents:\n");
const struct ec3_extent_info *extents = ec3_reader_get_extents(reader);
for (unsigned int i = 0; i < c_info->c_nr_extents; i++) {
const struct ec3_extent_info *extents
= ec3_image_ioctx_get_extent_info(reader);
for (unsigned int i = 0; i < img_info->img_nr_extents; i++) {
char tag_id[32];
ec3_identifier_to_string(
extents[i].ex_owner,
@@ -153,7 +153,7 @@ static int query(
physical_limit);
}
ec3_reader_finish(reader);
ec3_image_ioctx_close(reader);
fclose(fp);
return 0;

View File

@@ -1,3 +1,4 @@
#if 0
#include "read.h"
#include "bin.h"
@@ -328,8 +329,8 @@ static enum ec3_status read_cluster(
return ec3_pipeline_read_cluster(
tag->tag_parent->r_pipeline,
tag->tag_buf,
physical_cluster,
tag->tag_buf,
nr_read);
}
@@ -435,3 +436,4 @@ enum ec3_status ec3_tag_reader_close(struct ec3_tag_reader *tag)
{
return EC3_ERR_NOT_SUPPORTED;
}
#endif

View File

@@ -61,8 +61,8 @@ extern enum ec3_status ec3_reader_open_tag(
extern enum ec3_status ec3_tag_reader_read(
struct ec3_tag_reader *tag,
size_t offset,
size_t count,
size_t first_cluster,
size_t nr_clusters,
void *buf,
size_t *nr_read);
extern enum ec3_status ec3_tag_reader_close(struct ec3_tag_reader *tag);

754
src/shadow-image.c Normal file
View File

@@ -0,0 +1,754 @@
#include "shadow-image.h"
#include "bin.h"
#include "image.h"
#include "pipeline.h"
#include "tag.h"
#include <blue/io/file.h>
#include <blue/object/buffer.h>
#include <limits.h>
#include <stdlib.h>
/*
 * Serialize the in-memory image header into its on-disk layout,
 * converting each field to on-disk byte order via the b_*_htob
 * helpers. The output is zeroed first so reserved bytes stay 0.
 */
static void encode_header(
	const struct ec3_image_info *in,
	struct ec3_header *out)
{
	memset(out, 0x0, sizeof *out);
	out->h_magic = b_i32_htob(EC3_SIGNATURE);
	out->h_version = b_i16_htob(in->img_version);
	/* the header stores the size id, not the byte count */
	out->h_cluster_size = b_i16_htob(
		ec3_cluster_size_bytes_to_id(in->img_cluster_size));
	out->h_tag_table_offset = b_i64_htob(in->img_tag_table_offset);
	out->h_extent_table_offset = b_i64_htob(in->img_extent_table_offset);
	out->h_cluster_table_offset = b_i64_htob(in->img_cluster_table_offset);
	out->h_tag_count = b_i32_htob(in->img_nr_tags);
	out->h_extent_count = b_i32_htob(in->img_nr_extents);
	out->h_cluster_group_count = b_i32_htob(in->img_nr_cluster_groups);
	out->h_encryption = b_i16_htob(in->img_encryption_function);
	out->h_compression = b_i16_htob(in->img_compression_function);
	out->h_app_magic = b_i64_htob(in->img_id);
}
/*
 * Serialize one tag-table entry into its on-disk layout (fields in
 * on-disk byte order, reserved bytes zeroed).
 */
static void encode_tag(
	const struct ec3_tag_info *in,
	struct ec3_tag_table_entry *out)
{
	memset(out, 0x0, sizeof *out);
	out->tag_type = b_i32_htob(in->tag_type);
	out->tag_flags = b_i32_htob(in->tag_flags);
	out->tag_checksum = b_i32_htob(in->tag_checksum);
	out->tag_ident = b_i64_htob(in->tag_ident);
	out->tag_length = b_i64_htob(in->tag_total_length);
}
/*
 * Serialize one extent-table entry into its on-disk layout (fields in
 * on-disk byte order, reserved bytes zeroed).
 */
static void encode_extent(
	const struct ec3_extent_info *in,
	struct ec3_extent *out)
{
	memset(out, 0x0, sizeof *out);
	out->ex_owner = b_i64_htob(in->ex_owner);
	out->ex_logical_cluster = b_i32_htob(in->ex_logical_cluster);
	out->ex_physical_cluster = b_i32_htob(in->ex_physical_cluster);
	out->ex_count = b_i32_htob(in->ex_count);
}
/*
 * Start a shadow copy of the image: open a shadow file over the
 * original, write a zeroed placeholder header (rewritten at finish),
 * and create a temporary file plus empty cluster table for staging.
 * On failure everything allocated so far is released via
 * shadow_image_cancel.
 */
enum ec3_status shadow_image_init(
	struct ec3_image_ioctx *image,
	struct shadow_image *out)
{
	memset(out, 0x0, sizeof *out);
	b_status status = b_file_open_shadow(
		image->io_main,
		B_FILE_WRITE_ONLY | B_FILE_BINARY,
		&out->img_f_data);
	if (!B_OK(status)) {
		return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
	}
	size_t nr_written;
	/* placeholder header; the real one is written by finish */
	struct ec3_header header = {0};
	status = b_file_write(
		out->img_f_data,
		0,
		sizeof header,
		&header,
		&nr_written);
	if (!B_OK(status)) {
		shadow_image_cancel(out);
		return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
	}
	if (nr_written != sizeof header) {
		shadow_image_cancel(out);
		return EC3_ERR_IO_FAILURE;
	}
	status = b_file_open_temp(
		B_FILE_READ_WRITE | B_FILE_BINARY,
		&out->img_f_cluster_table);
	if (!B_OK(status)) {
		shadow_image_cancel(out);
		return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
	}
	cluster_table_init(
		&out->img_cluster_table,
		out->img_f_cluster_table,
		0);
	cluster_table_init_empty_table(&out->img_cluster_table);
	/* running length of the shadow data file */
	out->img_nr_bytes = sizeof header;
	/* NOTE(review): the two b_buffer_create results are not checked
	 * for allocation failure -- confirm whether b_buffer_create can
	 * return NULL */
	out->img_extent_table = b_buffer_create(sizeof(struct ec3_extent_info));
	out->img_tag_table = b_buffer_create(sizeof(struct ec3_tag_info));
	return EC3_SUCCESS;
}
/*
 * Pad the shadow data file with zero bytes (one write per byte, at
 * the current position) until its running length is a multiple of
 * `boundary`, keeping img_nr_bytes in step with what was written.
 */
static enum ec3_status align_data_file(
	struct shadow_image *image,
	size_t boundary)
{
	b_status status = B_SUCCESS;
	while ((image->img_nr_bytes % boundary) != 0) {
		char pad = 0;
		size_t nr_written = 0;
		status = b_file_write(
			image->img_f_data,
			B_OFFSET_CURRENT,
			1,
			&pad,
			&nr_written);
		if (!B_OK(status)) {
			break;
		}
		if (nr_written != sizeof pad) {
			status = B_ERR_IO_FAILURE;
			break;
		}
		image->img_nr_bytes++;
	}
	return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
}
/* finalise a shadow image and atomically swap it in place of the main
 * image file. layout written (in order, each 16-byte aligned): cluster
 * data (already present), cluster table, extent table, tag table, then
 * the real header at offset 0. on success the shadow's resources are
 * released; on failure the caller is expected to shadow_image_cancel(). */
enum ec3_status shadow_image_finish(
	struct ec3_image_ioctx *image,
	struct shadow_image *shadow)
{
	b_status status = B_SUCCESS;
	size_t nr_read = 0;
	size_t nr_written = 0;
	/* NOTE(review): the three align_data_file() results below are
	 * ignored; a failed pad would silently corrupt all following
	 * offsets. they cannot be checked until ec3_status_from_b_status
	 * maps B_SUCCESS to EC3_SUCCESS (its tail call currently returns
	 * the default error even on success) -- TODO revisit */
	align_data_file(shadow, 16);
	size_t cluster_table_offset = shadow->img_nr_bytes;
	struct ec3_cluster_group *buf = malloc(sizeof *buf);
	if (!buf) {
		return EC3_ERR_NO_MEMORY;
	}
	/* append the staged cluster table (temp file) group by group to the
	 * end of the data file */
	b_file_seek(shadow->img_f_data, 0, B_SEEK_END);
	b_file_seek(shadow->img_f_cluster_table, 0, B_SEEK_BEGINNING);
	for (size_t i = 0;; i += sizeof *buf) {
		status = b_file_read(
			shadow->img_f_cluster_table,
			B_OFFSET_CURRENT,
			sizeof *buf,
			buf,
			&nr_read);
		if (!B_OK(status)) {
			free(buf);
			return ec3_status_from_b_status(
				status,
				EC3_ERR_IO_FAILURE);
		}
		/* a short read marks the end of the temp file. NOTE(review):
		 * a trailing partial group would be silently dropped --
		 * presumably the file is always a whole number of groups;
		 * confirm against cluster_table_put */
		if (nr_read < sizeof *buf) {
			status = B_SUCCESS;
			break;
		}
		status = b_file_write(
			shadow->img_f_data,
			B_OFFSET_CURRENT,
			sizeof *buf,
			buf,
			&nr_written);
		if (!B_OK(status)) {
			break;
		}
		if (nr_written != sizeof *buf) {
			status = B_ERR_IO_FAILURE;
			break;
		}
		shadow->img_nr_bytes += sizeof *buf;
	}
	free(buf);
	if (!B_OK(status)) {
		return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
	}
	align_data_file(shadow, 16);
	/* serialise the extent table, entry by entry */
	size_t extent_table_offset = shadow->img_nr_bytes;
	const struct ec3_extent_info *extents
		= b_buffer_ptr(shadow->img_extent_table);
	size_t nr_extents = b_buffer_get_size(shadow->img_extent_table);
	for (size_t i = 0; i < nr_extents; i++) {
		struct ec3_extent extent;
		encode_extent(&extents[i], &extent);
		status = b_file_write(
			shadow->img_f_data,
			B_OFFSET_CURRENT,
			sizeof extent,
			&extent,
			&nr_written);
		if (!B_OK(status)) {
			return ec3_status_from_b_status(
				status,
				EC3_ERR_IO_FAILURE);
		}
		if (nr_written != sizeof extent) {
			return EC3_ERR_IO_FAILURE;
		}
		shadow->img_nr_bytes += sizeof extent;
	}
	align_data_file(shadow, 16);
	/* serialise the tag table */
	size_t tag_table_offset = shadow->img_nr_bytes;
	const struct ec3_tag_info *tags = b_buffer_ptr(shadow->img_tag_table);
	size_t nr_tags = b_buffer_get_size(shadow->img_tag_table);
	for (size_t i = 0; i < nr_tags; i++) {
		struct ec3_tag_table_entry tag;
		encode_tag(&tags[i], &tag);
		status = b_file_write(
			shadow->img_f_data,
			B_OFFSET_CURRENT,
			sizeof tag,
			&tag,
			&nr_written);
		if (!B_OK(status)) {
			return ec3_status_from_b_status(
				status,
				EC3_ERR_IO_FAILURE);
		}
		if (nr_written != sizeof tag) {
			return EC3_ERR_IO_FAILURE;
		}
		shadow->img_nr_bytes += sizeof tag;
	}
	/* build the final header from the parent image's header plus the
	 * offsets computed above, and write it over the placeholder.
	 * NOTE(review): the memcpy assumes io_header's leading bytes are
	 * layout-compatible with struct ec3_image_info -- confirm the two
	 * types match */
	struct ec3_image_info image_info = {0};
	memcpy(&image_info, &image->io_header, sizeof image_info);
	image_info.img_nr_tags = nr_tags;
	image_info.img_nr_extents = nr_extents;
	image_info.img_tag_table_offset = tag_table_offset;
	image_info.img_extent_table_offset = extent_table_offset;
	image_info.img_cluster_table_offset = cluster_table_offset;
	image_info.img_cluster_data_offset = sizeof(struct ec3_header);
	image_info.img_nr_cluster_groups
		= shadow->img_cluster_table.t_nr_groups;
	struct ec3_header header;
	encode_header(&image_info, &header);
	status = b_file_write(
		shadow->img_f_data,
		0,
		sizeof header,
		&header,
		&nr_written);
	if (!B_OK(status)) {
		return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
	}
	if (nr_written < sizeof header) {
		return EC3_ERR_IO_FAILURE;
	}
	/* atomically replace the main image with the completed shadow */
	b_file_swap_shadow(image->io_main, shadow->img_f_data);
	cluster_table_finish(&shadow->img_cluster_table);
	b_file_release(shadow->img_f_data);
	b_file_release(shadow->img_f_cluster_table);
	return EC3_SUCCESS;
}
/* tear down a shadow image that will not be finished: drop the staged
 * cluster table and release whichever files were opened. safe on a
 * partially initialised shadow (unopened file pointers are NULL). */
void shadow_image_cancel(struct shadow_image *shadow)
{
	cluster_table_finish(&shadow->img_cluster_table);
	struct b_file *open_files[] = {
		shadow->img_f_data,
		shadow->img_f_cluster_table,
	};
	for (size_t i = 0; i < sizeof open_files / sizeof open_files[0]; i++) {
		if (open_files[i] != NULL) {
			b_file_release(open_files[i]);
		}
	}
}
/* scan the image's extent table for the extent owned by 'tag' whose
 * logical cluster range contains 'logical_cluster'; NULL when no extent
 * covers it. */
static const struct ec3_extent_info *get_extent_for_logical_cluster(
	struct ec3_image_ioctx *image,
	const struct ec3_tag_info *tag,
	size_t logical_cluster)
{
	const struct ec3_extent_info *table
		= ec3_image_ioctx_get_extent_info(image);
	size_t total = image->io_header.img_nr_extents;
	for (size_t idx = 0; idx < total; idx++) {
		const struct ec3_extent_info *candidate = &table[idx];
		if (candidate->ex_owner != tag->tag_ident) {
			continue;
		}
		size_t first = candidate->ex_logical_cluster;
		size_t limit = first + candidate->ex_count;
		if (logical_cluster >= first && logical_cluster < limit) {
			return candidate;
		}
	}
	return NULL;
}
/* encode an all-zero cluster through the tag's pipeline and append it to
 * the shadow image, registering it in the cluster table. used for logical
 * clusters that have neither cached data nor a backing extent.
 * NOTE(review): 'cluster_id' is unused; the cluster is assigned the next
 * sequential physical id -- confirm that is intended. 'buf' must be at
 * least one cluster in size and is clobbered. */
static enum ec3_status put_empty_cluster(
	struct shadow_image *image,
	struct ec3_tag_ioctx *tag,
	size_t cluster_id,
	void *buf)
{
	size_t cluster_size = tag->io_parent->io_header.img_cluster_size;
	size_t nr_read = cluster_size;
	memset(buf, 0x0, nr_read);
	/* encode in place so the image stays uniformly encoded */
	size_t encoded_size = 0;
	enum ec3_status status = ec3_pipeline_encode_cluster(
		tag->io_pipeline,
		buf,
		nr_read,
		buf,
		cluster_size,
		&encoded_size);
	if (status != EC3_SUCCESS) {
		return status;
	}
	size_t nr_written;
	enum b_status status2 = b_file_write(
		image->img_f_data,
		B_OFFSET_CURRENT,
		encoded_size,
		buf,
		&nr_written);
	if (!B_OK(status2)) {
		return ec3_status_from_b_status(status2, EC3_ERR_IO_FAILURE);
	}
	if (nr_written != encoded_size) {
		return EC3_ERR_IO_FAILURE;
	}
	struct cluster cluster = {0};
	cluster.c_id = image->img_nr_clusters;
	cluster.c_base = image->img_nr_bytes;
	cluster.c_len = encoded_size;
	image->img_nr_clusters++;
	/* NOTE(review): the byte counter advances by the decoded size
	 * (nr_read) while only encoded_size bytes were written -- confirm
	 * whether c_base is meant to track on-disk or decoded offsets */
	image->img_nr_bytes += nr_read;
	cluster_table_put(&image->img_cluster_table, &cluster);
	return EC3_SUCCESS;
}
/* retrieve a cached cluster from a tag io context, run it through the
 * tag's encode pipeline, and append the encoded bytes to the shadow
 * image, registering it in the cluster table. propagates the cache's
 * status (e.g. EC3_ERR_NO_ENTRY) when the cluster is not cached. */
static enum ec3_status copy_cached_cluster(
	struct shadow_image *image,
	struct ec3_tag_ioctx *tag,
	size_t cluster_id,
	void *buf)
{
	size_t cluster_size = tag->io_parent->io_header.img_cluster_size;
	size_t nr_read;
	enum ec3_status status
		= cluster_cache_get(&tag->io_cache, cluster_id, buf, &nr_read);
	if (status != EC3_SUCCESS) {
		return status;
	}
	/* encode in place; the cached copy is stored decoded */
	size_t encoded_size = 0;
	status = ec3_pipeline_encode_cluster(
		tag->io_pipeline,
		buf,
		nr_read,
		buf,
		cluster_size,
		&encoded_size);
	if (status != EC3_SUCCESS) {
		return status;
	}
	size_t nr_written;
	enum b_status status2 = b_file_write(
		image->img_f_data,
		B_OFFSET_CURRENT,
		encoded_size,
		buf,
		&nr_written);
	if (!B_OK(status2)) {
		return ec3_status_from_b_status(status2, EC3_ERR_IO_FAILURE);
	}
	if (nr_written != encoded_size) {
		return EC3_ERR_IO_FAILURE;
	}
	struct cluster cluster = {0};
	cluster.c_id = image->img_nr_clusters;
	cluster.c_base = image->img_nr_bytes;
	cluster.c_len = encoded_size;
	image->img_nr_clusters++;
	/* NOTE(review): byte counter advances by the decoded size while
	 * encoded_size bytes were written -- same question as in
	 * put_empty_cluster; confirm which offset space c_base uses */
	image->img_nr_bytes += nr_read;
	cluster_table_put(&image->img_cluster_table, &cluster);
	return EC3_SUCCESS;
}
/* read a range of clusters from the source image, delimited by an extent,
 * and append them to the shadow image. the tag's cache is consulted for
 * each cluster in the extent; if a cached version of the cluster's
 * contents exists, it is used instead of the on-disk data.
 *
 * 'buf' must hold at least one cluster and is clobbered. copies from
 * 'first_logical_cluster' up to the end of 'extent'. */
static enum ec3_status copy_cluster_range(
	struct ec3_image_ioctx *src,
	struct shadow_image *dest,
	struct ec3_tag_ioctx *tag_ioctx,
	const struct ec3_tag_info *tag_info,
	const struct ec3_extent_info *extent,
	size_t first_logical_cluster,
	void *buf)
{
	enum ec3_status status = EC3_SUCCESS;
	enum b_status status2 = B_SUCCESS;
	size_t cluster_size = src->io_header.img_cluster_size;
	for (size_t logical_cluster = first_logical_cluster;
	     logical_cluster < extent->ex_logical_cluster + extent->ex_count;
	     logical_cluster++) {
		/* a cached (possibly modified) copy takes precedence;
		 * copy_cached_cluster also updates the destination's
		 * counters. do NOT advance logical_cluster here -- the loop
		 * increment already does, and doing both skipped a cluster */
		status = copy_cached_cluster(
			dest,
			tag_ioctx,
			logical_cluster,
			buf);
		if (status == EC3_SUCCESS) {
			continue;
		}
		/* map the logical cluster to its physical cluster within
		 * the extent and look up its location */
		size_t physical_cluster = logical_cluster
			- extent->ex_logical_cluster
			+ extent->ex_physical_cluster;
		struct cluster cluster;
		status = cluster_table_get(
			&src->io_cluster_table,
			physical_cluster,
			&cluster);
		if (status != EC3_SUCCESS) {
			return status;
		}
		if (cluster.c_len > cluster_size) {
			return EC3_ERR_BAD_FORMAT;
		}
		size_t data_offset = src->io_header.img_cluster_data_offset
			+ cluster.c_base;
		size_t nr_read = 0;
		status2 = b_file_read(
			src->io_main,
			data_offset,
			cluster.c_len,
			buf,
			&nr_read);
		if (!B_OK(status2)) {
			return ec3_status_from_b_status(
				status2,
				EC3_ERR_IO_FAILURE);
		}
		if (nr_read != cluster.c_len) {
			return EC3_ERR_BAD_FORMAT;
		}
		/* re-home the cluster at the shadow's current position; the
		 * encoded payload is copied verbatim (no re-encode) */
		cluster.c_id = dest->img_nr_clusters;
		cluster.c_base = dest->img_nr_bytes;
		size_t nr_written = 0;
		status2 = b_file_write(
			dest->img_f_data,
			B_OFFSET_CURRENT,
			cluster.c_len,
			buf,
			&nr_written);
		if (!B_OK(status2)) {
			return ec3_status_from_b_status(
				status2,
				EC3_ERR_IO_FAILURE);
		}
		if (nr_written != cluster.c_len) {
			return EC3_ERR_IO_FAILURE;
		}
		cluster_table_put(&dest->img_cluster_table, &cluster);
		/* advance the destination counters so the next cluster gets
		 * a fresh id and base offset (previously left unchanged,
		 * which made every cluster in the range collide) */
		dest->img_nr_clusters++;
		dest->img_nr_bytes += cluster.c_len;
	}
	return EC3_SUCCESS;
}
/* splice a sequential-write tag into the shadow image: rebase the tag's
 * staged cluster table entries onto the destination's id/byte offsets,
 * then bulk-copy the tag's encoded data file onto the end of the shadow.
 * NOTE(review): 'src', 'tag' and 'first_logical_cluster' are unused --
 * presumably kept for signature parity with write_rw_tag. */
static enum ec3_status write_wseq_tag(
	struct ec3_image_ioctx *src,
	const struct ec3_tag_info *tag,
	struct ec3_tag_ioctx *ioctx,
	struct shadow_image *dest)
{
	size_t cluster_id_offset = dest->img_nr_clusters;
	size_t cluster_id_limit = cluster_id_offset;
	size_t data_offset = dest->img_nr_bytes;
	size_t first_logical_cluster = 0;
	struct cluster_table *clusters = &ioctx->io_cluster_table;
	/* 'group' doubles as a raw copy buffer for the data loop below */
	struct ec3_cluster_group *group = malloc(sizeof *group);
	if (!group) {
		return EC3_ERR_NO_MEMORY;
	}
	/* pass 1: rebase every staged cluster entry into the destination's
	 * cluster table, tracking the highest id assigned */
	for (size_t i = 0; i < clusters->t_nr_groups; i++) {
		int err = cluster_table_get_group(clusters, i, group);
		if (err != 0) {
			free(group);
			return EC3_ERR_IO_FAILURE;
		}
		unsigned int nr_entries = b_i16_btoh(group->g_nr_clusters);
		for (unsigned int ii = 0; ii < nr_entries; ii++) {
			struct ec3_cluster *raw_cluster
				= &group->g_clusters[ii];
			struct cluster cluster;
			cluster_decode(raw_cluster, &cluster);
			cluster.c_id += cluster_id_offset;
			cluster.c_base += data_offset;
			if (cluster.c_id > cluster_id_limit) {
				cluster_id_limit = cluster.c_id;
			}
			cluster_table_put(&dest->img_cluster_table, &cluster);
		}
	}
	/* pass 2: append the tag's encoded data file verbatim */
	size_t data_len = 0;
	b_file_size(ioctx->io_f_data, &data_len);
	b_file_seek(ioctx->io_f_data, 0, B_SEEK_BEGINNING);
	unsigned char *buf = (unsigned char *)group;
	size_t buf_len = sizeof *group;
	enum ec3_status status2 = EC3_SUCCESS;
	for (size_t i = 0; i < data_len; i += buf_len) {
		size_t r;
		b_status status = b_file_read(
			ioctx->io_f_data,
			B_OFFSET_CURRENT,
			buf_len,
			buf,
			&r);
		if (!B_OK(status)) {
			status2 = ec3_status_from_b_status(
				status,
				EC3_ERR_IO_FAILURE);
			break;
		}
		size_t w;
		/* NOTE(review): a short write (w != r) is not detected here
		 * -- the byte counter would still advance by r */
		status = b_file_write(
			dest->img_f_data,
			B_OFFSET_CURRENT,
			r,
			buf,
			&w);
		if (!B_OK(status)) {
			status2 = ec3_status_from_b_status(
				status,
				EC3_ERR_IO_FAILURE);
			break;
		}
		dest->img_nr_bytes += r;
		/* short read marks end of the data file */
		if (r != buf_len) {
			break;
		}
	}
	/* NOTE(review): when the tag has no clusters this still bumps
	 * img_nr_clusters to cluster_id_limit + 1 -- confirm an empty
	 * sequential tag is impossible or intended to reserve an id */
	dest->img_nr_clusters = cluster_id_limit + 1;
	free(group);
	return status2;
}
/* write a tag's clusters to the shadow image in logical order. each
 * logical cluster is resolved, in priority order, from: the tag's cluster
 * cache, the source image via a covering extent, or -- when neither
 * exists -- an encoded all-zero cluster. returns the first failure
 * encountered (previously errors were swallowed and EC3_SUCCESS was
 * always returned). */
static enum ec3_status write_rw_tag(
	struct ec3_image_ioctx *src,
	const struct ec3_tag_info *tag,
	struct ec3_tag_ioctx *ioctx,
	struct shadow_image *dest)
{
	char *buf = malloc(src->io_header.img_cluster_size);
	if (!buf) {
		return EC3_ERR_NO_MEMORY;
	}
	enum ec3_status status = EC3_SUCCESS;
	size_t logical_cluster;
	size_t nr_clusters = SIZE_T_MAX;
	ec3_image_ioctx_get_tag_nr_clusters(src, tag, &nr_clusters);
	for (logical_cluster = 0; logical_cluster < nr_clusters;) {
		/* a cached (possibly dirty) copy takes precedence */
		status = copy_cached_cluster(dest, ioctx, logical_cluster, buf);
		if (status == EC3_SUCCESS) {
			logical_cluster++;
			continue;
		}
		if (status != EC3_ERR_NO_ENTRY) {
			break;
		}
		/* try and find an extent that covers this logical
		   cluster */
		const struct ec3_extent_info *x
			= get_extent_for_logical_cluster(
				src,
				tag,
				logical_cluster);
		if (!x) {
			/* hole: emit an encoded zero cluster. check the
			 * result and advance -- previously the return was
			 * ignored and the loop never advanced, spinning
			 * forever on the same cluster */
			status = put_empty_cluster(
				dest,
				ioctx,
				logical_cluster,
				buf);
			if (status != EC3_SUCCESS) {
				break;
			}
			logical_cluster++;
			continue;
		}
		status = copy_cluster_range(
			src,
			dest,
			ioctx,
			tag,
			x,
			logical_cluster,
			buf);
		if (status != EC3_SUCCESS) {
			break;
		}
		/* copy_cluster_range consumed everything up to the end of
		 * the extent; jump there rather than adding ex_count, which
		 * overshot when we entered the extent mid-way */
		logical_cluster = x->ex_logical_cluster + x->ex_count;
	}
	free(buf);
	return status;
}
/* append one tag's clusters to the shadow image and record its extent and
 * tag-table entries. sequential-write tags are streamed from their temp
 * file; all other tags are rebuilt cluster by cluster. */
enum ec3_status shadow_image_write_tag(
	struct ec3_image_ioctx *src,
	const struct ec3_tag_info *tag,
	struct ec3_tag_ioctx *ioctx,
	struct shadow_image *dest)
{
	enum ec3_status status = EC3_SUCCESS;
	size_t first_cluster = dest->img_nr_clusters;
	/* NOTE(review): the NULL check only guards the mode test; a NULL
	 * ioctx is still handed to write_rw_tag, which passes it to
	 * copy_cached_cluster -- confirm callers never pass NULL */
	if (ioctx && (ioctx->io_mode & EC3_TAG_IO_SEQUENTIAL)) {
		status = write_wseq_tag(src, tag, ioctx, dest);
	} else {
		status = write_rw_tag(src, tag, ioctx, dest);
	}
	if (status != EC3_SUCCESS) {
		return status;
	}
	size_t last_cluster = dest->img_nr_clusters;
	/* record a single extent mapping the tag's logical clusters
	 * [0, count) onto the physical clusters just appended.
	 * NOTE(review): push_back's result is unchecked; if it can fail,
	 * 'extent' stays NULL and is dereferenced below */
	struct ec3_extent_info *extent = NULL;
	b_buffer_push_back(dest->img_extent_table, 1, (void **)&extent);
	extent->ex_owner = tag->tag_ident;
	extent->ex_logical_cluster = 0;
	extent->ex_physical_cluster = first_cluster;
	extent->ex_count = last_cluster - first_cluster;
	b_buffer_append(dest->img_tag_table, tag, 1);
	return status;
}

39
src/shadow-image.h Normal file
View File

@@ -0,0 +1,39 @@
#ifndef SHADOW_IMAGE_H_
#define SHADOW_IMAGE_H_
#include "cluster-table.h"
#include "status.h"
#include <stddef.h>
struct b_file;
struct b_buffer;
struct ec3_image_ioctx;
struct ec3_tag_ioctx;
struct ec3_tag_info;
/* staging area for rewriting an image: cluster data is appended to a
 * shadow of the main image file while the cluster, extent, and tag tables
 * are accumulated separately, then everything is stitched together and
 * swapped in by shadow_image_finish(). */
struct shadow_image {
	/* number of clusters appended to the shadow so far */
	size_t img_nr_clusters;
	/* logical size of the data file (header + cluster data) in bytes */
	size_t img_nr_bytes;
	/* shadow of the main image file; becomes the new image on finish */
	struct b_file *img_f_data;
	/* temp file backing img_cluster_table during construction */
	struct b_file *img_f_cluster_table;
	/* cluster table under construction, backed by img_f_cluster_table */
	struct cluster_table img_cluster_table;
	/* growable array of struct ec3_extent_info, one entry per tag */
	struct b_buffer *img_extent_table;
	/* growable array of struct ec3_tag_info */
	struct b_buffer *img_tag_table;
};
/* begin a shadow rewrite of 'parent'; 'out' need not be initialised */
extern enum ec3_status shadow_image_init(
	struct ec3_image_ioctx *parent,
	struct shadow_image *out);
/* finalise the shadow and atomically replace the parent's image file */
extern enum ec3_status shadow_image_finish(
	struct ec3_image_ioctx *parent,
	struct shadow_image *image);
/* abandon a shadow rewrite, releasing any opened resources */
extern void shadow_image_cancel(struct shadow_image *image);
/* append one tag's clusters plus its extent/tag-table entries */
extern enum ec3_status shadow_image_write_tag(
	struct ec3_image_ioctx *src,
	const struct ec3_tag_info *tag,
	struct ec3_tag_ioctx *ioctx,
	struct shadow_image *dest);
#endif

31
src/status.c Normal file
View File

@@ -0,0 +1,31 @@
#include "status.h"
/* map a blue library status to an ec3 status. successful b_status values
 * map to EC3_SUCCESS; any failure maps to 'default_value'. (a
 * finer-grained per-error translation can be layered in later.)
 * previously 'status' was ignored entirely, so callers that converted a
 * B_SUCCESS -- e.g. align_data_file's tail call -- received the default
 * error code even on success. */
enum ec3_status ec3_status_from_b_status(
	b_status status,
	enum ec3_status default_value)
{
	if (B_OK(status)) {
		return EC3_SUCCESS;
	}
	return default_value;
}
/* expand an enum constant into a case that returns its name */
#define ENUM_STR(v) \
	case v: \
		return #v
/* return the symbolic name of 'status', or "" for unknown values */
const char *ec3_status_to_string(enum ec3_status status)
{
	switch (status) {
	ENUM_STR(EC3_SUCCESS);
	ENUM_STR(EC3_ERR_NO_MEMORY);
	ENUM_STR(EC3_ERR_NO_ENTRY);
	ENUM_STR(EC3_ERR_NOT_SUPPORTED);
	ENUM_STR(EC3_ERR_BAD_STATE);
	ENUM_STR(EC3_ERR_BAD_FORMAT);
	ENUM_STR(EC3_ERR_INVALID_VALUE);
	ENUM_STR(EC3_ERR_NAME_EXISTS);
	ENUM_STR(EC3_ERR_IO_FAILURE);
	ENUM_STR(EC3_ERR_END_OF_FILE);
	ENUM_STR(EC3_ERR_OUT_OF_BOUNDS);
	default:
		return "";
	}
}

View File

@@ -1,6 +1,8 @@
#ifndef STATUS_H_
#define STATUS_H_
#include <blue/core/status.h>
enum ec3_status {
EC3_SUCCESS = 0,
EC3_ERR_NO_MEMORY,
@@ -12,6 +14,12 @@ enum ec3_status {
EC3_ERR_NAME_EXISTS,
EC3_ERR_IO_FAILURE,
EC3_ERR_END_OF_FILE,
EC3_ERR_OUT_OF_BOUNDS,
};
extern enum ec3_status ec3_status_from_b_status(
b_status status,
enum ec3_status default_value);
extern const char *ec3_status_to_string(enum ec3_status status);
#endif

View File

@@ -4,14 +4,6 @@
#include <stdlib.h>
#include <string.h>
struct string_table_entry {
b_btree_node e_hash_node;
b_btree_node e_offset_node;
size_t e_hash;
size_t e_offset;
char e_str[];
};
B_BTREE_DEFINE_SIMPLE_INSERT(
struct string_table_entry,
e_hash_node,
@@ -105,6 +97,7 @@ size_t string_table_get(struct string_table *tab, const char *s)
put_string_by_hash(&tab->s_hash_tree, entry);
put_string_by_offset(&tab->s_offset_tree, entry);
tab->s_nr_entries++;
return entry->e_offset;
}

View File

@@ -3,10 +3,19 @@
#include <blue/core/btree.h>
struct string_table_entry {
b_btree_node e_hash_node;
b_btree_node e_offset_node;
size_t e_hash;
size_t e_offset;
char e_str[];
};
struct string_table {
b_btree s_hash_tree;
b_btree s_offset_tree;
size_t s_next_offset;
size_t s_nr_entries;
};
extern void string_table_init(struct string_table *out);

275
src/tag.c Normal file
View File

@@ -0,0 +1,275 @@
#include "tag.h"
#include "image.h"
#include "pipeline.h"
#include "status.h"
#include <blue/io/file.h>
#include <stdlib.h>
#include <string.h>
/* open the two temp files used by a sequential-write tag -- one for
 * encoded cluster data, one backing the staged cluster table -- and
 * initialise the cluster table on the latter. on failure nothing is left
 * open (the first temp file used to leak when the second open failed). */
static enum ec3_status init_seq_write_temp_files(struct ec3_tag_ioctx *tag)
{
	b_status status = b_file_open_temp(
		B_FILE_READ_WRITE | B_FILE_BINARY,
		&tag->io_f_data);
	if (!B_OK(status)) {
		return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
	}
	status = b_file_open_temp(
		B_FILE_READ_WRITE | B_FILE_BINARY,
		&tag->io_f_cluster_table);
	if (!B_OK(status)) {
		/* don't leak the data temp file opened above */
		b_file_release(tag->io_f_data);
		tag->io_f_data = NULL;
		return ec3_status_from_b_status(status, EC3_ERR_IO_FAILURE);
	}
	cluster_table_init(&tag->io_cluster_table, tag->io_f_cluster_table, 0);
	return EC3_SUCCESS;
}
/* allocate and initialise a tag io context. read + sequential is an
 * invalid combination. sequential-write contexts get temp files for data
 * and cluster table; all other modes get a cluster cache (write-back when
 * writable). the caller owns the result and releases it with
 * ec3_tag_ioctx_close(). */
enum ec3_status ec3_tag_ioctx_create(
	struct ec3_tag_ioctx **out,
	size_t cluster_size,
	enum ec3_tag_ioctx_mode mode)
{
	if ((mode & EC3_TAG_IO_READ) && (mode & EC3_TAG_IO_SEQUENTIAL)) {
		return EC3_ERR_INVALID_VALUE;
	}
	struct ec3_tag_ioctx *ioctx = malloc(sizeof *ioctx);
	if (!ioctx) {
		return EC3_ERR_NO_MEMORY;
	}
	memset(ioctx, 0x0, sizeof *ioctx);
	ioctx->io_mode = mode;
	ioctx->io_cluster_buf = malloc(cluster_size);
	if (!ioctx->io_cluster_buf) {
		free(ioctx);
		return EC3_ERR_NO_MEMORY;
	}
	enum ec3_status status = EC3_SUCCESS;
	if (mode & EC3_TAG_IO_SEQUENTIAL) {
		status = init_seq_write_temp_files(ioctx);
	} else {
		status = cluster_cache_init(
			&ioctx->io_cache,
			(mode & EC3_TAG_IO_WRITE) == EC3_TAG_IO_WRITE,
			cluster_size);
	}
	if (status != EC3_SUCCESS) {
		/* cannot use ec3_tag_ioctx_close() here: the ioctx has no
		 * parent yet, and close() dereferences io_parent for
		 * non-write contexts. tear down manually instead. */
		if (ioctx->io_f_data) {
			b_file_release(ioctx->io_f_data);
		}
		if (ioctx->io_f_cluster_table) {
			b_file_release(ioctx->io_f_cluster_table);
		}
		free(ioctx->io_cluster_buf);
		free(ioctx);
		return status;
	}
	*out = ioctx;
	return EC3_SUCCESS;
}
/* close a tag io context. write contexts are only marked closed and
 * retained -- their staged data is consumed by the image ioctx when the
 * image itself is finished. read-only contexts are unlinked from the
 * parent and freed immediately. */
enum ec3_status ec3_tag_ioctx_close(struct ec3_tag_ioctx *tag)
{
	enum ec3_tag_ioctx_mode mode = tag->io_mode;
	tag->io_mode |= EC3_TAG_IO_CLOSED;
	/* NOTE(review): io_cluster_buf is freed here but not set to NULL;
	 * on the write path below the struct is retained, so anything that
	 * touches io_cluster_buf afterwards would use a dangling pointer --
	 * confirm the image ioctx never does */
	if (tag->io_cluster_buf) {
		free(tag->io_cluster_buf);
	}
	if (mode & EC3_TAG_IO_WRITE) {
		/* leave the rest of the data for the image ioctx to use */
		return EC3_SUCCESS;
	}
	b_btree_delete(&tag->io_parent->io_opened_tags, &tag->io_node);
	free(tag);
	return EC3_SUCCESS;
}
/* read one decoded logical cluster from a tag into 'buf' (which must hold
 * at least one cluster); *nr_read receives the decoded length. the cache
 * is consulted first; on a miss the cluster is located through the image's
 * logical-to-physical mapping, read from the image file, decoded through
 * the tag's pipeline, and cached. only valid for non-sequential read
 * contexts. */
enum ec3_status ec3_tag_ioctx_read_cluster(
	struct ec3_tag_ioctx *tag,
	size_t cluster_id,
	void *buf,
	size_t *nr_read)
{
	if (!(tag->io_mode & EC3_TAG_IO_READ)) {
		return EC3_ERR_NOT_SUPPORTED;
	}
	if (tag->io_mode & EC3_TAG_IO_SEQUENTIAL) {
		return EC3_ERR_NOT_SUPPORTED;
	}
	enum ec3_status status
		= cluster_cache_get(&tag->io_cache, cluster_id, buf, nr_read);
	if (status == EC3_SUCCESS) {
		return EC3_SUCCESS;
	}
	size_t physical_cluster_id = 0;
	status = ec3_image_ioctx_cluster_logical_to_physical(
		tag->io_parent,
		tag->io_tag_info,
		cluster_id,
		&physical_cluster_id);
	if (status != EC3_SUCCESS) {
		return status;
	}
	/* the cluster isn't in the cache, and needs to be read from the main
	 * image */
	struct cluster cluster_info;
	status = cluster_table_get(
		&tag->io_cluster_table,
		physical_cluster_id,
		&cluster_info);
	if (status != EC3_SUCCESS) {
		return status;
	}
	if (cluster_info.c_len > tag->io_parent->io_header.img_cluster_size) {
		return EC3_ERR_BAD_FORMAT;
	}
	size_t offset = tag->io_cluster_data_offset + cluster_info.c_base;
	size_t r;
	enum b_status status2 = b_file_read(
		tag->io_f_image,
		offset,
		cluster_info.c_len,
		tag->io_cluster_buf,
		&r);
	if (!B_OK(status2)) {
		return ec3_status_from_b_status(status2, EC3_ERR_IO_FAILURE);
	}
	/* a short read means the image is truncated or the cluster table
	 * is wrong; previously this went undetected and stale buffer bytes
	 * were fed into the decoder */
	if (r != cluster_info.c_len) {
		return EC3_ERR_BAD_FORMAT;
	}
	status = ec3_pipeline_decode_cluster(
		tag->io_pipeline,
		tag->io_cluster_buf,
		cluster_info.c_len,
		buf,
		tag->io_parent->io_header.img_cluster_size,
		nr_read);
	if (status != EC3_SUCCESS) {
		return status;
	}
	/* cache the decoded cluster for subsequent reads; a cache failure
	 * is non-fatal since the data is already in 'buf' */
	cluster_cache_put(&tag->io_cache, cluster_id, buf, *nr_read);
	return EC3_SUCCESS;
}
/* write one decoded logical cluster to a tag. in buffered (non-sequential)
 * mode the cluster is staged in the cache and encoded later; in
 * sequential mode it is encoded immediately and appended to the tag's
 * data temp file, and clusters must be written exactly once in ascending
 * order. *nr_written receives the number of bytes accepted. */
enum ec3_status ec3_tag_ioctx_write_cluster(
	struct ec3_tag_ioctx *tag,
	size_t cluster_id,
	void *buf,
	size_t len,
	size_t *nr_written)
{
	if (!(tag->io_mode & EC3_TAG_IO_WRITE)) {
		return EC3_ERR_NOT_SUPPORTED;
	}
	enum ec3_status status = EC3_SUCCESS;
	if (!(tag->io_mode & EC3_TAG_IO_SEQUENTIAL)) {
		/* buffered mode: stage in the cache. pass the cache handle,
		 * not io_cluster_buf -- the old call handed the scratch
		 * buffer where cluster_cache_put expects the cache */
		status = cluster_cache_put(
			&tag->io_cache,
			cluster_id,
			buf,
			len);
		if (status != EC3_SUCCESS) {
			return status;
		}
		*nr_written = len;
		return EC3_SUCCESS;
	}
	if (cluster_id != tag->io_seq_cluster_id) {
		/* can only write to each cluster once, sequentially, in this
		 * mode */
		return EC3_ERR_NOT_SUPPORTED;
	}
	size_t offset = 0;
	b_file_size(tag->io_f_data, &offset);
	size_t encoded_len = 0;
	status = ec3_pipeline_encode_cluster(
		tag->io_pipeline,
		buf,
		len,
		tag->io_cluster_buf,
		tag->io_parent->io_header.img_cluster_size,
		&encoded_len);
	if (status != EC3_SUCCESS) {
		/* encode failures were previously ignored, writing garbage */
		return status;
	}
	b_status status2 = b_file_write(
		tag->io_f_data,
		offset,
		encoded_len,
		tag->io_cluster_buf,
		nr_written);
	if (!B_OK(status2)) {
		return ec3_status_from_b_status(status2, EC3_ERR_IO_FAILURE);
	}
	if (*nr_written != encoded_len) {
		return EC3_ERR_IO_FAILURE;
	}
	struct cluster cluster = {0};
	cluster.c_id = cluster_id;
	cluster.c_base = offset;
	cluster.c_len = *nr_written;
	cluster_table_put(&tag->io_cluster_table, &cluster);
	tag->io_seq_cluster_id++;
	return EC3_SUCCESS;
}
/* compute how many logical clusters this tag addresses: the end of its
 * highest extent, extended by any higher cluster sitting in the cache. */
enum ec3_status ec3_tag_ioctx_get_nr_clusters(
	struct ec3_tag_ioctx *tag,
	size_t *out_nr_clusters)
{
	const struct ec3_extent_info *table
		= ec3_image_ioctx_get_extent_info(tag->io_parent);
	size_t total = tag->io_parent->io_header.img_nr_extents;
	size_t limit = 0;
	for (size_t idx = 0; idx < total; idx++) {
		const struct ec3_extent_info *candidate = &table[idx];
		if (candidate->ex_owner != tag->io_tag_info->tag_ident) {
			continue;
		}
		size_t end = candidate->ex_logical_cluster
			+ candidate->ex_count;
		if (end > limit) {
			limit = end;
		}
	}
	size_t cached_max;
	enum ec3_status status = cluster_cache_get_highest_cluster_id(
		&tag->io_cache,
		&cached_max);
	if (status == EC3_SUCCESS && cached_max >= limit) {
		limit = cached_max + 1;
	}
	*out_nr_clusters = limit;
	return EC3_SUCCESS;
}

View File

@@ -1,4 +1,101 @@
#ifndef TAG_H_
#define TAG_H_
#include "cluster-cache.h"
#include "cluster-table.h"
#include <blue/core/btree.h>
struct ec3_image_ioctx;
struct ec3_pipeline;
struct ec3_tag_info;
struct b_file;
/* access mode flags for a tag io context. READ and SEQUENTIAL are
 * mutually exclusive (rejected by ec3_tag_ioctx_create). */
enum ec3_tag_ioctx_mode {
	EC3_TAG_IO_READ = 0x01u,
	EC3_TAG_IO_WRITE = 0x02u,
	EC3_TAG_IO_READWRITE = 0x03u,
	EC3_TAG_IO_SEQUENTIAL = 0x04u,
	EC3_TAG_IO_CLOSED = 0x10u,
};
/* per-tag io state; created via the parent ec3_image_ioctx */
struct ec3_tag_ioctx {
	struct ec3_image_ioctx *io_parent;
	/* links this context into the parent's tree of opened tags */
	b_btree_node io_node;
	enum ec3_tag_ioctx_mode io_mode;
	struct ec3_pipeline *io_pipeline;
	/* temp buffer of cluster size, used for pipeline operations */
	void *io_cluster_buf;
	/* the offset to the start of the cluster data region in io_f_image
	 * (added to each cluster's c_base when reading) */
	size_t io_cluster_data_offset;
	/* this points to memory belonging to ec3_image_ioctx */
	const struct ec3_tag_info *io_tag_info;
	/* io_f_image is a reference to the main image file. the data and
	 * cluster table offsets will be used to read clusters from the
	 * image if the tag is open in read-only mode. if the tag is open
	 * for read-write or write-only access, io_f_image is only used to
	 * prepare the io_f_data cache, and to write the data back to disk
	 * when the ioctx is closed.
	 */
	struct b_file *io_f_image;
	/* for sequential write access, cluster data is written to this temp
	 * file. when the tag ioctx is closed, the data in this file will
	 * be copied to the main image */
	struct b_file *io_f_data;
	/* in all io modes except for sequential write-only, cluster data read
	 * from the image is stored in the cluster cache, and any modified
	 * clusters are written to the cache. if the tag ioctx is closed with
	 * dirty clusters still in the cache, it is retained by the image ioctx
	 * until that too is closed, when the image file is written to disk
	 * including the modified clusters.
	 *
	 * in sequential write-only mode, cluster data is immediately encoded
	 * and written straight to a temp file. */
	struct cluster_cache io_cache;
	/* if the tag is open for sequential write-only access,
	 * io_f_cluster_table will be a reference to a temporary binary file
	 * where the tag's cluster table entries will be stored.
	 * io_cluster_table will also reference this file.
	 *
	 * for read-only access, this pointer is null.
	 *
	 * for read-write access, cluster table construction is deferred until
	 * the ioctx is closed and the clusters are encoded and written to the
	 * main image file, so this pointer will be null. */
	struct b_file *io_f_cluster_table;
	/* cluster table data structure backed by io_f_cluster_table */
	struct cluster_table io_cluster_table;
	/* how many clusters have been written sequentially to this tag */
	size_t io_seq_cluster_id;
};
/* allocate a tag io context; READ|SEQUENTIAL is rejected */
extern enum ec3_status ec3_tag_ioctx_create(
	struct ec3_tag_ioctx **out,
	size_t cluster_size,
	enum ec3_tag_ioctx_mode mode);
/* close a context; write contexts are retained by the parent image */
extern enum ec3_status ec3_tag_ioctx_close(struct ec3_tag_ioctx *tag);
/* read one decoded cluster; not valid for sequential contexts */
extern enum ec3_status ec3_tag_ioctx_read_cluster(
	struct ec3_tag_ioctx *tag,
	size_t cluster_id,
	void *buf,
	size_t *nr_read);
/* write one cluster; sequential contexts require ascending ids */
extern enum ec3_status ec3_tag_ioctx_write_cluster(
	struct ec3_tag_ioctx *tag,
	size_t cluster_id,
	void *buf,
	size_t len,
	size_t *nr_written);
/* number of logical clusters addressable in this tag */
extern enum ec3_status ec3_tag_ioctx_get_nr_clusters(
	struct ec3_tag_ioctx *tag,
	size_t *nr_clusters);
#endif

View File

@@ -1,6 +1,7 @@
#include "commands.h"
#include "image.h"
#include "misc.h"
#include "read.h"
#include "tag.h"
#include <blue/cmd.h>
#include <blue/term.h>
@@ -26,29 +27,22 @@ static int unwrap(
0,
&in_path);
FILE *inp = fopen(in_path, "rb");
if (!inp) {
b_err("cannot open '%s'", in_path);
b_i("reason: %s", strerror(errno));
return -1;
}
struct ec3_reader *reader = NULL;
enum ec3_status status = ec3_reader_create(inp, &reader);
struct ec3_image_ioctx *image = NULL;
enum ec3_status status = ec3_image_ioctx_open(
in_path,
NULL,
EC3_IMAGE_IO_READ,
&image);
if (status != EC3_SUCCESS) {
fclose(inp);
b_err("cannot open '%s'", in_path);
b_i("reason: corrupted/unknown file format");
return -1;
}
const struct ec3_container_info *c_info
= ec3_reader_get_container_info(reader);
if (c_info->c_nr_tags == 0) {
ec3_reader_finish(reader);
fclose(inp);
const struct ec3_image_info *c_info = ec3_image_ioctx_get_info(image);
if (c_info->img_nr_tags == 0) {
ec3_image_ioctx_close(image);
b_i("container '%s' has no tags", in_path);
return -1;
@@ -68,37 +62,41 @@ static int unwrap(
target_tag_ident_str,
&target_tag_ident);
} else {
const struct ec3_tag_info *tags = ec3_reader_get_tags(reader);
const struct ec3_tag_info *tags
= ec3_image_ioctx_get_tag_info(image);
target_tag_ident = tags[0].tag_ident;
}
char ident_str[32];
ec3_identifier_to_string(target_tag_ident, ident_str, sizeof ident_str);
struct ec3_tag_reader *tag_reader = NULL;
status = ec3_reader_open_tag(reader, target_tag_ident, &tag_reader);
struct ec3_tag_ioctx *tag = NULL;
status = ec3_image_ioctx_open_tag_by_id(
image,
target_tag_ident,
EC3_TAG_IO_READ,
&tag);
if (status != EC3_SUCCESS) {
b_i("container '%s' has no %s tag", in_path, ident_str);
ec3_reader_finish(reader);
fclose(inp);
ec3_image_ioctx_close(image);
return -1;
}
const struct ec3_tag_info *target_tag_info
= ec3_reader_get_tag_info(reader, target_tag_ident);
= ec3_image_ioctx_get_tag_info_by_id(image, target_tag_ident);
size_t tag_length = target_tag_info->tag_total_length;
printf("reading %zu bytes from tag %s...\n", tag_length, ident_str);
char *buf = malloc(c_info->c_cluster_size);
char *buf = malloc(c_info->img_cluster_size);
int ret = 0;
FILE *outp = fopen("data.bin", "wb");
for (size_t i = 0;; i++) {
size_t nr_read = 0;
enum ec3_status status
= ec3_tag_reader_read(tag_reader, i, 1, buf, &nr_read);
= ec3_tag_ioctx_read_cluster(tag, i, buf, &nr_read);
if (status != EC3_SUCCESS) {
b_err("I/O failure while reading from container",
@@ -110,15 +108,16 @@ static int unwrap(
printf("read %zu bytes from cluster %zu\n", nr_read, i);
fwrite(buf, 1, nr_read, outp);
if (nr_read < c_info->c_cluster_size) {
if (nr_read < c_info->img_cluster_size) {
break;
}
}
free(buf);
fclose(inp);
fclose(outp);
ec3_tag_ioctx_close(tag);
ec3_image_ioctx_close(image);
return ret;
}

0
src/volume.c Normal file
View File

0
src/volume.h Normal file
View File

View File

@@ -1,10 +1,12 @@
#include "bin.h"
#include "commands.h"
#include "write.h"
#include "image.h"
#include <blue/cmd.h>
#include <blue/io/path.h>
#include <blue/term.h>
#include <errno.h>
#include <stdlib.h>
enum {
OPT_OUTPATH,
@@ -28,7 +30,7 @@ enum {
};
static enum ec3_status add_file(
struct ec3_writer *writer,
struct ec3_image_ioctx *image,
unsigned long type,
uint64_t id,
const char *path)
@@ -40,32 +42,40 @@ static enum ec3_status add_file(
return EC3_ERR_NO_ENTRY;
}
struct ec3_tag_writer *tag = NULL;
enum ec3_status status = ec3_writer_create_tag(
writer,
EC3_TAG_WRITER_BUFFERED,
struct ec3_tag_ioctx *tag = NULL;
enum ec3_status status = ec3_image_ioctx_create_tag(
image,
type,
id,
0,
EC3_TAG_IO_WRITE | EC3_TAG_IO_SEQUENTIAL,
&tag);
if (status != EC3_SUCCESS) {
b_err("cannot initialise EC3 tag writer");
b_i("reason: %s", ec3_status_to_string(status));
return status;
}
char buf[4096];
const struct ec3_image_info *image_info
= ec3_image_ioctx_get_info(image);
size_t cluster_size = image_info->img_cluster_size;
char *buf = malloc(cluster_size);
size_t i = 0;
while (1) {
size_t r = fread(buf, 1, sizeof buf, inp);
status = ec3_tag_writer_write(tag, buf, r);
size_t r = fread(buf, 1, cluster_size, inp);
size_t w;
status = ec3_tag_ioctx_write_cluster(tag, i++, buf, r, &w);
if (r < sizeof buf) {
if (r < cluster_size) {
break;
}
}
ec3_tag_writer_finish(tag);
free(buf);
ec3_tag_ioctx_close(tag);
fclose(inp);
return EC3_SUCCESS;
@@ -79,13 +89,6 @@ static int wrap(
const char *out_path = NULL;
b_arglist_get_string(opt, OPT_OUTPATH, OPT_OUTPATH_PATH, 0, &out_path);
FILE *outp = fopen(out_path, "wb");
if (!outp) {
b_err("cannot open '%s'", out_path);
b_i("reason: %s", strerror(errno));
return -1;
}
enum ec3_status status = EC3_SUCCESS;
uint64_t ident = 0;
@@ -100,14 +103,23 @@ static int wrap(
return -1;
}
struct ec3_writer *writer = NULL;
b_path *image_path = b_path_create_from_cstr(out_path);
if (b_path_exists(image_path)) {
b_path_unlink(image_path);
}
b_path_release(image_path);
struct ec3_image_ioctx *image = NULL;
struct ec3_parameters param = {
.p_outp = outp,
.p_cluster_size = EC3_CLUSTER_16K,
.p_compression_func = EC3_COMPRESSION_ZSTD,
.p_ident = ident,
};
status = ec3_writer_create(&param, &writer);
status = ec3_image_ioctx_open(
out_path,
&param,
EC3_IMAGE_IO_WRITE,
&image);
if (status != EC3_SUCCESS) {
b_err("cannot initialise EC3 writer");
@@ -122,7 +134,7 @@ static int wrap(
printf("%s\n", it.value->val_str);
#if 1
status = add_file(
writer,
image,
EC3_TAG_BLOB,
next_auto_id,
it.value->val_str);
@@ -141,7 +153,7 @@ static int wrap(
printf("%s\n", it.value->val_str);
#if 1
status = add_file(
writer,
image,
EC3_TAG_EXEC,
next_auto_id,
it.value->val_str);
@@ -189,7 +201,7 @@ static int wrap(
return -1;
}
status = add_file(writer, EC3_TAG_BLOB, id, path->val_str);
status = add_file(image, EC3_TAG_BLOB, id, path->val_str);
if (status != EC3_SUCCESS) {
b_err("an error occurred while writing to the "
@@ -233,7 +245,7 @@ static int wrap(
return -1;
}
status = add_file(writer, EC3_TAG_EXEC, id, path->val_str);
status = add_file(image, EC3_TAG_EXEC, id, path->val_str);
if (status != EC3_SUCCESS) {
b_err("an error occurred while writing to the "
@@ -243,9 +255,7 @@ static int wrap(
#endif
}
ec3_writer_finish(writer);
fclose(outp);
ec3_image_ioctx_close(image);
return 0;
}

View File

@@ -1,3 +1,4 @@
#if 0
#include "write.h"
#include "bin.h"
@@ -195,10 +196,46 @@ static enum ec3_status flush_extent_entry(struct ec3_writer *w)
w->w_extent_nr_clusters);
}
static enum ec3_status write_string_table(struct ec3_writer *w)
{
struct ec3_tag_writer *stab;
enum ec3_status status
= ec3_writer_create_tag(w, 0, EC3_TAG_STAB, 0, 0, &stab);
if (status != EC3_SUCCESS) {
return status;
}
b_btree_iterator it;
b_btree_foreach(&it, &w->w_strings.s_offset_tree)
{
struct string_table_entry *entry = b_unbox(
struct string_table_entry,
it.node,
e_offset_node);
ec3_tag_writer_write(
stab,
entry->e_str,
strlen(entry->e_str) + 1);
}
ec3_tag_writer_finish(stab);
return EC3_SUCCESS;
}
void ec3_writer_finish(struct ec3_writer *w)
{
enum ec3_status status = EC3_SUCCESS;
if (w->w_strings.s_nr_entries > 0) {
status = write_string_table(w);
}
if (status != EC3_SUCCESS) {
return;
}
if (w->w_extent_nr_clusters > 0) {
status = flush_extent_entry(w);
}
@@ -240,6 +277,11 @@ void ec3_writer_write_image(struct ec3_writer *w, FILE *fp)
{
}
struct string_table *ec3_writer_get_strings(struct ec3_writer *w)
{
return &w->w_strings;
}
static bool is_tag_ident_free(struct ec3_writer *w, uint64_t ident)
{
b_queue_iterator it = {0};
@@ -300,9 +342,16 @@ enum ec3_status ec3_writer_create_tag(
}
if (status != EC3_SUCCESS) {
fclose(tag->w_data);
fclose(tag->w_cluster_table);
free(tag);
return status;
}
if (tag_ident == 0) {
tag->w_ident = w->w_next_tag_ident++;
}
tag->w_index = w->w_nr_tags++;
*out_writer = tag;
return EC3_SUCCESS;
@@ -326,6 +375,7 @@ static enum ec3_status flush_tag_buffer(struct ec3_tag_writer *w)
status = ec3_pipeline_write_cluster(
pipeline,
w->w_nr_clusters,
buf,
w->w_ptr,
&nr_written);
@@ -452,3 +502,4 @@ enum ec3_status ec3_tag_writer_finish(struct ec3_tag_writer *w)
return status;
}
#endif

View File

@@ -31,6 +31,7 @@ extern enum ec3_status ec3_writer_create(
struct ec3_writer **out);
extern void ec3_writer_finish(struct ec3_writer *w);
extern void ec3_writer_write_image(struct ec3_writer *w, FILE *fp);
extern struct string_table *ec3_writer_get_strings(struct ec3_writer *w);
extern enum ec3_status ec3_writer_create_tag(
struct ec3_writer *w,