From 34ab04fe4edb76f7d3fbc6f960e1dda7a3ef108b Mon Sep 17 00:00:00 2001 From: Max Wash Date: Mon, 23 Jun 2025 13:21:36 +0100 Subject: [PATCH] pipeline: handle incompressible clusters --- src/aes256.c | 5 ++-- src/bin.h | 10 +++++--- src/pipeline.c | 43 +++++++++++++++++++++++++++------- src/pipeline.h | 44 +++++++++++++++++++++++++++++++++-- src/shadow-image.c | 26 +++++++++++++++++---- src/tag.c | 24 ++++++++++++++++++- src/zstd.c | 57 ++++++++++++++++++++++++++++++++++++++++++---- 7 files changed, 183 insertions(+), 26 deletions(-) diff --git a/src/aes256.c b/src/aes256.c index 679f7ad..32b73dc 100644 --- a/src/aes256.c +++ b/src/aes256.c @@ -1,6 +1,6 @@ #include "pipeline.h" -static enum ec3_status encrypt( +static struct ec3_pipeline_stage_result encrypt( struct ec3_pipeline_stage *stage, const void *src, size_t len, @@ -8,7 +8,7 @@ static enum ec3_status encrypt( size_t dest_max, size_t *nr_written) { - return EC3_SUCCESS; + return EC3_PIPELINE_STAGE_RESULT_OK(); } static enum ec3_status decrypt( @@ -24,6 +24,7 @@ static enum ec3_status decrypt( const struct ec3_pipeline_stage_type pipeline_aes256 = { .t_id = EC3_PIPELINE_AES256, + .t_class = EC3_PIPELINE_CLASS_ENCRYPTION, .t_flags = EC3_PIPELINE_F_NONE, .t_cluster_in = decrypt, .t_cluster_out = encrypt, diff --git a/src/bin.h b/src/bin.h index 1d29457..f8a1979 100644 --- a/src/bin.h +++ b/src/bin.h @@ -19,6 +19,12 @@ enum ec3_cluster_size { EC3_CLUSTER_64K = 0x04u }; +enum ec3_cluster_flags { + EC3_CLUSTER_F_NONE = 0x00u, + EC3_CLUSTER_F_COMPRESSED = 0x01u, + EC3_CLUSTER_F_ENCRYPTED = 0x02u, +}; + enum ec3_compression_function { EC3_COMPRESSION_NONE = 0x00u, EC3_COMPRESSION_ZSTD = 0x01u @@ -43,9 +49,7 @@ enum ec3_tag_type { }; enum ec3_tag_flags { - EC3_TAG_SIGNED = 0x00000001u, - EC3_TAG_COMPRESSED = 0x00000002u, - EC3_TAG_ENCRYPTED = 0x00000004u + EC3_TAG_F_NONE = 0x00000000u, }; enum ec3_vnode_mode { diff --git a/src/pipeline.c b/src/pipeline.c index 19e15b8..42a8b93 100644 --- a/src/pipeline.c +++ 
b/src/pipeline.c @@ -6,6 +6,17 @@ #include #include +#define RESULT_OK(stages) \ + ((struct ec3_pipeline_result) { \ + .r_status = EC3_SUCCESS, \ + .r_applied_stages = (stages), \ + }) +#define RESULT_ERR(status) \ + ((struct ec3_pipeline_result) { \ + .r_status = (status), \ + .r_applied_stages = 0, \ + }) + extern const struct ec3_pipeline_stage_type pipeline_zstd; extern const struct ec3_pipeline_stage_type pipeline_aes256; @@ -117,7 +128,7 @@ void ec3_pipeline_destroy(struct ec3_pipeline *p) free(p); } -enum ec3_status ec3_pipeline_encode_cluster( +struct ec3_pipeline_result ec3_pipeline_encode_cluster( struct ec3_pipeline *pipeline, const void *in, size_t in_len, @@ -126,7 +137,7 @@ enum ec3_status ec3_pipeline_encode_cluster( size_t *out_len) { if (in_len > out_max || out_max < pipeline->p_cluster_size) { - return EC3_ERR_INVALID_VALUE; + return RESULT_ERR(EC3_ERR_INVALID_VALUE); } b_queue_entry *cur = b_queue_first(&pipeline->p_stages); @@ -139,6 +150,8 @@ enum ec3_status ec3_pipeline_encode_cluster( size_t stage_out_size = 0; size_t dest_max = pipeline->p_cluster_size; + struct ec3_pipeline_stage_result stage_result; + unsigned long stages = 0; while (cur) { struct ec3_pipeline_stage *stage @@ -151,7 +164,7 @@ enum ec3_status ec3_pipeline_encode_cluster( dest = src; } - status = stage->s_type->t_cluster_out( + stage_result = stage->s_type->t_cluster_out( stage, src, stage_in_size, @@ -159,23 +172,30 @@ enum ec3_status ec3_pipeline_encode_cluster( dest_max, &stage_out_size); - if (status != EC3_SUCCESS) { - return status; + switch (stage_result.r_result) { + case EC3_PIPELINE_STAGE_R_OK: + src = dest; + stage_in_size = stage_out_size; + stages |= stage->s_type->t_class; + break; + case EC3_PIPELINE_STAGE_R_ERROR: + return RESULT_ERR(stage_result.r_status); + case EC3_PIPELINE_STAGE_R_UNSUPPORTED: + break; } - src = dest; - stage_in_size = stage_out_size; cur = b_queue_next(cur); } memmove(out, src, MIN(stage_in_size, out_max)); *out_len = MIN(stage_in_size, 
out_max); - return EC3_SUCCESS; + return RESULT_OK(stages); } enum ec3_status ec3_pipeline_decode_cluster( struct ec3_pipeline *pipeline, + enum ec3_pipeline_stage_class stages, const void *in, size_t in_len, void *out, @@ -193,6 +213,11 @@ enum ec3_status ec3_pipeline_decode_cluster( while (cur) { struct ec3_pipeline_stage *stage = b_unbox(struct ec3_pipeline_stage, cur, s_entry); + if ((stages & stage->s_type->t_class) + != stage->s_type->t_class) { + stage_out_size = stage_in_size; + goto skip; + } void *dest; if (stage->s_type->t_flags & EC3_PIPELINE_F_BUFFERED) { @@ -215,6 +240,8 @@ enum ec3_status ec3_pipeline_decode_cluster( src = dest; stage_in_size = stage_out_size; + + skip: cur = b_queue_next(cur); } diff --git a/src/pipeline.h b/src/pipeline.h index 63fba70..5d1e05b 100644 --- a/src/pipeline.h +++ b/src/pipeline.h @@ -7,6 +7,22 @@ #include #include +#define EC3_PIPELINE_STAGE_RESULT_OK() \ + ((struct ec3_pipeline_stage_result) { \ + .r_status = EC3_SUCCESS, \ + .r_result = EC3_PIPELINE_STAGE_R_OK, \ + }) +#define EC3_PIPELINE_STAGE_RESULT_UNSUPPORTED() \ + ((struct ec3_pipeline_stage_result) { \ + .r_status = EC3_SUCCESS, \ + .r_result = EC3_PIPELINE_STAGE_R_UNSUPPORTED, \ + }) +#define EC3_PIPELINE_STAGE_RESULT_ERR(status) \ + ((struct ec3_pipeline_stage_result) { \ + .r_status = (status), \ + .r_result = EC3_PIPELINE_STAGE_R_ERROR, \ + }) + enum ec3_pipeline_stage_type_id { EC3_PIPELINE_NONE = 0, EC3_PIPELINE_AES256, @@ -18,10 +34,33 @@ enum ec3_pipeline_stage_type_flags { EC3_PIPELINE_F_BUFFERED = 0x01u, }; +enum ec3_pipeline_stage_class { + EC3_PIPELINE_CLASS_NONE = 0x00u, + EC3_PIPELINE_CLASS_COMPRESSION = 0x01u, + EC3_PIPELINE_CLASS_ENCRYPTION = 0x02u, +}; + +enum ec3_pipeline_stage_result_code { + EC3_PIPELINE_STAGE_R_OK = 0, + EC3_PIPELINE_STAGE_R_UNSUPPORTED, + EC3_PIPELINE_STAGE_R_ERROR, +}; + struct ec3_pipeline_stage; +struct ec3_pipeline_stage_result { + enum ec3_status r_status; + enum ec3_pipeline_stage_result_code r_result; +}; + +struct 
ec3_pipeline_result { + enum ec3_status r_status; + enum ec3_pipeline_stage_class r_applied_stages; +}; + struct ec3_pipeline_stage_type { enum ec3_pipeline_stage_type_id t_id; + enum ec3_pipeline_stage_class t_class; enum ec3_pipeline_stage_type_flags t_flags; enum ec3_status (*t_cluster_in)( @@ -31,7 +70,7 @@ struct ec3_pipeline_stage_type { void *, size_t, size_t *); - enum ec3_status (*t_cluster_out)( + struct ec3_pipeline_stage_result (*t_cluster_out)( struct ec3_pipeline_stage *, const void *, size_t, @@ -64,7 +103,7 @@ extern enum ec3_status ec3_pipeline_create( struct ec3_pipeline **out); extern void ec3_pipeline_destroy(struct ec3_pipeline *p); -extern enum ec3_status ec3_pipeline_encode_cluster( +extern struct ec3_pipeline_result ec3_pipeline_encode_cluster( struct ec3_pipeline *pipeline, const void *in, size_t in_len, @@ -73,6 +112,7 @@ extern enum ec3_status ec3_pipeline_encode_cluster( size_t *out_len); extern enum ec3_status ec3_pipeline_decode_cluster( struct ec3_pipeline *pipeline, + enum ec3_pipeline_stage_class stages, const void *in, size_t in_len, void *out, diff --git a/src/shadow-image.c b/src/shadow-image.c index 36a37ad..2bcb5d1 100644 --- a/src/shadow-image.c +++ b/src/shadow-image.c @@ -405,7 +405,7 @@ static enum ec3_status put_empty_cluster( memset(buf, 0x0, nr_read); size_t encoded_size = 0; - enum ec3_status status = ec3_pipeline_encode_cluster( + struct ec3_pipeline_result result = ec3_pipeline_encode_cluster( tag->io_pipeline, buf, nr_read, @@ -413,8 +413,8 @@ static enum ec3_status put_empty_cluster( cluster_size, &encoded_size); - if (status != EC3_SUCCESS) { - return status; + if (result.r_status != EC3_SUCCESS) { + return result.r_status; } size_t nr_written; @@ -438,6 +438,14 @@ static enum ec3_status put_empty_cluster( cluster.c_base = image->img_nr_bytes - image->img_cluster_data_offset; cluster.c_len = encoded_size; + if (result.r_applied_stages & EC3_PIPELINE_CLASS_ENCRYPTION) { + cluster.c_flags |= EC3_CLUSTER_F_ENCRYPTED; 
+ } + + if (result.r_applied_stages & EC3_PIPELINE_CLASS_COMPRESSION) { + cluster.c_flags |= EC3_CLUSTER_F_COMPRESSED; + } + image->img_nr_clusters++; image->img_nr_bytes += nr_read; @@ -464,7 +472,7 @@ static enum ec3_status copy_cached_cluster( } size_t encoded_size = 0; - status = ec3_pipeline_encode_cluster( + struct ec3_pipeline_result result = ec3_pipeline_encode_cluster( tag->io_pipeline, buf, nr_read, @@ -472,7 +480,7 @@ cluster_size, &encoded_size); - if (status != EC3_SUCCESS) { - return status; + if (result.r_status != EC3_SUCCESS) { + return result.r_status; } @@ -497,6 +505,14 @@ static enum ec3_status copy_cached_cluster( cluster.c_base = image->img_nr_bytes - image->img_cluster_data_offset; cluster.c_len = encoded_size; + if (result.r_applied_stages & EC3_PIPELINE_CLASS_ENCRYPTION) { + cluster.c_flags |= EC3_CLUSTER_F_ENCRYPTED; + } + + if (result.r_applied_stages & EC3_PIPELINE_CLASS_COMPRESSION) { + cluster.c_flags |= EC3_CLUSTER_F_COMPRESSED; + } + image->img_nr_clusters++; image->img_nr_bytes += nr_written; diff --git a/src/tag.c b/src/tag.c index 453f579..179839f 100644 --- a/src/tag.c +++ b/src/tag.c @@ -199,8 +199,18 @@ enum ec3_status ec3_tag_ioctx_read_cluster( return ec3_status_from_b_status(status2, EC3_ERR_IO_FAILURE); } + enum ec3_pipeline_stage_class stages = 0; + if (cluster_info.c_flags & EC3_CLUSTER_F_COMPRESSED) { + stages |= EC3_PIPELINE_CLASS_COMPRESSION; + } + + if (cluster_info.c_flags & EC3_CLUSTER_F_ENCRYPTED) { + stages |= EC3_PIPELINE_CLASS_ENCRYPTION; + } + status = ec3_pipeline_decode_cluster( tag->io_pipeline, + stages, tag->io_cluster_buf, cluster_info.c_len, buf, @@ -253,7 +263,7 @@ enum ec3_status ec3_tag_ioctx_write_cluster( b_file_size(tag->io_f_data, &offset); size_t encoded_len = 0; - ec3_pipeline_encode_cluster( + struct ec3_pipeline_result result = ec3_pipeline_encode_cluster( tag->io_pipeline, buf, len,
tag->io_parent->io_header.img_cluster_size, &encoded_len); + if (result.r_status != EC3_SUCCESS) { + return result.r_status; + } + b_status status2 = b_file_write( tag->io_f_data, offset, @@ -277,6 +291,14 @@ enum ec3_status ec3_tag_ioctx_write_cluster( cluster.c_base = offset; cluster.c_len = *nr_written; + if (result.r_applied_stages & EC3_PIPELINE_CLASS_ENCRYPTION) { + cluster.c_flags |= EC3_CLUSTER_F_ENCRYPTED; + } + + if (result.r_applied_stages & EC3_PIPELINE_CLASS_COMPRESSION) { + cluster.c_flags |= EC3_CLUSTER_F_COMPRESSED; + } + cluster_table_put(&tag->io_cluster_table, &cluster); tag->io_seq_cluster_id++; diff --git a/src/zstd.c b/src/zstd.c index 95eaf04..192f3e4 100644 --- a/src/zstd.c +++ b/src/zstd.c @@ -1,8 +1,36 @@ #include "pipeline.h" +#include #include -static enum ec3_status compress( +#define CHECK(cond, ...) \ + do { \ + if (!(cond)) { \ + fprintf(stderr, \ + "%s:%d CHECK(%s) failed: ", \ + __FILE__, \ + __LINE__, \ + #cond); \ + fprintf(stderr, "" __VA_ARGS__); \ + fprintf(stderr, "\n"); \ + exit(1); \ + } \ + } while (0) + +/*! CHECK_ZSTD + * Check the zstd error code and die if an error occurred after printing a + * message. 
+ */ +#define CHECK_ZSTD(fn) \ + do { \ + size_t const err = (fn); \ + CHECK(!ZSTD_isError(err), \ + "%s (%d)", \ + ZSTD_getErrorName(err), \ + ZSTD_getErrorCode(err)); \ + } while (0) + +static struct ec3_pipeline_stage_result compress( struct ec3_pipeline_stage *stage, const void *src, size_t len, @@ -10,8 +38,20 @@ static enum ec3_status compress( size_t dest_max, size_t *nr_written) { - *nr_written = ZSTD_compress(dest, dest_max, src, len, 10); - return EC3_SUCCESS; + size_t w = ZSTD_compress(dest, dest_max, src, len, 10); + if (!ZSTD_isError(w)) { + *nr_written = w; + return EC3_PIPELINE_STAGE_RESULT_OK(); + } + + int err = ZSTD_getErrorCode(w); + switch (err) { + case ZSTD_error_dstSize_tooSmall: + return EC3_PIPELINE_STAGE_RESULT_UNSUPPORTED(); + default: + CHECK_ZSTD(w); + return EC3_PIPELINE_STAGE_RESULT_ERR(EC3_ERR_INTERNAL_FAILURE); + } } static enum ec3_status decompress( @@ -22,12 +62,19 @@ static enum ec3_status decompress( size_t dest_max, size_t *nr_read) { - *nr_read = ZSTD_decompress(dest, dest_max, src, len); - return EC3_SUCCESS; + size_t r = ZSTD_decompress(dest, dest_max, src, len); + if (!ZSTD_isError(r)) { + *nr_read = r; + return EC3_SUCCESS; + } + + CHECK_ZSTD(r); + return EC3_ERR_INTERNAL_FAILURE; } const struct ec3_pipeline_stage_type pipeline_zstd = { .t_id = EC3_PIPELINE_ZSTD, + .t_class = EC3_PIPELINE_CLASS_COMPRESSION, .t_flags = EC3_PIPELINE_F_BUFFERED, .t_cluster_in = decompress, .t_cluster_out = compress,