diff --git a/include/avif/avif.h b/include/avif/avif.h index b690bd7795..6bd7da8bdc 100644 --- a/include/avif/avif.h +++ b/include/avif/avif.h @@ -85,6 +85,8 @@ typedef int avifBool; #define AVIF_SPEED_SLOWEST 0 #define AVIF_SPEED_FASTEST 10 +#define MAX_AV1_LAYER_COUNT 4 + typedef enum avifPlanesFlag { AVIF_PLANES_YUV = (1 << 0), @@ -146,7 +148,8 @@ typedef enum avifResult AVIF_RESULT_WAITING_ON_IO, // similar to EAGAIN/EWOULDBLOCK, this means the avifIO doesn't have necessary data available yet AVIF_RESULT_INVALID_ARGUMENT, // an argument passed into this function is invalid AVIF_RESULT_NOT_IMPLEMENTED, // a requested code path is not (yet) implemented - AVIF_RESULT_OUT_OF_MEMORY + AVIF_RESULT_OUT_OF_MEMORY, + AVIF_RESULT_INVALID_LAYERS } avifResult; AVIF_API const char * avifResultToString(avifResult result); @@ -782,9 +785,11 @@ typedef enum avifProgressiveState // for an image sequence. AVIF_PROGRESSIVE_STATE_UNAVAILABLE = 0, - // The current AVIF/Source offers a progressive image, but avifDecoder.allowProgressive is not - // enabled, so it will behave as if the image was not progressive and will simply decode the - // best version of this item. + // For decoder, this means the current AVIF/Source offers a progressive image, but + // avifDecoder.allowProgressive is not enabled, so it will behave as if the image was not + // progressive and will simply decode the best version of this item. + // For encoder, this means at least one of color and alpha image has multiple layers and + // indicates this is a progressive image. AVIF_PROGRESSIVE_STATE_AVAILABLE, // The current AVIF/Source offers a progressive image, and avifDecoder.allowProgressive is true. 
@@ -993,6 +998,20 @@ AVIF_API avifResult avifDecoderNthImageMaxExtent(const avifDecoder * decoder, ui struct avifEncoderData; struct avifCodecSpecificOptions; +typedef struct avifScalingMode +{ + uint64_t numerator; + uint64_t denominator; +} avifScalingMode; + +typedef struct avifLayerConfig +{ + int minQuantizer; + int maxQuantizer; + avifScalingMode horizontalMode; + avifScalingMode verticalMode; +} avifLayerConfig; + // Notes: // * If avifEncoderWrite() returns AVIF_RESULT_OK, output must be freed with avifRWDataFree() // * If (maxThreads < 2), multithreading is disabled @@ -1021,6 +1040,14 @@ typedef struct avifEncoder int keyframeInterval; // How many frames between automatic forced keyframes; 0 to disable (default). uint64_t timescale; // timescale of the media (Hz) + // Layers (used by progressive rendering) + // * Note: libavif currently can only properly decode images without alpha, + // or images whose extraLayerCount == extraLayerCountAlpha, if progressive decode is enabled. + int extraLayerCount; // Extra color layers; 0 for regular single-layer color image (default). + int extraLayerCountAlpha; // Extra alpha layers; 0 for regular single-layer alpha image (default). + avifLayerConfig layers[MAX_AV1_LAYER_COUNT]; + avifLayerConfig layersAlpha[MAX_AV1_LAYER_COUNT]; + // stats from the most recent write avifIOStats ioStats; @@ -1043,9 +1070,9 @@ typedef enum avifAddImageFlag // Force this frame to be a keyframe (sync frame). AVIF_ADD_IMAGE_FLAG_FORCE_KEYFRAME = (1 << 0), - // Use this flag when encoding a single image. Signals "still_picture" to AV1 encoders, which - // tweaks various compression rules. This is enabled automatically when using the - // avifEncoderWrite() single-image encode path. + // Use this flag when encoding a single frame, single layer image. + // Signals "still_picture" to AV1 encoders, which tweaks various compression rules. + // This is enabled automatically when using the avifEncoderWrite() single-image encode path. 
AVIF_ADD_IMAGE_FLAG_SINGLE = (1 << 1) } avifAddImageFlag; typedef uint32_t avifAddImageFlags; @@ -1058,17 +1085,22 @@ typedef uint32_t avifAddImageFlags; // * avifEncoderAddImage() ... [repeatedly; at least once] // OR // * avifEncoderAddImageGrid() [exactly once, AVIF_ADD_IMAGE_FLAG_SINGLE is assumed] +// OR +// * avifEncoderAddImageProgressive() [exactly once, AVIF_ADD_IMAGE_FLAG_SINGLE is assumed] // * avifEncoderFinish() // * avifEncoderDestroy() // // durationInTimescales is ignored if AVIF_ADD_IMAGE_FLAG_SINGLE is set in addImageFlags. AVIF_API avifResult avifEncoderAddImage(avifEncoder * encoder, const avifImage * image, uint64_t durationInTimescales, avifAddImageFlags addImageFlags); +// cellImages should have gridCols * gridRows * (max(encoder->extraLayerCount, encoder->extraLayerCountAlpha) + 1) elements. AVIF_API avifResult avifEncoderAddImageGrid(avifEncoder * encoder, uint32_t gridCols, uint32_t gridRows, const avifImage * const * cellImages, avifAddImageFlags addImageFlags); +// layerImages should have max(encoder->extraLayerCount, encoder->extraLayerCountAlpha) + 1 elements. +AVIF_API avifResult avifEncoderAddImageProgressive(avifEncoder * encoder, const avifImage * const * layerImages, avifAddImageFlags addImageFlags); AVIF_API avifResult avifEncoderFinish(avifEncoder * encoder, avifRWData * output); // Codec-specific, optional "advanced" tuning settings, in the form of string key/value pairs.
These diff --git a/include/avif/internal.h b/include/avif/internal.h index ace7611b25..5ea4d877a0 100644 --- a/include/avif/internal.h +++ b/include/avif/internal.h @@ -259,6 +259,7 @@ typedef avifResult (*avifCodecEncodeImageFunc)(struct avifCodec * codec, avifEncoder * encoder, const avifImage * image, avifBool alpha, + uint32_t layerIndex, avifAddImageFlags addImageFlags, avifCodecEncodeOutput * output); typedef avifBool (*avifCodecEncodeFinishFunc)(struct avifCodec * codec, avifCodecEncodeOutput * output); diff --git a/src/avif.c b/src/avif.c index 5ee6ea65d2..f52fd8d9ea 100644 --- a/src/avif.c +++ b/src/avif.c @@ -94,6 +94,7 @@ const char * avifResultToString(avifResult result) case AVIF_RESULT_INVALID_ARGUMENT: return "Invalid argument"; case AVIF_RESULT_NOT_IMPLEMENTED: return "Not implemented"; case AVIF_RESULT_OUT_OF_MEMORY: return "Out of memory"; + case AVIF_RESULT_INVALID_LAYERS: return "Invalid layer image"; case AVIF_RESULT_UNKNOWN_ERROR: default: break; diff --git a/src/codec_aom.c b/src/codec_aom.c index 44deab9bb0..cfab69d32b 100644 --- a/src/codec_aom.c +++ b/src/codec_aom.c @@ -62,8 +62,9 @@ struct avifCodecInternal aom_codec_ctx_t encoder; avifPixelFormatInfo formatInfo; aom_img_fmt_t aomFormat; + aom_codec_enc_cfg_t cfg; avifBool monochromeEnabled; - // Whether cfg.rc_end_usage was set with an + // Whether cfg->rc_end_usage was set with an // avifEncoderSetCodecSpecificOption(encoder, "end-usage", value) call. 
avifBool endUsageSet; // Whether cq-level was set with an @@ -284,6 +285,31 @@ static aom_img_fmt_t avifImageCalcAOMFmt(const avifImage * image, avifBool alpha return fmt; } +struct aomScalingModeMapList +{ + avifScalingMode avifMode; + AOM_SCALING_MODE aomMode; +}; + +static const struct aomScalingModeMapList scalingModeMap[] = { + { { 1, 1 }, AOME_NORMAL }, { { 1, 2 }, AOME_ONETWO }, { { 1, 4 }, AOME_ONEFOUR }, { { 1, 8 }, AOME_ONEEIGHT }, + { { 3, 4 }, AOME_THREEFOUR }, { { 3, 5 }, AOME_THREEFIVE }, { { 4, 5 }, AOME_FOURFIVE }, +}; + +static const int scalingModeMapSize = sizeof(scalingModeMap) / sizeof(scalingModeMap[0]); + +static avifBool avifFindAOMScalingMode(const avifScalingMode * avifMode, AOM_SCALING_MODE * aomMode) +{ + for (int i = 0; i < scalingModeMapSize; ++i) { + if (scalingModeMap[i].avifMode.numerator == avifMode->numerator && scalingModeMap[i].avifMode.denominator == avifMode->denominator) { + *aomMode = scalingModeMap[i].aomMode; + return AVIF_TRUE; + } + } + + return AVIF_FALSE; +} + #if !defined(HAVE_AOM_CODEC_SET_OPTION) static avifBool aomOptionParseInt(const char * str, int * val) { @@ -525,23 +551,29 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, avifEncoder * encoder, const avifImage * image, avifBool alpha, + uint32_t layerIndex, avifAddImageFlags addImageFlags, avifCodecEncodeOutput * output) { + uint32_t extraLayerCount = alpha ? encoder->extraLayerCountAlpha : encoder->extraLayerCount; + const avifLayerConfig * layers = alpha ? 
encoder->layersAlpha : encoder->layers; + + aom_codec_enc_cfg_t * cfg = &codec->internal->cfg; + // Map encoder speed to AOM usage + CpuUsed: + // Speed 0: GoodQuality CpuUsed 0 + // Speed 1: GoodQuality CpuUsed 1 + // Speed 2: GoodQuality CpuUsed 2 + // Speed 3: GoodQuality CpuUsed 3 + // Speed 4: GoodQuality CpuUsed 4 + // Speed 5: GoodQuality CpuUsed 5 + // Speed 6: GoodQuality CpuUsed 6 + // Speed 7: RealTime CpuUsed 7 + // Speed 8: RealTime CpuUsed 8 + // Speed 9: RealTime CpuUsed 9 + // Speed 10: RealTime CpuUsed 9 + unsigned int aomUsage = AOM_USAGE_GOOD_QUALITY; + if (!codec->internal->encoderInitialized) { - // Map encoder speed to AOM usage + CpuUsed: - // Speed 0: GoodQuality CpuUsed 0 - // Speed 1: GoodQuality CpuUsed 1 - // Speed 2: GoodQuality CpuUsed 2 - // Speed 3: GoodQuality CpuUsed 3 - // Speed 4: GoodQuality CpuUsed 4 - // Speed 5: GoodQuality CpuUsed 5 - // Speed 6: GoodQuality CpuUsed 6 - // Speed 7: RealTime CpuUsed 7 - // Speed 8: RealTime CpuUsed 8 - // Speed 9: RealTime CpuUsed 9 - // Speed 10: RealTime CpuUsed 9 - unsigned int aomUsage = AOM_USAGE_GOOD_QUALITY; // Use the new AOM_USAGE_ALL_INTRA (added in https://crbug.com/aomedia/2959) for still // image encoding if it is available. #if defined(AOM_USAGE_ALL_INTRA) @@ -594,8 +626,7 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, avifGetPixelFormatInfo(image->yuvFormat, &codec->internal->formatInfo); aom_codec_iface_t * encoderInterface = aom_codec_av1_cx(); - struct aom_codec_enc_cfg cfg; - aom_codec_err_t err = aom_codec_enc_config_default(encoderInterface, &cfg, aomUsage); + aom_codec_err_t err = aom_codec_enc_config_default(encoderInterface, cfg, aomUsage); if (err != AOM_CODEC_OK) { avifDiagnosticsPrintf(codec->diag, "aom_codec_enc_config_default() failed: %s", aom_codec_err_to_string(err)); return AVIF_RESULT_UNKNOWN_ERROR; @@ -607,16 +638,16 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, // libaom's default is AOM_VBR. 
Change the default to AOM_Q since we don't need to // hit a certain target bit rate. It's easier to control the worst quality in Q // mode. - cfg.rc_end_usage = AOM_Q; + cfg->rc_end_usage = AOM_Q; break; case AOM_USAGE_REALTIME: // For real-time mode we need to use CBR rate control mode. AOM_Q doesn't fit the // rate control requirements for real-time mode. CBR does. - cfg.rc_end_usage = AOM_CBR; + cfg->rc_end_usage = AOM_CBR; break; #if defined(AOM_USAGE_ALL_INTRA) case AOM_USAGE_ALL_INTRA: - cfg.rc_end_usage = AOM_Q; + cfg->rc_end_usage = AOM_Q; break; #endif } @@ -655,16 +686,18 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, } } - cfg.g_profile = seqProfile; - cfg.g_bit_depth = image->depth; - cfg.g_input_bit_depth = image->depth; - cfg.g_w = image->width; - cfg.g_h = image->height; - if (addImageFlags & AVIF_ADD_IMAGE_FLAG_SINGLE) { + cfg->g_profile = seqProfile; + cfg->g_bit_depth = image->depth; + cfg->g_input_bit_depth = image->depth; + cfg->g_w = image->width; + cfg->g_h = image->height; + if (extraLayerCount > 0) { + cfg->g_lag_in_frames = 0; + } else if (addImageFlags & AVIF_ADD_IMAGE_FLAG_SINGLE) { // Set the maximum number of frames to encode to 1. This instructs // libaom to set still_picture and reduced_still_picture_header to // 1 in AV1 sequence headers. - cfg.g_limit = 1; + cfg->g_limit = 1; // Use the default settings of the new AOM_USAGE_ALL_INTRA (added in // https://crbug.com/aomedia/2959). @@ -672,43 +705,33 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, // Set g_lag_in_frames to 0 to reduce the number of frame buffers // (from 20 to 2) in libaom's lookahead structure. This reduces // memory consumption when encoding a single image. - cfg.g_lag_in_frames = 0; + cfg->g_lag_in_frames = 0; // Disable automatic placement of key frames by the encoder. - cfg.kf_mode = AOM_KF_DISABLED; + cfg->kf_mode = AOM_KF_DISABLED; // Tell libaom that all frames will be key frames. 
- cfg.kf_max_dist = 0; + cfg->kf_max_dist = 0; } if (encoder->maxThreads > 1) { - cfg.g_threads = encoder->maxThreads; + cfg->g_threads = encoder->maxThreads; } - int minQuantizer = AVIF_CLAMP(encoder->minQuantizer, 0, 63); - int maxQuantizer = AVIF_CLAMP(encoder->maxQuantizer, 0, 63); - if (alpha) { - minQuantizer = AVIF_CLAMP(encoder->minQuantizerAlpha, 0, 63); - maxQuantizer = AVIF_CLAMP(encoder->maxQuantizerAlpha, 0, 63); - } - avifBool lossless = ((minQuantizer == AVIF_QUANTIZER_LOSSLESS) && (maxQuantizer == AVIF_QUANTIZER_LOSSLESS)); - cfg.rc_min_quantizer = minQuantizer; - cfg.rc_max_quantizer = maxQuantizer; - codec->internal->monochromeEnabled = AVIF_FALSE; if (aomVersion > aomVersion_2_0_0) { // There exists a bug in libaom's chroma_check() function where it will attempt to // access nonexistent UV planes when encoding monochrome at faster libavif "speeds". It // was fixed shortly after the 2.0.0 libaom release, and the fix exists in both the // master and applejack branches. This ensures that the next version *after* 2.0.0 will - // have the fix, and we must avoid cfg.monochrome until then. + // have the fix, and we must avoid cfg->monochrome until then. 
// // Bugfix Change-Id: https://aomedia-review.googlesource.com/q/I26a39791f820b4d4e1d63ff7141f594c3c7181f5 if (alpha || (image->yuvFormat == AVIF_PIXEL_FORMAT_YUV400)) { codec->internal->monochromeEnabled = AVIF_TRUE; - cfg.monochrome = 1; + cfg->monochrome = 1; } } - if (!avifProcessAOMOptionsPreInit(codec, alpha, &cfg)) { + if (!avifProcessAOMOptionsPreInit(codec, alpha, cfg)) { return AVIF_RESULT_INVALID_CODEC_SPECIFIC_OPTION; } @@ -716,7 +739,7 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, if (image->depth > 8) { encoderFlags |= AOM_CODEC_USE_HIGHBITDEPTH; } - if (aom_codec_enc_init(&codec->internal->encoder, encoderInterface, &cfg, encoderFlags) != AOM_CODEC_OK) { + if (aom_codec_enc_init(&codec->internal->encoder, encoderInterface, cfg, encoderFlags) != AOM_CODEC_OK) { avifDiagnosticsPrintf(codec->diag, "aom_codec_enc_init() failed: %s: %s", aom_codec_error(&codec->internal->encoder), @@ -725,9 +748,6 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, } codec->internal->encoderInitialized = AVIF_TRUE; - if (lossless) { - aom_codec_control(&codec->internal->encoder, AV1E_SET_LOSSLESS, 1); - } if (encoder->maxThreads > 1) { aom_codec_control(&codec->internal->encoder, AV1E_SET_ROW_MT, 1); } @@ -744,21 +764,16 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, return AVIF_RESULT_UNKNOWN_ERROR; } } + if (extraLayerCount > 0) { + if (aom_codec_control(&codec->internal->encoder, AOME_SET_NUMBER_SPATIAL_LAYERS, extraLayerCount + 1) != AOM_CODEC_OK) { + return AVIF_RESULT_UNKNOWN_ERROR; + } + } + if (!avifProcessAOMOptionsPostInit(codec, alpha)) { return AVIF_RESULT_INVALID_CODEC_SPECIFIC_OPTION; } -#if defined(AOM_USAGE_ALL_INTRA) - if (aomUsage == AOM_USAGE_ALL_INTRA && !codec->internal->endUsageSet && !codec->internal->cqLevelSet) { - // The default rc_end_usage in all intra mode is AOM_Q, which requires cq-level to - // function. 
A libavif user may not know this internal detail and therefore may only - // set the min and max quantizers in the avifEncoder struct. If this is the case, set - // cq-level to a reasonable value for the user, otherwise the default cq-level - // (currently 10) will be unknowingly used. - assert(cfg.rc_end_usage == AOM_Q); - unsigned int cqLevel = (cfg.rc_min_quantizer + cfg.rc_max_quantizer) / 2; - aom_codec_control(&codec->internal->encoder, AOME_SET_CQ_LEVEL, cqLevel); - } -#endif + if (!codec->internal->tuningSet) { if (aom_codec_control(&codec->internal->encoder, AOME_SET_TUNING, AOM_TUNE_SSIM) != AOM_CODEC_OK) { return AVIF_RESULT_UNKNOWN_ERROR; @@ -766,6 +781,58 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, } } + int minQuantizer = AVIF_CLAMP(encoder->minQuantizer, 0, 63); + int maxQuantizer = AVIF_CLAMP(encoder->maxQuantizer, 0, 63); + if (alpha) { + minQuantizer = AVIF_CLAMP(encoder->minQuantizerAlpha, 0, 63); + maxQuantizer = AVIF_CLAMP(encoder->maxQuantizerAlpha, 0, 63); + } + + if (extraLayerCount > 0) { + // Provide a way to set per-layer cq-level to allow using q and cq mode in layered image. 
+ if (cfg->rc_end_usage == AOM_Q || cfg->rc_end_usage == AOM_CQ) { + unsigned int cqLevel; + if (alpha) { + cqLevel = (encoder->layersAlpha[layerIndex].minQuantizer + encoder->layersAlpha[layerIndex].maxQuantizer) / 2; + } else { + cqLevel = (encoder->layers[layerIndex].minQuantizer + encoder->layers[layerIndex].maxQuantizer) / 2; + } + aom_codec_control(&codec->internal->encoder, AOME_SET_CQ_LEVEL, cqLevel); + } else { + minQuantizer = AVIF_CLAMP(encoder->layers[layerIndex].minQuantizer, 0, 63); + maxQuantizer = AVIF_CLAMP(encoder->layers[layerIndex].maxQuantizer, 0, 63); + if (alpha) { + minQuantizer = AVIF_CLAMP(encoder->layersAlpha[layerIndex].minQuantizer, 0, 63); + maxQuantizer = AVIF_CLAMP(encoder->layersAlpha[layerIndex].maxQuantizer, 0, 63); + } + } + } + + avifBool lossless = ((minQuantizer == AVIF_QUANTIZER_LOSSLESS) && (maxQuantizer == AVIF_QUANTIZER_LOSSLESS)); + cfg->rc_min_quantizer = minQuantizer; + cfg->rc_max_quantizer = maxQuantizer; + + if (lossless) { + aom_codec_control(&codec->internal->encoder, AV1E_SET_LOSSLESS, 1); + } + + if (aom_codec_enc_config_set(&codec->internal->encoder, cfg) != AOM_CODEC_OK) { + return AVIF_RESULT_UNKNOWN_ERROR; + } + +#if defined(AOM_USAGE_ALL_INTRA) + if (aomUsage == AOM_USAGE_ALL_INTRA && !codec->internal->endUsageSet && !codec->internal->cqLevelSet) { + // The default rc_end_usage in all intra mode is AOM_Q, which requires cq-level to + // function. A libavif user may not know this internal detail and therefore may only + // set the min and max quantizers in the avifEncoder struct. If this is the case, set + // cq-level to a reasonable value for the user, otherwise the default cq-level + // (currently 10) will be unknowingly used. 
+ assert(cfg->rc_end_usage == AOM_Q); + unsigned int cqLevel = (cfg->rc_min_quantizer + cfg->rc_max_quantizer) / 2; + aom_codec_control(&codec->internal->encoder, AOME_SET_CQ_LEVEL, cqLevel); + } +#endif + aom_image_t aomImage; // We prefer to simply set the aomImage.planes[] pointers to the plane buffers in 'image'. When // doing this, we set aomImage.w equal to aomImage.d_w and aomImage.h equal to aomImage.d_h and @@ -874,6 +941,25 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, aom_codec_control(&codec->internal->encoder, AV1E_SET_CHROMA_SAMPLE_POSITION, aomImage.csp); } + if (extraLayerCount > 0) { + aom_scaling_mode_t scaling_mode; + if (!avifFindAOMScalingMode(&layers[layerIndex].horizontalMode, &scaling_mode.h_scaling_mode)) { + return AVIF_RESULT_NOT_IMPLEMENTED; + } + + if (!avifFindAOMScalingMode(&layers[layerIndex].verticalMode, &scaling_mode.v_scaling_mode)) { + return AVIF_RESULT_NOT_IMPLEMENTED; + } + + if (aom_codec_control(&codec->internal->encoder, AOME_SET_SCALEMODE, &scaling_mode) != AOM_CODEC_OK) { + return AVIF_RESULT_UNKNOWN_ERROR; + } + + if (aom_codec_control(&codec->internal->encoder, AOME_SET_SPATIAL_LAYER_ID, layerIndex) != AOM_CODEC_OK) { + return AVIF_RESULT_UNKNOWN_ERROR; + } + } + unsigned char * monoUVPlane = NULL; if (monochromeRequested && !codec->internal->monochromeEnabled) { // The user requested monochrome (via alpha or YUV400) but libaom cannot currently support @@ -916,6 +1002,11 @@ static avifResult aomCodecEncodeImage(avifCodec * codec, if (addImageFlags & AVIF_ADD_IMAGE_FLAG_FORCE_KEYFRAME) { encodeFlags |= AOM_EFLAG_FORCE_KF; } + if ((extraLayerCount > 0) && (layerIndex > 0)) { + encodeFlags |= AOM_EFLAG_NO_REF_GF | AOM_EFLAG_NO_REF_ARF | AOM_EFLAG_NO_REF_BWD | AOM_EFLAG_NO_REF_ARF2 | + AOM_EFLAG_NO_UPD_GF | AOM_EFLAG_NO_UPD_ARF; + } + aom_codec_err_t encodeErr = aom_codec_encode(&codec->internal->encoder, &aomImage, 0, 1, encodeFlags); avifFree(monoUVPlane); if (aomImageAllocated) { @@ -940,8 +1031,10 @@ 
static avifResult aomCodecEncodeImage(avifCodec * codec, } } - if (addImageFlags & AVIF_ADD_IMAGE_FLAG_SINGLE) { - // Flush and clean up encoder resources early to save on overhead when encoding alpha or grid images + if (((extraLayerCount > 0) && (layerIndex == extraLayerCount)) || + ((extraLayerCount == 0) && (addImageFlags & AVIF_ADD_IMAGE_FLAG_SINGLE))) { + // Flush and clean up encoder resources early to save on overhead when encoding alpha or grid images. + // (For layered image, this should be done after the last layer is encoded.) if (!aomCodecEncodeFinish(codec, output)) { return AVIF_RESULT_UNKNOWN_ERROR; diff --git a/src/codec_rav1e.c b/src/codec_rav1e.c index 438ff5e415..b5179a65b2 100644 --- a/src/codec_rav1e.c +++ b/src/codec_rav1e.c @@ -51,6 +51,7 @@ static avifResult rav1eCodecEncodeImage(avifCodec * codec, avifEncoder * encoder, const avifImage * image, avifBool alpha, + uint32_t layerIndex, uint32_t addImageFlags, avifCodecEncodeOutput * output) { @@ -59,6 +60,10 @@ static avifResult rav1eCodecEncodeImage(avifCodec * codec, RaConfig * rav1eConfig = NULL; RaFrame * rav1eFrame = NULL; + if (layerIndex != 0) { + return AVIF_RESULT_NOT_IMPLEMENTED; + } + if (!codec->internal->rav1eContext) { if (codec->csOptions->count > 0) { // None are currently supported! 
diff --git a/src/codec_svt.c b/src/codec_svt.c index 1ddf83e2ba..125adb6000 100644 --- a/src/codec_svt.c +++ b/src/codec_svt.c @@ -46,6 +46,7 @@ static avifResult svtCodecEncodeImage(avifCodec * codec, avifEncoder * encoder, const avifImage * image, avifBool alpha, + uint32_t layerIndex, uint32_t addImageFlags, avifCodecEncodeOutput * output) { @@ -54,6 +55,10 @@ static avifResult svtCodecEncodeImage(avifCodec * codec, EbBufferHeaderType * input_buffer = NULL; EbErrorType res = EB_ErrorNone; + if (layerIndex != 0) { + return AVIF_RESULT_NOT_IMPLEMENTED; + } + int y_shift = 0; // EbColorRange svt_range; if (alpha) { diff --git a/src/read.c b/src/read.c index bf5a7b567a..ec5b23b9f1 100644 --- a/src/read.c +++ b/src/read.c @@ -37,8 +37,6 @@ static const size_t xmpContentTypeSize = sizeof(xmpContentType); // can't be more than 4 unique tuples right now. #define MAX_IPMA_VERSION_AND_FLAGS_SEEN 4 -#define MAX_AV1_LAYER_COUNT 4 - // --------------------------------------------------------------------------- // Box data structures @@ -1608,7 +1606,7 @@ static avifBool avifParseItemLocationBox(avifMeta * meta, const uint8_t * raw, s } } - uint16_t dataReferenceIndex; // unsigned int(16) data_ref rence_index; + uint16_t dataReferenceIndex; // unsigned int(16) data_reference_index; CHECK(avifROStreamReadU16(&s, &dataReferenceIndex)); // uint64_t baseOffset; // unsigned int(base_offset_size*8) base_offset; CHECK(avifROStreamReadUX8(&s, &baseOffset, baseOffsetSize)); // @@ -1901,7 +1899,7 @@ static avifBool avifParseAV1LayeredImageIndexingProperty(avifProperty * prop, co } } - // Layer sizes will be validated layer (when the item's size is known) + // Layer sizes will be validated later (when the item's size is known) return AVIF_TRUE; } diff --git a/src/write.c b/src/write.c index ad2a95605f..2a26d47a89 100644 --- a/src/write.c +++ b/src/write.c @@ -100,12 +100,25 @@ typedef struct avifEncoderItem uint32_t gridCols; // if non-zero (legal range [1-256]), this is a grid item 
uint32_t gridRows; // if non-zero (legal range [1-256]), this is a grid item + uint32_t extraLayerCount; + uint16_t dimgFromID; // if non-zero, make an iref from dimgFromID -> this id struct ipmaArray ipma; } avifEncoderItem; AVIF_ARRAY_DECLARE(avifEncoderItemArray, avifEncoderItem, item); +// --------------------------------------------------------------------------- +// avifEncoderItemReference + +// pointer to one "item" interested in + +typedef struct avifEncoderItemReference +{ + avifEncoderItem * item; +} avifEncoderItemReference; +AVIF_ARRAY_DECLARE(avifEncoderItemReferenceArray, avifEncoderItemReference, ref); + // --------------------------------------------------------------------------- // avifEncoderFrame @@ -297,6 +310,7 @@ avifEncoder * avifEncoderCreate(void) encoder->maxQuantizer = AVIF_QUANTIZER_LOSSLESS; encoder->minQuantizerAlpha = AVIF_QUANTIZER_LOSSLESS; encoder->maxQuantizerAlpha = AVIF_QUANTIZER_LOSSLESS; + encoder->extraLayerCount = 0; encoder->tileRowsLog2 = 0; encoder->tileColsLog2 = 0; encoder->speed = AVIF_SPEED_DEFAULT; @@ -606,14 +620,26 @@ static avifResult avifEncoderAddImageInternal(avifEncoder * encoder, return AVIF_RESULT_NO_CODEC_AVAILABLE; } + // Currently, layered image can only have one frame. 
+ if (((encoder->extraLayerCount > 0) || (encoder->extraLayerCountAlpha > 0)) && (encoder->data->frames.count > 0)) { + return AVIF_RESULT_NOT_IMPLEMENTED; + } + + const uint32_t layerCount = AVIF_MAX(encoder->extraLayerCount, encoder->extraLayerCountAlpha) + 1; + if (layerCount > MAX_AV1_LAYER_COUNT) { + return AVIF_RESULT_INVALID_LAYERS; + } + // ----------------------------------------------------------------------- // Validate images - const uint32_t cellCount = gridCols * gridRows; - if (cellCount == 0) { - return AVIF_RESULT_INVALID_ARGUMENT; + if ((gridCols == 0) || (gridCols > 256) || (gridRows == 0) || (gridRows > 256)) { + return AVIF_RESULT_INVALID_IMAGE_GRID; } + const uint32_t cellCount = gridCols * gridRows; + const uint32_t imageCount = cellCount * layerCount; + const avifImage * firstCell = cellImages[0]; if ((firstCell->depth != 8) && (firstCell->depth != 10) && (firstCell->depth != 12)) { return AVIF_RESULT_UNSUPPORTED_DEPTH; @@ -717,7 +743,8 @@ static avifResult avifEncoderAddImageInternal(avifEncoder * encoder, } encoder->data->alphaPresent = (firstCell->alphaPlane != NULL); - if (encoder->data->alphaPresent && (addImageFlags & AVIF_ADD_IMAGE_FLAG_SINGLE)) { + if (encoder->data->alphaPresent && + ((addImageFlags & AVIF_ADD_IMAGE_FLAG_SINGLE) || (encoder->extraLayerCount > 0) || (encoder->extraLayerCountAlpha > 0))) { // If encoding a single image in which the alpha plane exists but is entirely opaque, // simply skip writing an alpha AV1 payload entirely, as it'll be interpreted as opaque // and is less bytes. @@ -728,8 +755,8 @@ static avifResult avifEncoderAddImageInternal(avifEncoder * encoder, // when encoding a single image. 
encoder->data->alphaPresent = AVIF_FALSE; - for (uint32_t cellIndex = 0; cellIndex < cellCount; ++cellIndex) { - const avifImage * cellImage = cellImages[cellIndex]; + for (uint32_t imageIndex = 0; imageIndex < imageCount; ++imageIndex) { + const avifImage * cellImage = cellImages[imageIndex]; if (!avifImageIsOpaque(cellImage)) { encoder->data->alphaPresent = AVIF_TRUE; break; @@ -820,14 +847,17 @@ static avifResult avifEncoderAddImageInternal(avifEncoder * encoder, for (uint32_t itemIndex = 0; itemIndex < encoder->data->items.count; ++itemIndex) { avifEncoderItem * item = &encoder->data->items.item[itemIndex]; if (item->codec) { - const avifImage * cellImage = cellImages[item->cellIndex]; - avifResult encodeResult = - item->codec->encodeImage(item->codec, encoder, cellImage, item->alpha, addImageFlags, item->encodeOutput); - if (encodeResult == AVIF_RESULT_UNKNOWN_ERROR) { - encodeResult = item->alpha ? AVIF_RESULT_ENCODE_ALPHA_FAILED : AVIF_RESULT_ENCODE_COLOR_FAILED; - } - if (encodeResult != AVIF_RESULT_OK) { - return encodeResult; + item->extraLayerCount = item->alpha ? encoder->extraLayerCountAlpha : encoder->extraLayerCount; + for (uint32_t layerIndex = 0; layerIndex < item->extraLayerCount + 1; ++layerIndex) { + const avifImage * layerImage = cellImages[item->cellIndex * layerCount + layerIndex]; + avifResult encodeResult = + item->codec->encodeImage(item->codec, encoder, layerImage, item->alpha, layerIndex, addImageFlags, item->encodeOutput); + if (encodeResult == AVIF_RESULT_UNKNOWN_ERROR) { + encodeResult = item->alpha ? 
AVIF_RESULT_ENCODE_ALPHA_FAILED : AVIF_RESULT_ENCODE_COLOR_FAILED; + } + if (encodeResult != AVIF_RESULT_OK) { + return encodeResult; + } } } } @@ -850,10 +880,16 @@ avifResult avifEncoderAddImageGrid(avifEncoder * encoder, avifAddImageFlags addImageFlags) { avifDiagnosticsClearError(&encoder->diag); - if ((gridCols == 0) || (gridCols > 256) || (gridRows == 0) || (gridRows > 256)) { - return AVIF_RESULT_INVALID_IMAGE_GRID; + if (encoder->extraLayerCount == 0 && encoder->extraLayerCountAlpha == 0) { + addImageFlags |= AVIF_ADD_IMAGE_FLAG_SINGLE; // only single image grids are supported } - return avifEncoderAddImageInternal(encoder, gridCols, gridRows, cellImages, 1, addImageFlags | AVIF_ADD_IMAGE_FLAG_SINGLE); // only single image grids are supported + return avifEncoderAddImageInternal(encoder, gridCols, gridRows, cellImages, 1, addImageFlags); +} + +avifResult avifEncoderAddImageProgressive(avifEncoder * encoder, const avifImage * const * layerImages, avifAddImageFlags addImageFlags) +{ + avifDiagnosticsClearError(&encoder->diag); + return avifEncoderAddImageInternal(encoder, 1, 1, layerImages, 1, addImageFlags); } static size_t avifEncoderFindExistingChunk(avifRWStream * s, size_t mdatStartOffset, const uint8_t * data, size_t size) @@ -889,7 +925,7 @@ avifResult avifEncoderFinish(avifEncoder * encoder, avifRWData * output) return item->alpha ? AVIF_RESULT_ENCODE_ALPHA_FAILED : AVIF_RESULT_ENCODE_COLOR_FAILED; } - if (item->encodeOutput->samples.count != encoder->data->frames.count) { + if (item->encodeOutput->samples.count != encoder->data->frames.count * (item->extraLayerCount + 1)) { return item->alpha ? 
AVIF_RESULT_ENCODE_ALPHA_FAILED : AVIF_RESULT_ENCODE_COLOR_FAILED; } } @@ -990,25 +1026,29 @@ avifResult avifEncoderFinish(avifEncoder * encoder, avifRWData * output) for (uint32_t itemIndex = 0; itemIndex < encoder->data->items.count; ++itemIndex) { avifEncoderItem * item = &encoder->data->items.item[itemIndex]; - uint32_t contentSize = (uint32_t)item->metadataPayload.size; - if (item->encodeOutput->samples.count > 0) { - // This is choosing sample 0's size as there are two cases here: - // * This is a single image, in which case this is correct - // * This is an image sequence, but this file should still be a valid single-image avif, - // so there must still be a primary item pointing at a sync sample. Since the first - // frame of the image sequence is guaranteed to be a sync sample, it is chosen here. - // - // TODO: Offer the ability for a user to specify which frame in the sequence should - // become the primary item's image, and force that frame to be a keyframe. - contentSize = (uint32_t)item->encodeOutput->samples.sample[0].data.size; - } + avifRWStreamWriteU16(&s, item->id); // unsigned int(16) item_ID; + avifRWStreamWriteU16(&s, 0); // unsigned int(16) data_reference_index; + avifRWStreamWriteU16(&s, (uint16_t)(item->extraLayerCount + 1)); // unsigned int(16) extent_count; - avifRWStreamWriteU16(&s, item->id); // unsigned int(16) item_ID; - avifRWStreamWriteU16(&s, 0); // unsigned int(16) data_reference_index; - avifRWStreamWriteU16(&s, 1); // unsigned int(16) extent_count; - avifEncoderItemAddMdatFixup(item, &s); // - avifRWStreamWriteU32(&s, 0 /* set later */); // unsigned int(offset_size*8) extent_offset; - avifRWStreamWriteU32(&s, (uint32_t)contentSize); // unsigned int(length_size*8) extent_length; + for (uint32_t i = 0; i < item->extraLayerCount + 1; ++i) { + avifEncoderItemAddMdatFixup(item, &s); + avifRWStreamWriteU32(&s, 0 /* set later */); // unsigned int(offset_size*8) extent_offset; + + if (item->encodeOutput->samples.count == 0) { + 
avifRWStreamWriteU32(&s, (uint32_t)item->metadataPayload.size); // unsigned int(length_size*8) extent_length; + assert(item->extraLayerCount == 0); + } else { + // For non-layered image, this is choosing sample 0's size as there are two cases here: + // * This is a single image, in which case this is correct + // * This is an image sequence, but this file should still be a valid single-image avif, + // so there must still be a primary item pointing at a sync sample. Since the first + // frame of the image sequence is guaranteed to be a sync sample, it is chosen here. + // + // TODO: Offer the ability for a user to specify which frame in the sequence should + // become the primary item's image, and force that frame to be a keyframe. + avifRWStreamWriteU32(&s, (uint32_t)item->encodeOutput->samples.sample[i].data.size); // unsigned int(length_size*8) extent_length; + } + } } avifRWStreamFinishBox(&s, iloc); @@ -1099,15 +1139,16 @@ avifResult avifEncoderFinish(avifEncoder * encoder, avifRWData * output) continue; } - if (item->dimgFromID) { - // All image cells from a grid should share the exact same properties, so see if we've - // already written properties out for another cell in this grid, and if so, just steal - // their ipma and move on. This is a sneaky way to provide iprp deduplication. + if (item->dimgFromID && item->extraLayerCount == 0) { + // All image cells from a grid should share the exact same properties unless they are + // layered image which have different a1lx, so see if we've already written properties + // out for another cell in this grid, and if so, just steal their ipma and move on. + // This is a sneaky way to provide iprp deduplication. 
avifBool foundPreviousCell = AVIF_FALSE; for (uint32_t dedupIndex = 0; dedupIndex < itemIndex; ++dedupIndex) { avifEncoderItem * dedupItem = &encoder->data->items.item[dedupIndex]; - if (item->dimgFromID == dedupItem->dimgFromID) { + if (item->dimgFromID == dedupItem->dimgFromID && dedupItem->extraLayerCount == 0) { // We've already written dedup's items out. Steal their ipma indices and move on! item->ipma = dedupItem->ipma; foundPreviousCell = AVIF_TRUE; @@ -1164,6 +1205,38 @@ avifResult avifEncoderFinish(avifEncoder * encoder, avifRWData * output) avifEncoderWriteColorProperties(&s, imageMetadata, &item->ipma, dedup); } + + if (item->extraLayerCount != 0) { + // Layered Image Indexing Property + + avifItemPropertyDedupStart(dedup); + avifBoxMarker a1lx = avifRWStreamWriteBox(&dedup->s, "a1lx", AVIF_BOX_SIZE_TBD); + uint32_t layerSize[MAX_AV1_LAYER_COUNT - 1] = { 0 }; + avifBool largeSize = AVIF_FALSE; + + for (uint32_t validLayer = 0; validLayer < item->extraLayerCount; ++validLayer) { + uint32_t size = (uint32_t)item->encodeOutput->samples.sample[validLayer].data.size; + layerSize[validLayer] = size; + if (size > 0xffff) { + largeSize = AVIF_TRUE; + } + } + + avifRWStreamWriteU8(&dedup->s, (uint8_t)largeSize); // unsigned int(7) reserved = 0; + // unsigned int(1) large_size; + + // FieldLength = (large_size + 1) * 16; + // unsigned int(FieldLength) layer_size[3]; + for (uint32_t layer = 0; layer < MAX_AV1_LAYER_COUNT - 1; ++layer) { + if (largeSize) { + avifRWStreamWriteU32(&dedup->s, layerSize[layer]); + } else { + avifRWStreamWriteU16(&dedup->s, (uint16_t)layerSize[layer]); + } + } + avifRWStreamFinishBox(&dedup->s, a1lx); + ipmaPush(&item->ipma, avifItemPropertyDedupFinish(dedup, &s), AVIF_FALSE); + } } avifRWStreamFinishBox(&s, ipco); avifItemPropertyDedupDestroy(dedup); @@ -1441,6 +1514,13 @@ avifResult avifEncoderFinish(avifEncoder * encoder, avifRWData * output) avifBoxMarker mdat = avifRWStreamWriteBox(&s, "mdat", AVIF_BOX_SIZE_TBD); const size_t 
mdatStartOffset = avifRWStreamOffset(&s); + + avifEncoderItemReferenceArray layeredAlphaItems; + avifEncoderItemReferenceArray layeredColorItems; + avifArrayCreate(&layeredAlphaItems, sizeof(avifEncoderItemReference), 1); + avifArrayCreate(&layeredColorItems, sizeof(avifEncoderItemReference), 1); + avifBool useInterleave = encoder->extraLayerCount > 0; + for (uint32_t itemPasses = 0; itemPasses < 3; ++itemPasses) { // Use multiple passes to pack in the following order: // * Pass 0: metadata (Exif/XMP) @@ -1475,6 +1555,20 @@ avifResult avifEncoderFinish(avifEncoder * encoder, avifRWData * output) size_t chunkOffset = 0; + // Interleave - Pick out and record layered image items, interleave them later. + // Layer image items have same number of samples and fixups. + if (useInterleave && (item->encodeOutput->samples.count > 0) && + (item->encodeOutput->samples.count == item->mdatFixups.count)) { + avifEncoderItemReference * ref; + if (item->alpha) { + ref = (avifEncoderItemReference *)avifArrayPushPtr(&layeredAlphaItems); + } else { + ref = (avifEncoderItemReference *)avifArrayPushPtr(&layeredColorItems); + } + ref->item = item; + continue; + } + // Deduplication - See if an identical chunk to this has already been written if (item->encodeOutput->samples.count > 0) { avifEncodeSample * sample = &item->encodeOutput->samples.sample[0]; @@ -1511,6 +1605,54 @@ avifResult avifEncoderFinish(avifEncoder * encoder, avifRWData * output) } } } + + if (useInterleave) { + avifBool hasMoreSample; + uint32_t layerIndex = 0; + do { + hasMoreSample = AVIF_FALSE; + for (uint32_t itemIndex = 0; itemIndex < AVIF_MAX(layeredColorItems.count, layeredAlphaItems.count); ++itemIndex) { + for (int samplePass = 0; samplePass < 2; ++samplePass) { + // Alpha coming before color + avifEncoderItemReferenceArray * currentItems = (samplePass == 0) ? 
&layeredAlphaItems : &layeredColorItems; + if (itemIndex >= currentItems->count) { + continue; + } + + // TODO: Offer the ability for a user to specify which grid cell should be written first. + avifEncoderItem * item = currentItems->ref[itemIndex].item; + if (item->encodeOutput->samples.count <= layerIndex) { + // We've already written all samples of this item + continue; + } else if (item->encodeOutput->samples.count > layerIndex + 1) { + hasMoreSample = AVIF_TRUE; + } + avifRWData * data = &item->encodeOutput->samples.sample[layerIndex].data; + size_t chunkOffset = avifEncoderFindExistingChunk(&s, mdatStartOffset, data->data, data->size); + if (!chunkOffset) { + // We've never seen this chunk before; write it out + chunkOffset = avifRWStreamOffset(&s); + avifRWStreamWrite(&s, data->data, data->size); + if (samplePass == 0) { + encoder->ioStats.alphaOBUSize += data->size; + } else { + encoder->ioStats.colorOBUSize += data->size; + } + + size_t prevOffset = avifRWStreamOffset(&s); + avifRWStreamSetOffset(&s, item->mdatFixups.fixup[layerIndex].offset); + avifRWStreamWriteU32(&s, (uint32_t)chunkOffset); + avifRWStreamSetOffset(&s, prevOffset); + } + } + } + ++layerIndex; + } while (hasMoreSample); + } + + avifArrayDestroy(&layeredColorItems); + avifArrayDestroy(&layeredAlphaItems); + avifRWStreamFinishBox(&s, mdat); // ----------------------------------------------------------------------- diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index eab84e6896..6c4d036ea1 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -95,6 +95,11 @@ if(AVIF_ENABLE_GTEST) target_link_libraries(avify4mtest aviftest_helpers avif_apps ${GTEST_BOTH_LIBRARIES}) target_include_directories(avify4mtest PRIVATE ${GTEST_INCLUDE_DIRS}) add_test(NAME avify4mtest COMMAND avify4mtest) + + add_executable(avifprogressivetest gtest/avifprogressivetest.cc) + target_link_libraries(avifprogressivetest aviftest_helpers ${GTEST_BOTH_LIBRARIES}) + 
target_include_directories(avifprogressivetest PRIVATE ${GTEST_INCLUDE_DIRS}) + add_test(NAME avifprogressivetest COMMAND avifprogressivetest) else() message(STATUS "Most tests are disabled because AVIF_ENABLE_GTEST is OFF.") endif() diff --git a/tests/gtest/avifprogressivetest.cc b/tests/gtest/avifprogressivetest.cc new file mode 100644 index 0000000000..33ccce4dcb --- /dev/null +++ b/tests/gtest/avifprogressivetest.cc @@ -0,0 +1,60 @@ +// Copyright 2022 Google LLC. All rights reserved. +// SPDX-License-Identifier: BSD-2-Clause + +#include "avif/avif.h" +#include "aviftest_helpers.h" +#include "gtest/gtest.h" + +namespace libavif { +namespace { +class ProgressiveTest : public testing::Test {}; + +TEST(ProgressiveTest, EncodeDecode) { + if (avifCodecName(AVIF_CODEC_CHOICE_AOM, AVIF_CODEC_FLAG_CAN_ENCODE) == + nullptr) { + GTEST_SKIP() << "ProgressiveTest requires AOM encoder."; + } + + const uint32_t image_size = 512; + testutil::AvifImagePtr image = + testutil::CreateImage(image_size, image_size, 8, AVIF_PIXEL_FORMAT_YUV444, + AVIF_PLANES_YUV, AVIF_RANGE_FULL); + ASSERT_NE(image, nullptr); + testutil::FillImageGradient(image.get()); + + // Encode + testutil::AvifEncoderPtr encoder(avifEncoderCreate(), avifEncoderDestroy); + ASSERT_NE(encoder, nullptr); + encoder->codecChoice = AVIF_CODEC_CHOICE_AOM; + encoder->speed = AVIF_SPEED_FASTEST; + encoder->extraLayerCount = 1; + encoder->layers[0] = {50, 50, {1, 4}, {1, 4}}; + encoder->layers[1] = {25, 25, {1, 1}, {1, 1}}; + avifImage* layer_image_ptrs[2] = {image.get(), image.get()}; + ASSERT_EQ(avifEncoderAddImageProgressive(encoder.get(), layer_image_ptrs, + AVIF_ADD_IMAGE_FLAG_NONE), + AVIF_RESULT_OK); + testutil::AvifRwData encodedAvif; + ASSERT_EQ(avifEncoderFinish(encoder.get(), &encodedAvif), AVIF_RESULT_OK); + + // Decode + ASSERT_NE(image, nullptr); + testutil::AvifDecoderPtr decoder(avifDecoderCreate(), avifDecoderDestroy); + ASSERT_NE(decoder, nullptr); + decoder->allowProgressive = true; + ASSERT_EQ( + 
avifDecoderSetIOMemory(decoder.get(), encodedAvif.data, encodedAvif.size), + AVIF_RESULT_OK); + ASSERT_EQ(avifDecoderParse(decoder.get()), AVIF_RESULT_OK); + ASSERT_EQ(decoder->progressiveState, AVIF_PROGRESSIVE_STATE_ACTIVE); + ASSERT_EQ(avifDecoderNextImage(decoder.get()), AVIF_RESULT_OK); + ASSERT_EQ(decoder->image->width, image_size); + ASSERT_EQ(decoder->image->height, image_size); + // TODO Check decoder->image and image are similar + ASSERT_EQ(avifDecoderNextImage(decoder.get()), AVIF_RESULT_OK); + ASSERT_EQ(decoder->image->width, image_size); + ASSERT_EQ(decoder->image->height, image_size); + // TODO Check decoder->image and image are more similar than previous layer +} +} // namespace +} // namespace libavif