From a8f8e6a7c004b8882f0dd9ba10563684f3d6d11e Mon Sep 17 00:00:00 2001
From: Jean-Baptiste Mardelle
Date: Mon, 3 Feb 2025 20:40:06 +0100
Subject: [PATCH] Fix various aspect ratio issues with qtblend filter/transition

---
 src/modules/qt/common.h               |   2 +
 src/modules/qt/filter_qtblend.cpp     | 110 +++++++++++++++++++-------
 src/modules/qt/transition_qtblend.cpp |  84 +++++++++++++-------
 3 files changed, 137 insertions(+), 59 deletions(-)

diff --git a/src/modules/qt/common.h b/src/modules/qt/common.h
index e2894377f..17e108273 100644
--- a/src/modules/qt/common.h
+++ b/src/modules/qt/common.h
@@ -21,6 +21,8 @@
 #include 
 
+#define MLT_QTBLEND_MAX_DIMENSION (16000)
+
 class QImage;
 
 bool createQApplicationIfNeeded(mlt_service service);

diff --git a/src/modules/qt/filter_qtblend.cpp b/src/modules/qt/filter_qtblend.cpp
index 1d9c91328..7b8230a11 100644
--- a/src/modules/qt/filter_qtblend.cpp
+++ b/src/modules/qt/filter_qtblend.cpp
@@ -1,5 +1,5 @@
 /*
- * filter_lightshow.cpp -- animate color to the audio
+ * filter_qtblend.cpp -- Qt composite filter
  * Copyright (C) 2015 Meltytech, LLC
  *
  * This library is free software; you can redistribute it and/or
@@ -66,19 +66,22 @@ static int filter_get_image(mlt_frame frame,
                      1.0};
     int b_width = mlt_properties_get_int(frame_properties, "meta.media.width");
     int b_height = mlt_properties_get_int(frame_properties, "meta.media.height");
+    bool distort = mlt_properties_get_int(properties, "distort");
+
     if (b_height == 0) {
         b_width = normalized_width;
         b_height = normalized_height;
     }
     // Special case - aspect_ratio = 0
     if (mlt_frame_get_aspect_ratio(frame) == 0) {
-        double output_ar = mlt_profile_sar(profile);
-        mlt_frame_set_aspect_ratio(frame, output_ar);
+        mlt_frame_set_aspect_ratio(frame, consumer_ar);
     }
     double b_ar = mlt_frame_get_aspect_ratio(frame);
     double b_dar = b_ar * b_width / b_height;
     double opacity = 1.0;
 
+    // If the _qtblend_scaled property is defined, a qtblend filter was already applied
+    int qtblendRescaled = mlt_properties_get_int(frame_properties, "_qtblend_scaled");
     if (mlt_properties_get(properties, "rect")) {
         rect = mlt_properties_anim_get_rect(properties, "rect", position, length);
         if (::strchr(mlt_properties_get(properties, "rect"), '%')) {
@@ -87,30 +90,81 @@ static int filter_get_image(mlt_frame frame,
             rect.w *= normalized_width;
             rect.h *= normalized_height;
         }
-        double scale = mlt_profile_scale_width(profile, *width);
-        if (scale != 1.0) {
-            rect.x *= scale;
-            rect.w *= scale;
-        }
-        scale = mlt_profile_scale_height(profile, *height);
-        if (scale != 1.0) {
-            rect.y *= scale;
-            rect.h *= scale;
-        }
-        transform.translate(rect.x, rect.y);
-        opacity = rect.o;
-        hasAlpha = rect.o < 1 || rect.x != 0 || rect.y != 0 || rect.w != *width
-                   || rect.h != *height;
+        if (qtblendRescaled) {
+            // Another qtblend filter was already applied
+            // In this case, the *width and *height are set to the source resolution to ensure we don't lose too much detail on multiple scaling operations
+            // We requested an image with full media resolution, adjust rect to profile
+            // Check if we have consumer scaling enabled since we cannot use *width and *height
+            double consumerScale = mlt_properties_get_double(frame_properties, "_qtblend_scalex");
+            if (consumerScale > 0.) {
+                b_width *= consumerScale;
+                b_height *= consumerScale;
+            }
-        if (mlt_properties_get_int(properties, "distort") == 0) {
-            b_height = qMax(1, qMin(qRound(rect.h), b_height));
-            b_width = qMax(1, qRound(b_height * b_dar / b_ar / consumer_ar));
+            // Always request an image that follows the consumer aspect ratio
+            double consumer_dar = normalized_width * consumer_ar / normalized_height;
+            int tmpWidth = b_width;
+            int tmpHeight = b_height;
+            double scaleFactor = qMax(*width / rect.w, *height / rect.h);
+            if (scaleFactor > 1.) {
+                // Use the highest necessary resolution image
+                tmpWidth *= scaleFactor;
+                tmpHeight *= scaleFactor;
+            }
+            if (consumer_dar > b_dar) {
+                *width = qBound(qRound(normalized_width * consumerScale),
+                                tmpWidth,
+                                MLT_QTBLEND_MAX_DIMENSION);
+                *height = qRound(*width * consumer_ar * normalized_height / normalized_width);
+            } else {
+                *height = qBound(qRound(normalized_height * consumerScale),
+                                 tmpHeight,
+                                 MLT_QTBLEND_MAX_DIMENSION);
+                *width = qRound(*height * normalized_width / normalized_height / consumer_ar);
+            }
+            // Adjust rect to new scaling
+            double scale = (double) *width / normalized_width;
+            if (scale != 1.0) {
+                rect.x *= scale;
+                rect.w *= scale;
+            }
+            scale = (double) *height / normalized_height;
+            if (scale != 1.0) {
+                rect.y *= scale;
+                rect.h *= scale;
+            }
         } else {
-            b_width = qMax(1, qRound(b_width * b_ar / consumer_ar));
-        }
-        if (!hasAlpha && (b_width < *width || b_height < *height)) {
-            hasAlpha = true;
+            // First instance of a qtblend filter
+            double scale = mlt_profile_scale_width(profile, *width);
+            // Store consumer scaling for later use
+            mlt_properties_set_int(frame_properties, "_qtblend_scaled", 1);
+            mlt_properties_set_double(frame_properties, "_qtblend_scalex", scale);
+            // Apply scaling
+            if (scale != 1.0) {
+                rect.x *= scale;
+                rect.w *= scale;
+                if (distort) {
+                    b_width *= scale;
+                } else {
+                    // Apply consumer scaling to the source image request
+                    b_width *= scale;
+                    b_height *= scale;
+                }
+            }
+            scale = mlt_profile_scale_height(profile, *height);
+            if (scale != 1.0) {
+                rect.y *= scale;
+                rect.h *= scale;
+                if (distort) {
+                    b_height *= scale;
+                }
+            }
         }
 
+        transform.translate(rect.x, rect.y);
+        opacity = rect.o;
+        hasAlpha = rect.o < 1 || rect.x != 0 || rect.y != 0 || rect.w != *width || rect.h != *height
+                   || rect.w / b_dar < *height || rect.h * b_dar < *width || b_width < *width
+                   || b_height < *height;
     } else {
         b_width = *width;
         b_height = *height;
@@ -154,7 +208,6 @@ static int filter_get_image(mlt_frame frame,
     // fetch image
     *format = mlt_image_rgba;
     uint8_t *src_image = NULL;
-
     error = mlt_frame_get_image(frame, &src_image, format, &b_width, &b_height, 0);
 
     // Put source buffer into QImage
@@ -164,13 +217,12 @@ static int filter_get_image(mlt_frame frame,
     int image_size = mlt_image_format_size(*format, *width, *height, NULL);
 
     // resize to rect
-    if (mlt_properties_get_int(properties, "distort")) {
+    if (distort) {
         transform.scale(rect.w / b_width, rect.h / b_height);
     } else {
-        // Determine scale with respect to aspect ratio.
-        double geometry_dar = rect.w * consumer_ar / rect.h;
         double scale;
-        if (b_dar > geometry_dar) {
+        double resize_dar = rect.w * consumer_ar / rect.h;
+        if (b_dar >= resize_dar) {
             scale = rect.w / b_width;
         } else {
             scale = rect.h / b_height * b_ar;

diff --git a/src/modules/qt/transition_qtblend.cpp b/src/modules/qt/transition_qtblend.cpp
index 100e4b69a..1c6690852 100644
--- a/src/modules/qt/transition_qtblend.cpp
+++ b/src/modules/qt/transition_qtblend.cpp
@@ -58,7 +58,9 @@ static int get_image(mlt_frame a_frame,
     mlt_profile profile = mlt_service_profile(MLT_TRANSITION_SERVICE(transition));
     int b_width = mlt_properties_get_int(b_properties, "meta.media.width");
     int b_height = mlt_properties_get_int(b_properties, "meta.media.height");
+    bool distort = mlt_properties_get_int(transition_properties, "distort");
+    double consumer_ar = mlt_profile_sar(profile);
 
     // Check the producer's native format before fetching image
     int sourceFormat = mlt_properties_get_int(b_properties, "format");
@@ -75,11 +77,28 @@ static int get_image(mlt_frame a_frame,
     double b_dar = b_ar * b_width / b_height;
     rect.w = -1;
     rect.h = -1;
+    double transformScale = 1.;
+    double geometry_dar = *width * consumer_ar / *height;
     if (!distort && (b_height < *height || b_width < *width)) {
+        // Source image is smaller than profile, request full frame
+        if (b_dar > geometry_dar) {
+            transformScale = b_dar / geometry_dar;
+        } else {
+            transformScale = geometry_dar / b_dar;
+        }
         b_width = *width;
         b_height = *height;
     }
+    double scalex = mlt_profile_scale_width(profile, *width);
+    double scaley = mlt_profile_scale_height(profile, *height);
+    if (scalex != 1.) {
+        b_height *= scalex;
+        b_width *= scalex;
+    }
+    int request_width = *width;
+    int request_height = *height;
+
     // Check transform
     if (mlt_properties_get(transition_properties, "rect")) {
         rect = mlt_properties_anim_get_rect(transition_properties, "rect", position, length);
@@ -91,31 +110,24 @@ static int get_image(mlt_frame a_frame,
             rect.h *= *height;
         } else {
             // Adjust to preview scaling
-            double scale = mlt_profile_scale_width(profile, *width);
-            if (scale != 1.0) {
-                rect.x *= scale;
-                rect.w *= scale;
+            if (scalex != 1.0) {
+                rect.x *= scalex;
+                rect.w *= scalex;
                 if (distort) {
-                    b_width *= scale;
+                    b_width *= scalex;
                 }
             }
-            scale = mlt_profile_scale_height(profile, *height);
-            if (scale != 1.0) {
-                rect.y *= scale;
-                rect.h *= scale;
+            if (scaley != 1.0) {
+                rect.y *= scaley;
+                rect.h *= scaley;
                 if (distort) {
-                    b_height *= scale;
+                    b_height *= scaley;
                 }
             }
         }
         transform.translate(rect.x, rect.y);
         opacity = rect.o;
-        if (!distort) {
-            b_width = qMin(qRound(rect.w), b_width);
-            b_height = qMin(qRound(rect.h), b_height);
-            transform.translate((rect.w - b_width) / 2.0, (rect.h - b_height) / 2.0);
-        }
         if (opacity < 1 || rect.x != 0 || rect.y != 0 || (rect.x + rect.w != *width)
             || (rect.y + rect.h != *height)) {
             // we will process operations on top frame, so also process b_frame
@@ -157,12 +169,6 @@ static int get_image(mlt_frame a_frame,
     if (interps)
         interps = strdup(interps);
 
-    if (error) {
-        return error;
-    }
-    if (distort && b_width != 0 && b_height != 0) {
-        transform.scale(rect.w / b_width, rect.h / b_height);
-    }
     // Check profile dar vs image dar image
     if (!forceAlpha && rect.w == -1 && b_dar != mlt_profile_dar(profile)) {
         // Activate transparency if the clips don't have the same aspect ratio
@@ -175,8 +181,6 @@ static int get_image(mlt_frame a_frame,
     }
 
     // Check if we have transparency
-    int request_width = b_width;
-    int request_height = b_height;
     bool imageFetched = false;
     if (!forceAlpha) {
         if (!hasAlpha || *format == mlt_image_rgba) {
@@ -197,7 +201,8 @@ static int get_image(mlt_frame a_frame,
                                      "progressive,distort,colorspace,full_range,force_full_luma,"
                                      "top_field_first,color_trc");
             // Prepare output image
-            if (b_frame->convert_image && (b_width != request_width || b_height != request_height)) {
+            if (b_frame->convert_image
+                && (b_width != request_width || b_height != request_height)) {
                 mlt_properties_set_int(b_properties, "convert_image_width", request_width);
                 mlt_properties_set_int(b_properties, "convert_image_height", request_height);
                 b_frame->convert_image(b_frame, &b_image, format, *format);
@@ -217,6 +222,7 @@ static int get_image(mlt_frame a_frame,
         *format = mlt_image_rgba;
         error = mlt_frame_get_image(b_frame, &b_image, format, &b_width, &b_height, 0);
     }
+    b_dar = b_ar * b_width / b_height;
     if (b_frame->convert_image
         && (*format != mlt_image_rgba || b_width != request_width || b_height != request_height)) {
         mlt_properties_set_int(b_properties, "convert_image_width", request_width);
@@ -226,6 +232,27 @@ static int get_image(mlt_frame a_frame,
         b_height = request_height;
     }
     *format = mlt_image_rgba;
+    if (distort) {
+        if (b_width != 0 && b_height != 0) {
+            transform.scale(rect.w / b_width, rect.h / b_height);
+        }
+    } else if (rect.w > 0 && rect.h > 0) {
+        double resize_dar = rect.w * consumer_ar / rect.h;
+        double scale;
+        if (b_dar > resize_dar) {
+            scale = rect.w / b_width;
+            if (b_dar < geometry_dar) {
+                scale *= transformScale;
+            }
+        } else {
+            scale = rect.h / b_height;
+            if (b_dar > geometry_dar) {
+                scale *= transformScale;
+            }
+        }
+        transform.translate((rect.w - (b_width * scale)) / 2.0, (rect.h - (b_height * scale)) / 2.0);
+        transform.scale(scale, scale);
+    }
 
     // Get bottom frame
     uint8_t *a_image = NULL;
@@ -241,12 +268,9 @@ static int get_image(mlt_frame a_frame,
     // Copy bottom frame in output
     memcpy(*image, a_image, image_size);
 
-    bool hqPainting = false;
-    if (interps) {
-        if (strcmp(interps, "bilinear") == 0 || strcmp(interps, "bicubic") == 0) {
-            hqPainting = true;
-        }
-    }
+    // We don't do subpixel smoothing for nearest neighbour interpolation
+    // so people can use that to upscale pixel art and keep the hard edges.
+    bool hqPainting = interps && strcmp(interps, "nearest") != 0;
 
     // convert bottom mlt image to qimage
     QImage bottomImg;
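
Note on the shared geometry (illustration, not part of the patch): both files derive the source display aspect ratio from its sample aspect ratio (b_dar = b_ar * b_width / b_height), compare it with the destination rect's aspect ratio (rect.w * consumer_ar / rect.h), and pick one uniform scale so the source fits the rect; the transition then centers the scaled source inside the rect. Below is a minimal, square-pixel sketch of that fit-and-center step, with hypothetical names and no MLT/Qt types; the real code above additionally folds the consumer scale and the source pixel aspect ratio into the requested image size (the _qtblend_scalex handling), which is omitted here.

    // Illustration only -- simplified to square pixels (b_ar == consumer_ar == 1.0).
    #include <cstdio>

    struct Placement {
        double scale; // uniform scale applied to the source
        double dx;    // horizontal offset that centers the scaled source in the rect
        double dy;    // vertical offset
    };

    // b_ar: source sample (pixel) aspect ratio; consumer_ar: profile sample aspect ratio.
    // The b_dar >= rect_dar test mirrors the b_dar >= resize_dar comparison above.
    static Placement fit_center(int b_width, int b_height, double b_ar,
                                double rect_w, double rect_h, double consumer_ar)
    {
        double b_dar = b_ar * b_width / b_height;        // source display aspect ratio
        double rect_dar = rect_w * consumer_ar / rect_h; // destination rect aspect ratio
        // Proportionally wider source: width limits the scale, otherwise height does.
        double scale = (b_dar >= rect_dar) ? rect_w / b_width : rect_h / b_height;
        return {scale, (rect_w - b_width * scale) / 2.0, (rect_h - b_height * scale) / 2.0};
    }

    int main()
    {
        // A 1920x1080 square-pixel source composited into a 720x576 rect:
        // the source is wider (DAR 1.78 vs 1.25), so it is letterboxed, not stretched.
        Placement p = fit_center(1920, 1080, 1.0, 720.0, 576.0, 1.0);
        std::printf("scale=%.3f offset=(%.1f, %.1f)\n", p.scale, p.dx, p.dy);
        return 0;
    }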