// ****************************************************************************
// * This file is part of the HqMAME project. It is distributed under         *
// * GNU General Public License: https://www.gnu.org/licenses/gpl-3.0         *
// * Copyright (C) Zenju (zenju AT gmx DOT de) - All Rights Reserved          *
// *                                                                          *
// * Additionally and as a special exception, the author gives permission     *
// * to link the code of this program with the MAME library (or with modified *
// * versions of MAME that use the same license as MAME), and distribute      *
// * linked combinations including the two. You must obey the GNU General     *
// * Public License in all respects for all of the code used other than MAME. *
// * If you modify this file, you may extend this exception to your version   *
// * of the file, but you are not obligated to do so. If you do not wish to   *
// * do so, delete this exception statement from your version.                *
// ****************************************************************************
// -------------------------------------------------------------------------
// | xBRZ: "Scale by rules" - high quality image upscaling filter by Zenju |
// -------------------------------------------------------------------------
// using a modified approach of xBR:
// http://board.byuu.org/viewtopic.php?f=10&t=2248
// - new rule set preserving small image features
// - highly optimized for performance
// - support alpha channel
// - support multithreading
// - support 64-bit architectures
// - support processing image slices
// - support scaling up to 6xBRZ
// -> map source (srcWidth x srcHeight) to target (scale * srcWidth x scale * srcHeight) image, optionally processing a half-open slice of rows [yFirst, yLast) only
// -> support for source/target pitch in bytes!
// -> if your emulator changes only a few image slices during each cycle (e.g. DOSBox) then there's no need to run xBRZ on the complete image:
// Just make sure you enlarge the source image slice by 2 rows on top and 2 on bottom (this is the additional range the xBRZ algorithm is using during analysis)
// CAVEAT: If there are multiple changed slices, make sure they do not overlap after adding these additional rows in order to avoid a memory race condition
// in the target image data if you are using multiple threads for processing each enlarged slice!
//
// THREAD-SAFETY: - parts of the same image may be scaled by multiple threads as long as the [yFirst, yLast) ranges do not overlap!
// - there is a minor inefficiency for the first row of a slice, so avoid processing single rows only; suggestion: process at least 8-16 rows
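// Example (sketch only; the exported xbrz_scale24/32 helpers at the end of this file always
// process the full image, while the internal scaleImage<>() accepts a [yFirst, yLast) range):
//     //two non-overlapping slices of a 320x240 source, safe to scale on two threads:
//     //scaleImage<Scaler2x> (src, trg, 320, 240,   0, 120, alphagrad32, dist32);
//     //scaleImage<Scaler2x> (src, trg, 320, 240, 120, 240, alphagrad32, dist32);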
#include <stddef.h> // for size_t
#include <stdint.h> // for uint32_t
#include <string.h> // for memset()
#include <limits.h>
#include <math.h>
#ifdef __cplusplus
#define EXTERN_C extern "C"
#else // !__cplusplus
#define EXTERN_C
#endif // __cplusplus
// scaler configuration
#define XBRZ_CFG_LUMINANCE_WEIGHT 1
#define XBRZ_CFG_EQUAL_COLOR_TOLERANCE 30
#define XBRZ_CFG_DOMINANT_DIRECTION_THRESHOLD 3.6
#define XBRZ_CFG_STEEP_DIRECTION_THRESHOLD 2.2
// slice types
#define XBRZ_SLICETYPE_SOURCE 1
#define XBRZ_SLICETYPE_TARGET 2
// handy macros
#define GET_BYTE(val,byteno) ((unsigned char) (((val) >> ((byteno) << 3)) & 0xff))
#define GET_BLUE(val) GET_BYTE (val, 0)
#define GET_GREEN(val) GET_BYTE (val, 1)
#define GET_RED(val) GET_BYTE (val, 2)
#define GET_ALPHA(val) GET_BYTE (val, 3)
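//pixel format is 0xAARRGGBB, e.g. GET_RED (0x00FF0000) == 0xFF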
#define CALC_COLOR24(colFront,colBack,M,N) ((unsigned char) ((((unsigned char) (colFront)) * ((unsigned int) (M)) + ((unsigned char) (colBack)) * (((unsigned int) (N)) - ((unsigned int) (M)))) / ((unsigned int) (N))))
#define CALC_COLOR32(colFront,colBack,weightFront,weightBack,weightSum) ((unsigned char) ((((unsigned char) (colFront)) * ((unsigned int) (weightFront)) + ((unsigned char) (colBack)) * ((unsigned int) (weightBack))) / ((unsigned int) (weightSum))))
#define BYTE_ADVANCE(buffer,offset) (((char *) buffer) + (offset))
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif // MIN
#ifndef MAX
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif // MAX
typedef void (alphagrad_func) (uint32_t *pixBack, uint32_t pixFront, unsigned int M, unsigned int N);
typedef double (dist_func) (uint32_t pix1, uint32_t pix2);
namespace
{
#ifdef _MSC_VER
#define FORCE_INLINE __forceinline
#elif defined __GNUC__
#define FORCE_INLINE __attribute__((always_inline)) inline
#else
#define FORCE_INLINE inline
#endif
enum RotationDegree //clock-wise
{
ROT_0 = 0,
ROT_90,
ROT_180,
ROT_270
};
//calculate input matrix coordinates after rotation at compile time
template <RotationDegree rotDeg, size_t I, size_t J, size_t N> struct MatrixRotation;
template <size_t I, size_t J, size_t N> struct MatrixRotation<ROT_0, I, J, N>
{
static const size_t I_old = I;
static const size_t J_old = J;
};
template <RotationDegree rotDeg, size_t I, size_t J, size_t N> //(i, j) = (row, col) indices, N = size of (square) matrix
struct MatrixRotation
{
static const size_t I_old = N - 1 - MatrixRotation<(RotationDegree)(rotDeg - 1), I, J, N>::J_old; //old coordinates before rotation!
static const size_t J_old = MatrixRotation<(RotationDegree)(rotDeg - 1), I, J, N>::I_old; //
};
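//compile-time sanity example: rotating a 2x2 block by 90° clockwise maps output cell (0, 0)
//back to input cell (1, 0), i.e. the old bottom-left corner:
static_assert(MatrixRotation<ROT_90, 0, 0, 2>::I_old == 1, "");
static_assert(MatrixRotation<ROT_90, 0, 0, 2>::J_old == 0, "");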
template <size_t N, RotationDegree rotDeg> class OutputMatrix
{
public:
OutputMatrix (uint32_t *out, int outWidth) //access matrix area, top-left at position "out" for image with given width
{
out_ = out;
outWidth_ = outWidth;
}
template <size_t I, size_t J> uint32_t &ref() const
{
static const size_t I_old = MatrixRotation<rotDeg, I, J, N>::I_old;
static const size_t J_old = MatrixRotation<rotDeg, I, J, N>::J_old;
return *(out_ + J_old + I_old * outWidth_);
}
uint32_t* out_;
int outWidth_;
};
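//e.g. with rotDeg == ROT_90, ref<0, 0>() resolves at compile time to the pixel at
//(row, col) = (N - 1, 0) of the unrotated output block; the rotation costs nothing at runtime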
enum BlendType
{
BLEND_NONE = 0,
BLEND_NORMAL, //a normal indication to blend
BLEND_DOMINANT, //a strong indication to blend
//attention: BlendType must fit into the value range of 2 bits!!!
};
struct BlendResult
{
BlendType
/**/blend_f, blend_g,
/**/blend_j, blend_k;
};
struct Kernel_4x4 //kernel for preprocessing step
{
uint32_t
/**/a, b, c, d,
/**/e, f, g, h,
/**/i, j, k, l,
/**/m, n, o, p;
};
/*
input kernel area naming convention:
-----------------
| A | B | C | D |
----|---|---|---|
| E | F | G | H | //evaluate the four corners between F, G, J, K
----|---|---|---| //input pixel is at position F
| I | J | K | L |
----|---|---|---|
| M | N | O | P |
-----------------
*/
FORCE_INLINE //detect blend direction
BlendResult preProcessCorners(const Kernel_4x4& ker, dist_func dist) //result: F, G, J, K corners of "GradientType"
{
BlendResult result = {};
if ((ker.f == ker.g &&
ker.j == ker.k) ||
(ker.f == ker.j &&
ker.g == ker.k))
return result;
const int weight = 4;
double jg = dist (ker.i, ker.f) + dist (ker.f, ker.c) + dist (ker.n, ker.k) + dist (ker.k, ker.h) + weight * dist (ker.j, ker.g);
double fk = dist (ker.e, ker.j) + dist (ker.j, ker.o) + dist (ker.b, ker.g) + dist (ker.g, ker.l) + weight * dist (ker.f, ker.k);
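//interpretation: a small "jg" means colors vary little along the g<->j diagonal, i.e. an edge
//runs through f and k, so those two corners are flagged for blending; "fk" is the mirrored case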
if (jg < fk) //test sample: 70% of values max(jg, fk) / min(jg, fk) are between 1.1 and 3.7 with median being 1.8
{
const bool dominantGradient = XBRZ_CFG_DOMINANT_DIRECTION_THRESHOLD * jg < fk;
if (ker.f != ker.g && ker.f != ker.j)
result.blend_f = dominantGradient ? BLEND_DOMINANT : BLEND_NORMAL;
if (ker.k != ker.j && ker.k != ker.g)
result.blend_k = dominantGradient ? BLEND_DOMINANT : BLEND_NORMAL;
}
else if (fk < jg)
{
const bool dominantGradient = XBRZ_CFG_DOMINANT_DIRECTION_THRESHOLD * fk < jg;
if (ker.j != ker.f && ker.j != ker.k)
result.blend_j = dominantGradient ? BLEND_DOMINANT : BLEND_NORMAL;
if (ker.g != ker.f && ker.g != ker.k)
result.blend_g = dominantGradient ? BLEND_DOMINANT : BLEND_NORMAL;
}
return result;
}
struct Kernel_3x3
{
uint32_t
/**/a, b, c,
/**/d, e, f,
/**/g, h, i;
};
/*
#define DEF_GETTER(x) template <RotationDegree rotDeg> uint32_t inline get_##x(const Kernel_3x3& ker) { return ker.x; }
//we cannot and NEED NOT write "ker.##x" since ## concatenates preprocessor tokens but "." is not a token
DEF_GETTER(a) DEF_GETTER(b) DEF_GETTER(c)
DEF_GETTER(d) DEF_GETTER(e) DEF_GETTER(f)
DEF_GETTER(g) DEF_GETTER(h) DEF_GETTER(i)
#undef DEF_GETTER
#define DEF_GETTER(x, y) template <> inline uint32_t get_##x<ROT_90>(const Kernel_3x3& ker) { return ker.y; }
DEF_GETTER(a, g) DEF_GETTER(b, d) DEF_GETTER(c, a)
DEF_GETTER(d, h) DEF_GETTER(e, e) DEF_GETTER(f, b)
DEF_GETTER(g, i) DEF_GETTER(h, f) DEF_GETTER(i, c)
#undef DEF_GETTER
#define DEF_GETTER(x, y) template <> inline uint32_t get_##x<ROT_180>(const Kernel_3x3& ker) { return ker.y; }
DEF_GETTER(a, i) DEF_GETTER(b, h) DEF_GETTER(c, g)
DEF_GETTER(d, f) DEF_GETTER(e, e) DEF_GETTER(f, d)
DEF_GETTER(g, c) DEF_GETTER(h, b) DEF_GETTER(i, a)
#undef DEF_GETTER
#define DEF_GETTER(x, y) template <> inline uint32_t get_##x<ROT_270>(const Kernel_3x3& ker) { return ker.y; }
DEF_GETTER(a, c) DEF_GETTER(b, f) DEF_GETTER(c, i)
DEF_GETTER(d, b) DEF_GETTER(e, e) DEF_GETTER(f, h)
DEF_GETTER(g, a) DEF_GETTER(h, d) DEF_GETTER(i, g)
#undef DEF_GETTER
*/
template <RotationDegree rotDeg> uint32_t inline get_a (const Kernel_3x3& ker) { return ker.a; }
template <RotationDegree rotDeg> uint32_t inline get_b (const Kernel_3x3& ker) { return ker.b; }
template <RotationDegree rotDeg> uint32_t inline get_c (const Kernel_3x3& ker) { return ker.c; }
template <RotationDegree rotDeg> uint32_t inline get_d (const Kernel_3x3& ker) { return ker.d; }
template <RotationDegree rotDeg> uint32_t inline get_e (const Kernel_3x3& ker) { return ker.e; }
template <RotationDegree rotDeg> uint32_t inline get_f (const Kernel_3x3& ker) { return ker.f; }
template <RotationDegree rotDeg> uint32_t inline get_g (const Kernel_3x3& ker) { return ker.g; }
template <RotationDegree rotDeg> uint32_t inline get_h (const Kernel_3x3& ker) { return ker.h; }
template <RotationDegree rotDeg> uint32_t inline get_i (const Kernel_3x3& ker) { return ker.i; }
template <> inline uint32_t get_a<ROT_90>(const Kernel_3x3& ker) { return ker.g; }
template <> inline uint32_t get_b<ROT_90>(const Kernel_3x3& ker) { return ker.d; }
template <> inline uint32_t get_c<ROT_90>(const Kernel_3x3& ker) { return ker.a; }
template <> inline uint32_t get_d<ROT_90>(const Kernel_3x3& ker) { return ker.h; }
template <> inline uint32_t get_e<ROT_90>(const Kernel_3x3& ker) { return ker.e; }
template <> inline uint32_t get_f<ROT_90>(const Kernel_3x3& ker) { return ker.b; }
template <> inline uint32_t get_g<ROT_90>(const Kernel_3x3& ker) { return ker.i; }
template <> inline uint32_t get_h<ROT_90>(const Kernel_3x3& ker) { return ker.f; }
template <> inline uint32_t get_i<ROT_90>(const Kernel_3x3& ker) { return ker.c; }
template <> inline uint32_t get_a<ROT_180>(const Kernel_3x3& ker) { return ker.i; }
template <> inline uint32_t get_b<ROT_180>(const Kernel_3x3& ker) { return ker.h; }
template <> inline uint32_t get_c<ROT_180>(const Kernel_3x3& ker) { return ker.g; }
template <> inline uint32_t get_d<ROT_180>(const Kernel_3x3& ker) { return ker.f; }
template <> inline uint32_t get_e<ROT_180>(const Kernel_3x3& ker) { return ker.e; }
template <> inline uint32_t get_f<ROT_180>(const Kernel_3x3& ker) { return ker.d; }
template <> inline uint32_t get_g<ROT_180>(const Kernel_3x3& ker) { return ker.c; }
template <> inline uint32_t get_h<ROT_180>(const Kernel_3x3& ker) { return ker.b; }
template <> inline uint32_t get_i<ROT_180>(const Kernel_3x3& ker) { return ker.a; }
template <> inline uint32_t get_a<ROT_270>(const Kernel_3x3& ker) { return ker.c; }
template <> inline uint32_t get_b<ROT_270>(const Kernel_3x3& ker) { return ker.f; }
template <> inline uint32_t get_c<ROT_270>(const Kernel_3x3& ker) { return ker.i; }
template <> inline uint32_t get_d<ROT_270>(const Kernel_3x3& ker) { return ker.b; }
template <> inline uint32_t get_e<ROT_270>(const Kernel_3x3& ker) { return ker.e; }
template <> inline uint32_t get_f<ROT_270>(const Kernel_3x3& ker) { return ker.h; }
template <> inline uint32_t get_g<ROT_270>(const Kernel_3x3& ker) { return ker.a; }
template <> inline uint32_t get_h<ROT_270>(const Kernel_3x3& ker) { return ker.d; }
template <> inline uint32_t get_i<ROT_270>(const Kernel_3x3& ker) { return ker.g; }
//compress four blend types into a single byte
inline BlendType getTopL (unsigned char b) { return (BlendType)(0x3 & b); }
inline BlendType getTopR (unsigned char b) { return (BlendType)(0x3 & (b >> 2)); }
inline BlendType getBottomR(unsigned char b) { return (BlendType)(0x3 & (b >> 4)); }
inline BlendType getBottomL(unsigned char b) { return (BlendType)(0x3 & (b >> 6)); }
inline void setTopL (unsigned char& b, BlendType bt) { b |= bt; } //buffer is assumed to be initialized before preprocessing!
inline void setTopR (unsigned char& b, BlendType bt) { b |= (bt << 2); }
inline void setBottomR(unsigned char& b, BlendType bt) { b |= (bt << 4); }
inline void setBottomL(unsigned char& b, BlendType bt) { b |= (bt << 6); }
template <RotationDegree rotDeg> inline
unsigned char rotateBlendInfo (unsigned char b) { return b; }
template <> inline unsigned char rotateBlendInfo<ROT_90 >(unsigned char b) { return ((b << 2) | (b >> 6)) & 0xff; }
template <> inline unsigned char rotateBlendInfo<ROT_180>(unsigned char b) { return ((b << 4) | (b >> 4)) & 0xff; }
template <> inline unsigned char rotateBlendInfo<ROT_270>(unsigned char b) { return ((b << 6) | (b >> 2)) & 0xff; }
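//e.g. packing then rotating: setTopL() stores BLEND_DOMINANT in bits 0-1 (b == 0x02);
//rotateBlendInfo<ROT_90>(0x02) == 0x08, so getTopR() now yields BLEND_DOMINANT:
//the four 2-bit corner fields circulate clockwise around the pixel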
/*
input kernel area naming convention:
-------------
| A | B | C |
----|---|---|
| D | E | F | //input pixel is at position E
----|---|---|
| G | H | I |
-------------
*/
template <class Scaler, RotationDegree rotDeg>
FORCE_INLINE void blendPixel(const Kernel_3x3& ker, uint32_t *target, int trgWidth, unsigned char blendInfo, alphagrad_func alphagrad, dist_func dist) //result of preprocessing all four corners of pixel "e"
{
#define a get_a<rotDeg>(ker)
#define b get_b<rotDeg>(ker)
#define c get_c<rotDeg>(ker)
#define d get_d<rotDeg>(ker)
#define e get_e<rotDeg>(ker)
#define f get_f<rotDeg>(ker)
#define g get_g<rotDeg>(ker)
#define h get_h<rotDeg>(ker)
#define i get_i<rotDeg>(ker)
const unsigned char blend = rotateBlendInfo<rotDeg>(blendInfo);
if (getBottomR(blend) >= BLEND_NORMAL)
{
bool doLineBlend;
if (getBottomR(blend) >= BLEND_DOMINANT)
doLineBlend = true;
else if (getTopR(blend) != BLEND_NONE && (dist (e, g) >= XBRZ_CFG_EQUAL_COLOR_TOLERANCE)) //but support double-blending for 90° corners
doLineBlend = false; // make sure there is no second blending in an adjacent rotation for this pixel: handles insular pixels, mario eyes
else if (getBottomL(blend) != BLEND_NONE && (dist (e, c) >= XBRZ_CFG_EQUAL_COLOR_TOLERANCE))
doLineBlend = false; // make sure there is no second blending in an adjacent rotation for this pixel: handles insular pixels, mario eyes
else if ((dist (e, i) >= XBRZ_CFG_EQUAL_COLOR_TOLERANCE)
&& (dist (g, h) < XBRZ_CFG_EQUAL_COLOR_TOLERANCE)
&& (dist (h, i) < XBRZ_CFG_EQUAL_COLOR_TOLERANCE)
&& (dist (i, f) < XBRZ_CFG_EQUAL_COLOR_TOLERANCE)
&& (dist (f, c) < XBRZ_CFG_EQUAL_COLOR_TOLERANCE))
doLineBlend = false; // no full blending for L-shapes; blend corner only (handles "mario mushroom eyes")
else
doLineBlend = true;
const uint32_t px = (dist (e, f) <= dist (e, h) ? f : h); //choose most similar color
OutputMatrix<Scaler::scale, rotDeg> out(target, trgWidth);
if (doLineBlend)
{
const double fg = dist (f, g); //test sample: 70% of values max(fg, hc) / min(fg, hc) are between 1.1 and 3.7 with median being 1.9
const double hc = dist (h, c); //
const bool haveShallowLine = XBRZ_CFG_STEEP_DIRECTION_THRESHOLD * fg <= hc && e != g && d != g;
const bool haveSteepLine = XBRZ_CFG_STEEP_DIRECTION_THRESHOLD * hc <= fg && e != c && b != c;
if (haveShallowLine)
{
if (haveSteepLine)
Scaler::blendLineSteepAndShallow(px, out, alphagrad);
else
Scaler::blendLineShallow(px, out, alphagrad);
}
else
{
if (haveSteepLine)
Scaler::blendLineSteep(px, out, alphagrad);
else
Scaler::blendLineDiagonal(px, out, alphagrad);
}
}
else
Scaler::blendCorner(px, out, alphagrad);
}
#undef a
#undef b
#undef c
#undef d
#undef e
#undef f
#undef g
#undef h
#undef i
}
template <class Scaler> //scaler policy: see "Scaler2x" reference implementation
void scaleImage(const uint32_t *src, uint32_t *trg, int srcWidth, int srcHeight, int yFirst, int yLast, alphagrad_func alphagrad, dist_func dist)
{
yFirst = MAX (yFirst, 0);
yLast = MIN (yLast, srcHeight);
if (yFirst >= yLast || srcWidth <= 0)
return;
const int trgWidth = srcWidth * Scaler::scale;
//"use" space at the end of the image as temporary buffer for "on the fly preprocessing": we even could use larger area of
//"sizeof(uint32_t) * srcWidth * (yLast - yFirst)" bytes without risk of accidental overwriting before accessing
const int bufferSize = srcWidth;
unsigned char* preProcBuffer = reinterpret_cast<unsigned char*>(trg + yLast * Scaler::scale * trgWidth) - bufferSize;
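//the preprocessing buffer thus occupies the last srcWidth bytes of this stripe's target area;
//those target pixels are written only in the final iterations of the loops below, after their
//blend info has already been consumed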
memset (preProcBuffer, 0, bufferSize);
static_assert(BLEND_NONE == 0, "");
//initialize preprocessing buffer for first row of current stripe: detect upper left and right corner blending
//this cannot be optimized for adjacent processing stripes; we must not allow for a memory race condition!
if (yFirst > 0)
{
const int y = yFirst - 1;
const uint32_t* s_m1 = src + srcWidth * MAX (y - 1, 0);
const uint32_t* s_0 = src + srcWidth * y; //center line
const uint32_t* s_p1 = src + srcWidth * MIN (y + 1, srcHeight - 1);
const uint32_t* s_p2 = src + srcWidth * MIN (y + 2, srcHeight - 1);
for (int x = 0; x < srcWidth; ++x)
{
const int x_m1 = MAX (x - 1, 0);
const int x_p1 = MIN (x + 1, srcWidth - 1);
const int x_p2 = MIN (x + 2, srcWidth - 1);
Kernel_4x4 ker = {}; //perf: initialization is negligible
ker.a = s_m1[x_m1]; //read sequentially from memory as far as possible
ker.b = s_m1[x];
ker.c = s_m1[x_p1];
ker.d = s_m1[x_p2];
ker.e = s_0[x_m1];
ker.f = s_0[x];
ker.g = s_0[x_p1];
ker.h = s_0[x_p2];
ker.i = s_p1[x_m1];
ker.j = s_p1[x];
ker.k = s_p1[x_p1];
ker.l = s_p1[x_p2];
ker.m = s_p2[x_m1];
ker.n = s_p2[x];
ker.o = s_p2[x_p1];
ker.p = s_p2[x_p2];
const BlendResult res = preProcessCorners (ker, dist);
/*
preprocessing blend result:
---------
| F | G |   //evaluate corner between F, G, J, K
----|---| //input pixel is at position F
| J | K |
---------
*/
setTopR(preProcBuffer[x], res.blend_j);
if (x + 1 < bufferSize)
setTopL(preProcBuffer[x + 1], res.blend_k);
}
}
//------------------------------------------------------------------------------------
for (int y = yFirst; y < yLast; ++y)
{
uint32_t *out = trg + Scaler::scale * y * trgWidth; //consider MT "striped" access
const uint32_t* s_m1 = src + srcWidth * MAX (y - 1, 0);
const uint32_t* s_0 = src + srcWidth * y; //center line
const uint32_t* s_p1 = src + srcWidth * MIN (y + 1, srcHeight - 1);
const uint32_t* s_p2 = src + srcWidth * MIN (y + 2, srcHeight - 1);
unsigned char blend_xy1 = 0; //corner blending for current (x, y + 1) position
for (int x = 0; x < srcWidth; ++x, out += Scaler::scale)
{
//all those bounds checks have only insignificant impact on performance!
const int x_m1 = MAX (x - 1, 0); //perf: prefer array indexing to additional pointers!
const int x_p1 = MIN (x + 1, srcWidth - 1);
const int x_p2 = MIN (x + 2, srcWidth - 1);
Kernel_4x4 ker4 = {}; //perf: initialization is negligible
ker4.a = s_m1[x_m1]; //read sequentially from memory as far as possible
ker4.b = s_m1[x];
ker4.c = s_m1[x_p1];
ker4.d = s_m1[x_p2];
ker4.e = s_0[x_m1];
ker4.f = s_0[x];
ker4.g = s_0[x_p1];
ker4.h = s_0[x_p2];
ker4.i = s_p1[x_m1];
ker4.j = s_p1[x];
ker4.k = s_p1[x_p1];
ker4.l = s_p1[x_p2];
ker4.m = s_p2[x_m1];
ker4.n = s_p2[x];
ker4.o = s_p2[x_p1];
ker4.p = s_p2[x_p2];
//evaluate the four corners on bottom-right of current pixel
unsigned char blend_xy = 0; //for current (x, y) position
{
const BlendResult res = preProcessCorners (ker4, dist);
/*
preprocessing blend result:
---------
| F | G |   //evaluate corner between F, G, J, K
----|---| //current input pixel is at position F
| J | K |
---------
*/
blend_xy = preProcBuffer[x];
setBottomR(blend_xy, res.blend_f); //all four corners of (x, y) have been determined at this point due to processing sequence!
setTopR(blend_xy1, res.blend_j); //set 2nd known corner for (x, y + 1)
preProcBuffer[x] = blend_xy1; //store on current buffer position for use on next row
blend_xy1 = 0;
setTopL(blend_xy1, res.blend_k); //set 1st known corner for (x + 1, y + 1) and buffer for use on next column
if (x + 1 < bufferSize) //set 3rd known corner for (x + 1, y)
setBottomL(preProcBuffer[x + 1], res.blend_g);
}
//fill block of size scale * scale with the given color
{
uint32_t *blk = out;
for (int _blk_y = 0; _blk_y < Scaler::scale; ++_blk_y, blk = (uint32_t *) BYTE_ADVANCE (blk, trgWidth * sizeof (uint32_t)))
for (int _blk_x = 0; _blk_x < Scaler::scale; ++_blk_x)
blk[_blk_x] = ker4.f;
}
//place *after* preprocessing step, so we don't overwrite the results while processing the last pixel!
//blend four corners of current pixel
if (blend_xy != 0) //good 5% perf-improvement
{
Kernel_3x3 ker3 = {}; //perf: initialization is negligible
ker3.a = ker4.a;
ker3.b = ker4.b;
ker3.c = ker4.c;
ker3.d = ker4.e;
ker3.e = ker4.f;
ker3.f = ker4.g;
ker3.g = ker4.i;
ker3.h = ker4.j;
ker3.i = ker4.k;
blendPixel<Scaler, ROT_0 >(ker3, out, trgWidth, blend_xy, alphagrad, dist);
blendPixel<Scaler, ROT_90 >(ker3, out, trgWidth, blend_xy, alphagrad, dist);
blendPixel<Scaler, ROT_180>(ker3, out, trgWidth, blend_xy, alphagrad, dist);
blendPixel<Scaler, ROT_270>(ker3, out, trgWidth, blend_xy, alphagrad, dist);
}
}
}
}
//------------------------------------------------------------------------------------
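//every ScalerNx struct below implements the same implicit policy interface consumed by
//scaleImage() and blendPixel(): a compile-time "scale" constant plus the five routines
//blendLineShallow(), blendLineSteep(), blendLineSteepAndShallow(), blendLineDiagonal() and blendCorner()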
struct Scaler2x
{
static const int scale = 2;
template <class OutputMatrix>
static void blendLineShallow(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<scale - 1, 0>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 1, 1>()), col, 3, 4);
}
template <class OutputMatrix>
static void blendLineSteep(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<0, scale - 1>()), col, 1, 4);
alphagrad (&(out.template ref<1, scale - 1>()), col, 3, 4);
}
template <class OutputMatrix>
static void blendLineSteepAndShallow(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<1, 0>()), col, 1, 4);
alphagrad (&(out.template ref<0, 1>()), col, 1, 4);
alphagrad (&(out.template ref<1, 1>()), col, 5, 6); //[!] fixes 7/8 used in xBR
}
template <class OutputMatrix>
static void blendLineDiagonal(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<1, 1>()), col, 1, 2);
}
template <class OutputMatrix>
static void blendCorner(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
//model a round corner
alphagrad (&(out.template ref<1, 1>()), col, 21, 100); //exact: 1 - pi/4 = 0.2146018366
}
};
struct Scaler3x
{
static const int scale = 3;
template <class OutputMatrix>
static void blendLineShallow(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<scale - 1, 0>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 2, 2>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 1, 1>()), col, 3, 4);
out.template ref<scale - 1, 2>() = col;
}
template <class OutputMatrix>
static void blendLineSteep(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<0, scale - 1>()), col, 1, 4);
alphagrad (&(out.template ref<2, scale - 2>()), col, 1, 4);
alphagrad (&(out.template ref<1, scale - 1>()), col, 3, 4);
out.template ref<2, scale - 1>() = col;
}
template <class OutputMatrix>
static void blendLineSteepAndShallow(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<2, 0>()), col, 1, 4);
alphagrad (&(out.template ref<0, 2>()), col, 1, 4);
alphagrad (&(out.template ref<2, 1>()), col, 3, 4);
alphagrad (&(out.template ref<1, 2>()), col, 3, 4);
out.template ref<2, 2>() = col;
}
template <class OutputMatrix>
static void blendLineDiagonal(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<1, 2>()), col, 1, 8); //conflict with other rotations for this odd scale
alphagrad (&(out.template ref<2, 1>()), col, 1, 8);
alphagrad (&(out.template ref<2, 2>()), col, 7, 8); //
}
template <class OutputMatrix>
static void blendCorner(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
//model a round corner
alphagrad (&(out.template ref<2, 2>()), col, 45, 100); //exact: 0.4545939598
//alphagrad (&(out.template ref<2, 1>()), col, 7, 256); //0.02826017254 -> negligible + avoid conflicts with other rotations for this odd scale
//alphagrad (&(out.template ref<1, 2>()), col, 7, 256); //0.02826017254
}
};
struct Scaler4x
{
static const int scale = 4;
template <class OutputMatrix>
static void blendLineShallow(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<scale - 1, 0>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 2, 2>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 1, 1>()), col, 3, 4);
alphagrad (&(out.template ref<scale - 2, 3>()), col, 3, 4);
out.template ref<scale - 1, 2>() = col;
out.template ref<scale - 1, 3>() = col;
}
template <class OutputMatrix>
static void blendLineSteep(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<0, scale - 1>()), col, 1, 4);
alphagrad (&(out.template ref<2, scale - 2>()), col, 1, 4);
alphagrad (&(out.template ref<1, scale - 1>()), col, 3, 4);
alphagrad (&(out.template ref<3, scale - 2>()), col, 3, 4);
out.template ref<2, scale - 1>() = col;
out.template ref<3, scale - 1>() = col;
}
template <class OutputMatrix>
static void blendLineSteepAndShallow(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<3, 1>()), col, 3, 4);
alphagrad (&(out.template ref<1, 3>()), col, 3, 4);
alphagrad (&(out.template ref<3, 0>()), col, 1, 4);
alphagrad (&(out.template ref<0, 3>()), col, 1, 4);
alphagrad (&(out.template ref<2, 2>()), col, 1, 3); //[!] fixes 1/4 used in xBR
out.template ref<3, 3>() = col;
out.template ref<3, 2>() = col;
out.template ref<2, 3>() = col;
}
template <class OutputMatrix>
static void blendLineDiagonal(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<scale - 1, scale / 2 >()), col, 1, 2);
alphagrad (&(out.template ref<scale - 2, scale / 2 + 1>()), col, 1, 2);
out.template ref<scale - 1, scale - 1>() = col;
}
template <class OutputMatrix>
static void blendCorner(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
//model a round corner
alphagrad (&(out.template ref<3, 3>()), col, 68, 100); //exact: 0.6848532563
alphagrad (&(out.template ref<3, 2>()), col, 9, 100); //0.08677704501
alphagrad (&(out.template ref<2, 3>()), col, 9, 100); //0.08677704501
}
};
struct Scaler5x
{
static const int scale = 5;
template <class OutputMatrix>
static void blendLineShallow(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<scale - 1, 0>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 2, 2>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 3, 4>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 1, 1>()), col, 3, 4);
alphagrad (&(out.template ref<scale - 2, 3>()), col, 3, 4);
out.template ref<scale - 1, 2>() = col;
out.template ref<scale - 1, 3>() = col;
out.template ref<scale - 1, 4>() = col;
out.template ref<scale - 2, 4>() = col;
}
template <class OutputMatrix>
static void blendLineSteep(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<0, scale - 1>()), col, 1, 4);
alphagrad (&(out.template ref<2, scale - 2>()), col, 1, 4);
alphagrad (&(out.template ref<4, scale - 3>()), col, 1, 4);
alphagrad (&(out.template ref<1, scale - 1>()), col, 3, 4);
alphagrad (&(out.template ref<3, scale - 2>()), col, 3, 4);
out.template ref<2, scale - 1>() = col;
out.template ref<3, scale - 1>() = col;
out.template ref<4, scale - 1>() = col;
out.template ref<4, scale - 2>() = col;
}
template <class OutputMatrix>
static void blendLineSteepAndShallow(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<0, scale - 1>()), col, 1, 4);
alphagrad (&(out.template ref<2, scale - 2>()), col, 1, 4);
alphagrad (&(out.template ref<1, scale - 1>()), col, 3, 4);
alphagrad (&(out.template ref<scale - 1, 0>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 2, 2>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 1, 1>()), col, 3, 4);
alphagrad (&(out.template ref<3, 3>()), col, 2, 3);
out.template ref<2, scale - 1>() = col;
out.template ref<3, scale - 1>() = col;
out.template ref<4, scale - 1>() = col;
out.template ref<scale - 1, 2>() = col;
out.template ref<scale - 1, 3>() = col;
}
template <class OutputMatrix>
static void blendLineDiagonal(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<scale - 1, scale / 2 >()), col, 1, 8); //conflict with other rotations for this odd scale
alphagrad (&(out.template ref<scale - 2, scale / 2 + 1>()), col, 1, 8);
alphagrad (&(out.template ref<scale - 3, scale / 2 + 2>()), col, 1, 8); //
alphagrad (&(out.template ref<4, 3>()), col, 7, 8);
alphagrad (&(out.template ref<3, 4>()), col, 7, 8);
out.template ref<4, 4>() = col;
}
template <class OutputMatrix>
static void blendCorner(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
// model a round corner
alphagrad (&(out.template ref<4, 4>()), col, 86, 100); //exact: 0.8631434088
alphagrad (&(out.template ref<4, 3>()), col, 23, 100); //0.2306749731
alphagrad (&(out.template ref<3, 4>()), col, 23, 100); //0.2306749731
//alphaGrad<1, 64>(out.template ref<4, 2>(), col); //0.01676812367 -> negligible + avoid conflicts with other rotations for this odd scale
//alphaGrad<1, 64>(out.template ref<2, 4>(), col); //0.01676812367
}
};
struct Scaler6x
{
static const int scale = 6;
template <class OutputMatrix>
static void blendLineShallow(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<scale - 1, 0>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 2, 2>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 3, 4>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 1, 1>()), col, 3, 4);
alphagrad (&(out.template ref<scale - 2, 3>()), col, 3, 4);
alphagrad (&(out.template ref<scale - 3, 5>()), col, 3, 4);
out.template ref<scale - 1, 2>() = col;
out.template ref<scale - 1, 3>() = col;
out.template ref<scale - 1, 4>() = col;
out.template ref<scale - 1, 5>() = col;
out.template ref<scale - 2, 4>() = col;
out.template ref<scale - 2, 5>() = col;
}
template <class OutputMatrix>
static void blendLineSteep(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<0, scale - 1>()), col, 1, 4);
alphagrad (&(out.template ref<2, scale - 2>()), col, 1, 4);
alphagrad (&(out.template ref<4, scale - 3>()), col, 1, 4);
alphagrad (&(out.template ref<1, scale - 1>()), col, 3, 4);
alphagrad (&(out.template ref<3, scale - 2>()), col, 3, 4);
alphagrad (&(out.template ref<5, scale - 3>()), col, 3, 4);
out.template ref<2, scale - 1>() = col;
out.template ref<3, scale - 1>() = col;
out.template ref<4, scale - 1>() = col;
out.template ref<5, scale - 1>() = col;
out.template ref<4, scale - 2>() = col;
out.template ref<5, scale - 2>() = col;
}
template <class OutputMatrix>
static void blendLineSteepAndShallow(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<0, scale - 1>()), col, 1, 4);
alphagrad (&(out.template ref<2, scale - 2>()), col, 1, 4);
alphagrad (&(out.template ref<1, scale - 1>()), col, 3, 4);
alphagrad (&(out.template ref<3, scale - 2>()), col, 3, 4);
alphagrad (&(out.template ref<scale - 1, 0>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 2, 2>()), col, 1, 4);
alphagrad (&(out.template ref<scale - 1, 1>()), col, 3, 4);
alphagrad (&(out.template ref<scale - 2, 3>()), col, 3, 4);
out.template ref<2, scale - 1>() = col;
out.template ref<3, scale - 1>() = col;
out.template ref<4, scale - 1>() = col;
out.template ref<5, scale - 1>() = col;
out.template ref<4, scale - 2>() = col;
out.template ref<5, scale - 2>() = col;
out.template ref<scale - 1, 2>() = col;
out.template ref<scale - 1, 3>() = col;
}
template <class OutputMatrix>
static void blendLineDiagonal(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
alphagrad (&(out.template ref<scale - 1, scale / 2 >()), col, 1, 2);
alphagrad (&(out.template ref<scale - 2, scale / 2 + 1>()), col, 1, 2);
alphagrad (&(out.template ref<scale - 3, scale / 2 + 2>()), col, 1, 2);
out.template ref<scale - 2, scale - 1>() = col;
out.template ref<scale - 1, scale - 1>() = col;
out.template ref<scale - 1, scale - 2>() = col;
}
template <class OutputMatrix>
static void blendCorner(uint32_t col, OutputMatrix& out, alphagrad_func alphagrad)
{
//model a round corner
alphagrad (&(out.template ref<5, 5>()), col, 97, 100); //exact: 0.9711013910
alphagrad (&(out.template ref<4, 5>()), col, 42, 100); //0.4236372243
alphagrad (&(out.template ref<5, 4>()), col, 42, 100); //0.4236372243
alphagrad (&(out.template ref<5, 3>()), col, 6, 100); //0.05652034508
alphagrad (&(out.template ref<3, 5>()), col, 6, 100); //0.05652034508
}
};
//------------------------------------------------------------------------------------
}
static double dist24 (uint32_t pix1, uint32_t pix2)
{
//30% perf boost compared to plain distYCbCr()!
//consumes 64 MB memory; using double is only 2% faster, but takes 128 MB
static float diffToDist[256 * 256 * 256];
static bool is_initialized = false;
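//CAVEAT: this lazy initialization is not synchronized; when scaling on multiple threads,
//call dist24() once from a single thread first (e.g. via xbrz_equalcolortest24()) to
//populate the table safely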
if (!is_initialized)
{
for (uint32_t i = 0; i < 256 * 256 * 256; ++i) //startup time: 114 ms on Intel Core i5 (four cores)
{
const int r_diff = GET_RED (i) * 2 - 0xFF;
const int g_diff = GET_GREEN (i) * 2 - 0xFF;
const int b_diff = GET_BLUE (i) * 2 - 0xFF;
const double k_b = 0.0593; //ITU-R BT.2020 conversion
const double k_r = 0.2627; //
const double k_g = 1 - k_b - k_r;
const double scale_b = 0.5 / (1 - k_b);
const double scale_r = 0.5 / (1 - k_r);
const double y = k_r * r_diff + k_g * g_diff + k_b * b_diff; //[!], analog YCbCr!
const double c_b = scale_b * (b_diff - y);
const double c_r = scale_r * (r_diff - y);
diffToDist[i] = (float) (sqrt ((y * y) + (c_b * c_b) + (c_r * c_r)));
}
is_initialized = true;
}
const int r_diff = (int) GET_RED (pix1) - (int) GET_RED (pix2);
const int g_diff = (int) GET_GREEN (pix1) - (int) GET_GREEN (pix2);
const int b_diff = (int) GET_BLUE (pix1) - (int) GET_BLUE (pix2);
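//e.g. pure red vs. black: r_diff == 255, g_diff == b_diff == 0 -> index 0xFF7F7F;
//halving packs each signed difference from [-255, 255] into a single byte per channel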
return diffToDist[(((r_diff + 0xFF) / 2) << 16) | //slightly reduce precision (division by 2) to squeeze value into single byte
(((g_diff + 0xFF) / 2) << 8) |
(((b_diff + 0xFF) / 2) << 0)];
}
static double dist32 (uint32_t pix1, uint32_t pix2)
{
const double a1 = GET_ALPHA (pix1) / 255.0;
const double a2 = GET_ALPHA (pix2) / 255.0;
/*
Requirements for a color distance handling alpha channel: with a1, a2 in [0, 1]
1. if a1 = a2, distance should be: a1 * distYCbCr()
2. if a1 = 0, distance should be: a2 * distYCbCr(black, white) = a2 * 255
3. if a1 = 1, ??? maybe: 255 * (1 - a2) + a2 * distYCbCr()
*/
//return MIN (a1, a2) * distYCbCrBuffered(pix1, pix2) + 255 * abs(a1 - a2);
//=> following code is 15% faster:
const double d = dist24 (pix1, pix2);
return (a1 < a2 ? a1 * d + 255 * (a2 - a1) : a2 * d + 255 * (a1 - a2));
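//sanity check: a1 == 0, a2 == 1 gives 0 * d + 255 * (1 - 0) == 255, matching requirement 2. above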
}
static void alphagrad24 (uint32_t *pixBack, uint32_t pixFront, unsigned int M, unsigned int N)
{
// blend front color with opacity M / N over opaque background: http://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
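// e.g. M == 1, N == 4 yields 25% front and 75% background per channel: (front * 1 + back * (4 - 1)) / 4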
*pixBack = ( (CALC_COLOR24 (GET_RED (pixFront), GET_RED (*pixBack), M, N) << 16)
| (CALC_COLOR24 (GET_GREEN (pixFront), GET_GREEN (*pixBack), M, N) << 8)
| (CALC_COLOR24 (GET_BLUE (pixFront), GET_BLUE (*pixBack), M, N) << 0));
}
static void alphagrad32 (uint32_t *pixBack, uint32_t pixFront, unsigned int M, unsigned int N)
{
// find intermediate color between two colors with alpha channels (=> NO alpha blending!!!)
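// e.g. an opaque front pixel over a fully transparent background with M == 1, N == 4:
// weightFront == 255, weightBack == 0, so the RGB result equals the front color unchanged,
// while the output alpha becomes weightSum / N == 255 / 4 (colors are NOT premultiplied by alpha)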
const unsigned int weightFront = GET_ALPHA (pixFront) * M;
const unsigned int weightBack = GET_ALPHA (*pixBack) * (N - M);
const unsigned int weightSum = weightFront + weightBack;
*pixBack = (weightSum == 0 ? 0 :
(((unsigned char) (weightSum / N)) << 24)
| (CALC_COLOR32 (GET_RED (pixFront), GET_RED (*pixBack), weightFront, weightBack, weightSum) << 16)
| (CALC_COLOR32 (GET_GREEN (pixFront), GET_GREEN (*pixBack), weightFront, weightBack, weightSum) << 8)
| (CALC_COLOR32 (GET_BLUE (pixFront), GET_BLUE (*pixBack), weightFront, weightBack, weightSum) << 0));
}
EXTERN_C void nearestNeighborScale(const uint32_t *src, int srcWidth, int srcHeight, uint32_t *trg, int trgWidth, int trgHeight)
{
// note: the original C++ template implementation was generic over source/target pixel types
// (static_assert'ing that they are cast-able to char* and match the pixel converter) and took
// pitches in bytes, a slice type, and a per-pixel converter, e.g.:
// nearestNeighborScale (src, srcWidth, srcHeight, srcWidth * sizeof (uint32_t), trg, trgWidth, trgHeight, trgWidth * sizeof (uint32_t), XBRZ_SLICETYPE_TARGET, 0, trgHeight, [](uint32_t pix) { return pix; });
// this port is fixed to uint32_t pixels with pitch == width * sizeof (uint32_t)
int srcPitch = srcWidth * sizeof (uint32_t);
int trgPitch = trgWidth * sizeof (uint32_t);
int yFirst;
int yLast;
#if 0 // going over source image - fast for upscaling, since source is read only once
yFirst = 0;
yLast = MIN (trgHeight, srcHeight);
if (yFirst >= yLast || trgWidth <= 0 || trgHeight <= 0)
return; // consistency check
for (int y = yFirst; y < yLast; ++y)
{
//mathematically: ySrc = floor(srcHeight * yTrg / trgHeight)
// => search for integers in: [ySrc, ySrc + 1) * trgHeight / srcHeight
//keep within for loop to support MT input slices!
const int yTrg_first = ( y * trgHeight + srcHeight - 1) / srcHeight; //=ceil(y * trgHeight / srcHeight)
const int yTrg_last = ((y + 1) * trgHeight + srcHeight - 1) / srcHeight; //=ceil(((y + 1) * trgHeight) / srcHeight)
const int blockHeight = yTrg_last - yTrg_first;
if (blockHeight > 0)
{
const uint32_t *srcLine = (const uint32_t *) BYTE_ADVANCE (src, y * srcPitch);
/**/ uint32_t *trgLine = ( uint32_t *) BYTE_ADVANCE (trg, yTrg_first * trgPitch);
int xTrg_first = 0;
for (int x = 0; x < srcWidth; ++x)
{
const int xTrg_last = ((x + 1) * trgWidth + srcWidth - 1) / srcWidth;
const int blockWidth = xTrg_last - xTrg_first;
if (blockWidth > 0)
{
const uint32_t trgColor = srcLine[x];
uint32_t *blkLine = trgLine;
xTrg_first = xTrg_last;
for (int blk_y = 0; blk_y < blockHeight; ++blk_y, blkLine = (uint32_t *) BYTE_ADVANCE (blkLine, trgPitch))
for (int blk_x = 0; blk_x < blockWidth; ++blk_x)
blkLine[blk_x] = trgColor;
trgLine += blockWidth;
}
}
}
}
#else // going over target image - slow for upscaling, since source is read multiple times missing out on cache! Fast for similar image sizes!
yFirst = 0;
yLast = trgHeight;
if (yFirst >= yLast || srcHeight <= 0 || srcWidth <= 0)
return; // consistency check
for (int y = yFirst; y < yLast; ++y)
{
/**/ uint32_t *trgLine = ( uint32_t *) BYTE_ADVANCE (trg, y * trgPitch);
const int ySrc = srcHeight * y / trgHeight;
const uint32_t *srcLine = (const uint32_t *) BYTE_ADVANCE (src, ySrc * srcPitch);
for (int x = 0; x < trgWidth; ++x)
{
const int xSrc = srcWidth * x / trgWidth;
trgLine[x] = srcLine[xSrc];
}
}
#endif // going over source or target
return;
}
EXTERN_C bool xbrz_equalcolortest24 (uint32_t col1, uint32_t col2, double luminanceWeight, double equalColorTolerance)
{
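(void) luminanceWeight; //accepted for API compatibility; unused here (as is XBRZ_CFG_LUMINANCE_WEIGHT)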
return (dist24 (col1, col2) < equalColorTolerance);
}
EXTERN_C bool xbrz_equalcolortest32 (uint32_t col1, uint32_t col2, double luminanceWeight, double equalColorTolerance)
{
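(void) luminanceWeight; //accepted for API compatibility; unused here (as is XBRZ_CFG_LUMINANCE_WEIGHT)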
return (dist32 (col1, col2) < equalColorTolerance);
}
EXTERN_C void xbrz_scale24 (size_t factor, const uint32_t *src, uint32_t *trg, int srcWidth, int srcHeight)
{
if (factor == 2) return scaleImage<Scaler2x> (src, trg, srcWidth, srcHeight, 0, srcHeight, alphagrad24, dist24);
else if (factor == 3) return scaleImage<Scaler3x> (src, trg, srcWidth, srcHeight, 0, srcHeight, alphagrad24, dist24);
else if (factor == 4) return scaleImage<Scaler4x> (src, trg, srcWidth, srcHeight, 0, srcHeight, alphagrad24, dist24);
else if (factor == 5) return scaleImage<Scaler5x> (src, trg, srcWidth, srcHeight, 0, srcHeight, alphagrad24, dist24);
else if (factor == 6) return scaleImage<Scaler6x> (src, trg, srcWidth, srcHeight, 0, srcHeight, alphagrad24, dist24);
}
EXTERN_C void xbrz_scale32 (size_t factor, const uint32_t *src, uint32_t *trg, int srcWidth, int srcHeight)
{
if (factor == 2) return scaleImage<Scaler2x> (src, trg, srcWidth, srcHeight, 0, srcHeight, alphagrad32, dist32);
else if (factor == 3) return scaleImage<Scaler3x> (src, trg, srcWidth, srcHeight, 0, srcHeight, alphagrad32, dist32);
else if (factor == 4) return scaleImage<Scaler4x> (src, trg, srcWidth, srcHeight, 0, srcHeight, alphagrad32, dist32);
else if (factor == 5) return scaleImage<Scaler5x> (src, trg, srcWidth, srcHeight, 0, srcHeight, alphagrad32, dist32);
else if (factor == 6) return scaleImage<Scaler6x> (src, trg, srcWidth, srcHeight, 0, srcHeight, alphagrad32, dist32);
}
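/* usage sketch (a minimal example following this file's conventions; the 320x240 buffer sizes
   and the "src"/"trg" arrays are hypothetical and owned by the caller):

       static uint32_t src[320 * 240];       //0xAARRGGBB input
       static uint32_t trg[640 * 480];       //must hold (factor * srcWidth) x (factor * srcHeight) pixels
       xbrz_scale32 (2, src, trg, 320, 240); //2x scale with alpha-aware color distance
*/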