#include <private/qdrawhelper_neon_p.h>
#include <private/qblendfunctions_p.h>
#include <private/qmath_p.h>
#include <private/qpixellayout_p.h>
#include <private/qpaintengine_raster_p.h>

QT_BEGIN_NAMESPACE

void qt_memfill32(quint32 *dest, quint32 value, qsizetype count)
{
    const int epilogueSize = count % 16;
#if defined(Q_CC_GHS) || defined(Q_CC_MSVC)
    // inline-assembler-free version
    if (count >= 16) {
        quint32 *const neonEnd = dest + count - epilogueSize;
        const uint32x4_t valueVector1 = vdupq_n_u32(value);
        const uint32x4x4_t valueVector4 = { valueVector1, valueVector1, valueVector1, valueVector1 };
        do {
            vst4q_u32(dest, valueVector4);
            dest += 16;
        } while (dest != neonEnd);
    }
#elif !defined(Q_PROCESSOR_ARM_64)
    if (count >= 16) {
        quint32 *const neonEnd = dest + count - epilogueSize;
        register uint32x4_t valueVector1 asm ("q0") = vdupq_n_u32(value);
        register uint32x4_t valueVector2 asm ("q1") = valueVector1;
        while (dest != neonEnd) {
            asm volatile (
                "vst2.32     { d0, d1, d2, d3 }, [%[DST]]!\n\t"
                "vst2.32     { d0, d1, d2, d3 }, [%[DST]]!\n\t"
                : [DST]"+r" (dest)
                : [VALUE1]"w"(valueVector1), [VALUE2]"w"(valueVector2)
                : "memory"
            );
        }
    }
#else
    if (count >= 16) {
        quint32 *const neonEnd = dest + count - epilogueSize;
        register uint32x4_t valueVector1 asm ("v0") = vdupq_n_u32(value);
        register uint32x4_t valueVector2 asm ("v1") = valueVector1;
        while (dest != neonEnd) {
            asm volatile (
                "st2 { v0.4s, v1.4s }, [%[DST]], #32 \n\t"
                "st2 { v0.4s, v1.4s }, [%[DST]], #32 \n\t"
                : [DST]"+r" (dest)
                : [VALUE1]"w"(valueVector1), [VALUE2]"w"(valueVector2)
                : "memory"
            );
        }
    }
#endif
    // epilogue: store the remaining count % 16 values one at a time
    switch (epilogueSize) {
    // ... cases 15 down to 2 fall through, each storing one value ...
    case 1: *dest++ = value;
    }
}

static inline uint16x8_t qvdiv_255_u16(uint16x8_t x, uint16x8_t half)
{
    // result = (x + (x >> 8) + 0x80) >> 8, a rounded division by 255
    const uint16x8_t temp = vshrq_n_u16(x, 8);          // x >> 8
    const uint16x8_t sum_part = vaddq_u16(x, half);     // x + 0x80
    const uint16x8_t sum = vaddq_u16(temp, sum_part);   // x + (x >> 8) + 0x80
    return vshrq_n_u16(sum, 8);
}
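
// Scalar sketch of the same approximation (illustrative only, not part of
// the original file): for t in [0, 255*255], (t + (t >> 8) + 0x80) >> 8
// matches the rounded t / 255; e.g. t = 255 * 255 gives
// (65025 + 254 + 128) >> 8 = 255.
static inline uint qdiv_255_scalar_sketch(uint t)
{
    return (t + (t >> 8) + 0x80) >> 8;
}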

static inline uint16x8_t qvbyte_mul_u16(uint16x8_t x, uint16x8_t alpha, uint16x8_t half)
{
    // (x * alpha) / 255, rounded
    const uint16x8_t t = vmulq_u16(x, alpha);
    return qvdiv_255_u16(t, half);
}

static inline uint16x8_t qvinterpolate_pixel_255(uint16x8_t x, uint16x8_t a, uint16x8_t y, uint16x8_t b, uint16x8_t half)
{
    // (x * a + y * b) / 255, rounded
    const uint16x8_t ta = vmulq_u16(x, a);
    const uint16x8_t tb = vmulq_u16(y, b);
    return qvdiv_255_u16(vaddq_u16(ta, tb), half);
}

static inline uint16x8_t qvsource_over_u16(uint16x8_t src16, uint16x8_t dst16, uint16x8_t half, uint16x8_t full)
{
    // broadcast the alpha of each pixel (lane 3 and lane 7) across its channels
    const uint16x4_t alpha16_high = vdup_lane_u16(vget_high_u16(src16), 3);
    const uint16x4_t alpha16_low = vdup_lane_u16(vget_low_u16(src16), 3);
    const uint16x8_t alpha16 = vsubq_u16(full, vcombine_u16(alpha16_low, alpha16_high));
    return vaddq_u16(src16, qvbyte_mul_u16(dst16, alpha16, half));
}
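
// Scalar sketch of what qvsource_over_u16 computes per channel (illustrative
// only, not part of the original file). src is premultiplied, a is its alpha:
//     result = src + dst * (255 - a) / 255
static inline uint source_over_channel_sketch(uint src, uint a, uint dst)
{
    const uint t = dst * (255 - a);
    return src + ((t + (t >> 8) + 0x80) >> 8); // rounded division by 255
}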

#if defined(ENABLE_PIXMAN_DRAWHELPERS)
extern "C" void
pixman_composite_over_8888_0565_asm_neon(int32_t w, int32_t h,
                                         uint16_t *dst, int32_t dst_stride,
                                         uint32_t *src, int32_t src_stride);
extern "C" void
pixman_composite_over_8888_8888_asm_neon(int32_t w, int32_t h,
                                         uint32_t *dst, int32_t dst_stride,
                                         uint32_t *src, int32_t src_stride);
extern "C" void
pixman_composite_src_0565_8888_asm_neon(int32_t w, int32_t h,
                                        uint32_t *dst, int32_t dst_stride,
                                        uint16_t *src, int32_t src_stride);
extern "C" void
pixman_composite_over_n_8_0565_asm_neon(int32_t w, int32_t h,
                                        uint16_t *dst, int32_t dst_stride,
                                        uint32_t src, int32_t unused,
                                        uint8_t *mask, int32_t mask_stride);
extern "C" void
pixman_composite_scanline_over_asm_neon(int32_t w, uint32_t *dst,
                                        const uint32_t *src);
extern "C" void
pixman_composite_src_0565_0565_asm_neon(int32_t w, int32_t h,
                                        uint16_t *dst, int32_t dst_stride,
                                        uint16_t *src, int32_t src_stride);

// qblendfunctions.cpp
void qt_blend_argb32_on_rgb16_const_alpha(uchar *destPixels, int dbpl,
                                          const uchar *srcPixels, int sbpl,
                                          int w, int h,
                                          int const_alpha);

void qt_blend_rgb16_on_argb32_neon(uchar *destPixels, int dbpl,
                                   const uchar *srcPixels, int sbpl,
                                   int w, int h,
                                   int const_alpha)
{
    quint16 *src = (quint16 *) srcPixels;
    quint32 *dst = (quint32 *) destPixels;

    if (const_alpha != 256) {
        quint8 a = (255 * const_alpha) >> 8;
        quint8 ia = 255 - a;

        while (h--) {
            for (int x = 0; x < w; ++x)
                dst[x] = INTERPOLATE_PIXEL_255(qConvertRgb16To32(src[x]), a, dst[x], ia);
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (quint16 *)(((uchar *) src) + sbpl);
        }
        return;
    }

    pixman_composite_src_0565_8888_asm_neon(w, h, dst, dbpl, src, sbpl);
}

template <int N>
static inline void scanLineBlit16(quint16 *dst, quint16 *src, int dstride)
{
    if (N >= 2) {
        ((quint32 *)dst)[0] = ((quint32 *)src)[0];
        __builtin_prefetch(dst + dstride, 1, 0);
    }
    for (int i = 1; i < N / 2; ++i)
        ((quint32 *)dst)[i] = ((quint32 *)src)[i];
    if (N & 1)
        dst[N - 1] = src[N - 1];
}

template <int Width>
static inline void blockBlit16(quint16 *dst, quint16 *src, int dstride, int sstride, int h)
{
    union {
        quintptr address;
        quint16 *pointer;
    } u;
    u.pointer = dst;

    if (u.address & 2) {
        // dst is only 16-bit aligned: copy one pixel, then blit the rest
        // 32 bits at a time
        while (h--) {
            dst[0] = src[0];
            scanLineBlit16<Width - 1>(dst + 1, src + 1, dstride);
            dst += dstride;
            src += sstride;
        }
    } else {
        while (h--) {
            scanLineBlit16<Width>(dst, src, dstride);
            dst += dstride;
            src += sstride;
        }
    }
}

void qt_blend_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
                                  const uchar *srcPixels, int sbpl,
                                  int w, int h,
                                  int const_alpha)
{
    // the generic implementation is faster for wide images and for blending
    if (const_alpha != 256 || w >= 150) {
        qt_blend_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
        return;
    }

    int dstride = dbpl / 2;
    int sstride = sbpl / 2;
    quint16 *dst = (quint16 *) destPixels;
    quint16 *src = (quint16 *) srcPixels;

    switch (w) {
#define BLOCKBLIT(n) case n: blockBlit16<n>(dst, src, dstride, sstride, h); return;
    // ... BLOCKBLIT(1) through the largest unrolled width, elided ...
#undef BLOCKBLIT
    default:
        break;
    }

    pixman_composite_src_0565_0565_asm_neon(w, h, dst, dstride, src, sstride);
}
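
// For reference, BLOCKBLIT(8) in the switch above expands to
//     case 8: blockBlit16<8>(dst, src, dstride, sstride, h); return;
// giving each narrow width its own fully unrolled, alignment-aware copy.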

extern "C" void blend_8_pixels_argb32_on_rgb16_neon(quint16 *dst, const quint32 *src, int const_alpha);

void qt_blend_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
                                   const uchar *srcPixels, int sbpl,
                                   int w, int h,
                                   int const_alpha)
{
    quint16 *dst = (quint16 *) destPixels;
    quint32 *src = (quint32 *) srcPixels;

    if (const_alpha != 256) {
        for (int y = 0; y < h; ++y) {
            int i = 0;
            for (; i < w - 7; i += 8)
                blend_8_pixels_argb32_on_rgb16_neon(&dst[i], &src[i], const_alpha);

            if (i < w) {
                int tail = w - i;

                quint16 dstBuffer[8];
                quint32 srcBuffer[8];

                for (int j = 0; j < tail; ++j) {
                    dstBuffer[j] = dst[i + j];
                    srcBuffer[j] = src[i + j];
                }

                blend_8_pixels_argb32_on_rgb16_neon(dstBuffer, srcBuffer, const_alpha);

                for (int j = 0; j < tail; ++j)
                    dst[i + j] = dstBuffer[j];
            }

            dst = (quint16 *)(((uchar *) dst) + dbpl);
            src = (quint32 *)(((uchar *) src) + sbpl);
        }
        return;
    }

    pixman_composite_over_8888_0565_asm_neon(w, h, dst, dbpl / 2, src, sbpl / 4);
}
#endif // ENABLE_PIXMAN_DRAWHELPERS
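
// Note the tail handling above: blend_8_pixels_argb32_on_rgb16_neon always
// processes exactly eight pixels, so the last w % 8 pixels of each scanline
// are staged through small stack buffers and only the valid ones are copied
// back, keeping the hot loop free of per-pixel branching.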

void qt_blend_argb32_on_argb32_scanline_neon(uint *dest, const uint *src, int length, uint const_alpha)
{
    if (const_alpha == 255) {
#if defined(ENABLE_PIXMAN_DRAWHELPERS)
        pixman_composite_scanline_over_asm_neon(length, dest, src);
#else
        qt_blend_argb32_on_argb32_neon((uchar *)dest, 4 * length, (uchar *)src, 4 * length, length, 1, 256);
#endif
    } else {
        qt_blend_argb32_on_argb32_neon((uchar *)dest, 4 * length, (uchar *)src, 4 * length, length, 1, (const_alpha * 256) / 255);
    }
}

void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
                                    const uchar *srcPixels, int sbpl,
                                    int w, int h,
                                    int const_alpha)
{
    const uint *src = (const uint *) srcPixels;
    uint *dst = (uint *) destPixels;
    uint16x8_t half = vdupq_n_u16(0x80);
    uint16x8_t full = vdupq_n_u16(0xff);
    if (const_alpha == 256) {
#if defined(ENABLE_PIXMAN_DRAWHELPERS)
        pixman_composite_over_8888_8888_asm_neon(w, h, (uint32_t *)destPixels, dbpl / 4, (uint32_t *)srcPixels, sbpl / 4);
#else
        for (int y = 0; y < h; ++y) {
            int x = 0;
            for (; x < w - 3; x += 4) {
                uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                // widen each 8-bit channel to 16 bits
                const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                const uint8x8_t src8_low = vget_low_u8(src8);
                const uint8x8_t dst8_low = vget_low_u8(dst8);

                const uint8x8_t src8_high = vget_high_u8(src8);
                const uint8x8_t dst8_high = vget_high_u8(dst8);

                const uint16x8_t src16_low = vmovl_u8(src8_low);
                const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                const uint16x8_t src16_high = vmovl_u8(src8_high);
                const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                // source-over two pixels at a time, then narrow back
                const uint16x8_t result16_low = qvsource_over_u16(src16_low, dst16_low, half, full);
                const uint16x8_t result16_high = qvsource_over_u16(src16_high, dst16_high, half, full);

                const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
            }
            for (; x < w; ++x) {
                uint s = src[x];
                if (s >= 0xff000000)
                    dst[x] = s;
                else if (s != 0)
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
            }
            dst = (uint *)(((uchar *) dst) + dbpl);
            src = (const uint *)(((const uchar *) src) + sbpl);
        }
#endif
    } else if (const_alpha != 0) {
        // blend with constant alpha: scale the source by const_alpha first,
        // then apply the normal premultiplied source-over
        const_alpha = (const_alpha * 255) >> 8;
        uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
        for (int y = 0; y < h; ++y) {
            int x = 0;
            for (; x < w - 3; x += 4) {
                uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                const uint8x8_t src8_low = vget_low_u8(src8);
                const uint8x8_t dst8_low = vget_low_u8(dst8);

                const uint8x8_t src8_high = vget_high_u8(src8);
                const uint8x8_t dst8_high = vget_high_u8(dst8);

                const uint16x8_t src16_low = vmovl_u8(src8_low);
                const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                const uint16x8_t src16_high = vmovl_u8(src8_high);
                const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                const uint16x8_t srcalpha16_low = qvbyte_mul_u16(src16_low, const_alpha16, half);
                const uint16x8_t srcalpha16_high = qvbyte_mul_u16(src16_high, const_alpha16, half);

                const uint16x8_t result16_low = qvsource_over_u16(srcalpha16_low, dst16_low, half, full);
                const uint16x8_t result16_high = qvsource_over_u16(srcalpha16_high, dst16_high, half, full);

                const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
            }
            for (; x < w; ++x) {
                uint s = BYTE_MUL(src[x], const_alpha);
                dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
            }
            dst = (uint *)(((uchar *) dst) + dbpl);
            src = (const uint *)(((const uchar *) src) + sbpl);
        }
    }
}

// qblendfunctions.cpp
void qt_blend_rgb32_on_rgb32(uchar *destPixels, int dbpl,
                             const uchar *srcPixels, int sbpl,
                             int w, int h,
                             int const_alpha);

void qt_blend_rgb32_on_rgb32_neon(uchar *destPixels, int dbpl,
                                  const uchar *srcPixels, int sbpl,
                                  int w, int h,
                                  int const_alpha)
{
    if (const_alpha != 256) {
        if (const_alpha != 0) {
            const uint *src = (const uint *) srcPixels;
            uint *dst = (uint *) destPixels;
            uint16x8_t half = vdupq_n_u16(0x80);
            const_alpha = (const_alpha * 255) >> 8;
            int one_minus_const_alpha = 255 - const_alpha;
            uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
            uint16x8_t one_minus_const_alpha16 = vdupq_n_u16(255 - const_alpha);
            for (int y = 0; y < h; ++y) {
                int x = 0;
                for (; x < w - 3; x += 4) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    // dst = src * ca + dst * (255 - ca), with rounding
                    const uint16x8_t result16_low = qvinterpolate_pixel_255(src16_low, const_alpha16, dst16_low, one_minus_const_alpha16, half);
                    const uint16x8_t result16_high = qvinterpolate_pixel_255(src16_high, const_alpha16, dst16_high, one_minus_const_alpha16, half);

                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
                for (; x < w; ++x)
                    dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], one_minus_const_alpha);
                dst = (uint *)(((uchar *) dst) + dbpl);
                src = (const uint *)(((const uchar *) src) + sbpl);
            }
        }
    } else {
        // opaque: a straight copy, handled by the generic implementation
        qt_blend_rgb32_on_rgb32(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
    }
}

#if defined(ENABLE_PIXMAN_DRAWHELPERS)
// qdrawhelper.cpp
extern void qt_alphamapblit_quint16(QRasterBuffer *rasterBuffer,
                                    int x, int y, const QRgba64 &color,
                                    const uchar *map,
                                    int mapWidth, int mapHeight, int mapStride,
                                    const QClipData *clip, bool useGammaCorrection);

void qt_alphamapblit_quint16_neon(QRasterBuffer *rasterBuffer,
                                  int x, int y, const QRgba64 &color,
                                  const uchar *bitmap,
                                  int mapWidth, int mapHeight, int mapStride,
                                  const QClipData *clip, bool useGammaCorrection)
{
    if (clip || useGammaCorrection) {
        // clip regions and gamma correction are handled by the generic version
        qt_alphamapblit_quint16(rasterBuffer, x, y, color, bitmap, mapWidth, mapHeight, mapStride, clip, useGammaCorrection);
        return;
    }

    quint16 *dest = reinterpret_cast<quint16 *>(rasterBuffer->scanLine(y)) + x;
    const int destStride = rasterBuffer->bytesPerLine() / sizeof(quint16);

    uchar *mask = const_cast<uchar *>(bitmap);
    const uint c = color.toArgb32();

    pixman_composite_over_n_8_0565_asm_neon(mapWidth, mapHeight, dest, destStride,
                                            c, 0, mask, mapStride);
}

extern "C" void blend_8_pixels_rgb16_on_rgb16_neon(quint16 *dst, const quint16 *src, int const_alpha);

template <typename SRC, typename BlendFunc>
struct Blend_on_RGB16_SourceAndConstAlpha_Neon {
    Blend_on_RGB16_SourceAndConstAlpha_Neon(BlendFunc blender, int const_alpha)
        : m_index(0)
        , m_blender(blender)
        , m_const_alpha(const_alpha)
    {
    }

    inline void write(quint16 *dst, SRC src)
    {
        srcBuffer[m_index++] = src;

        if (m_index == 8) {
            m_blender(dst - 7, srcBuffer, m_const_alpha);
            m_index = 0;
        }
    }

    inline void flush(quint16 *dst)
    {
        if (m_index > 0) {
            quint16 dstBuffer[8];
            for (int i = 0; i < m_index; ++i)
                dstBuffer[i] = dst[i - m_index];

            m_blender(dstBuffer, srcBuffer, m_const_alpha);

            for (int i = 0; i < m_index; ++i)
                dst[i - m_index] = dstBuffer[i];

            m_index = 0;
        }
    }

    SRC srcBuffer[8];

    int m_index;
    BlendFunc m_blender;
    int m_const_alpha;
};

template <typename SRC, typename BlendFunc>
Blend_on_RGB16_SourceAndConstAlpha_Neon<SRC, BlendFunc>
Blend_on_RGB16_SourceAndConstAlpha_Neon_create(BlendFunc blender, int const_alpha)
{
    return Blend_on_RGB16_SourceAndConstAlpha_Neon<SRC, BlendFunc>(blender, const_alpha);
}
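
// The adaptor above turns per-pixel writes into batched NEON blends: write()
// accumulates up to eight source pixels, blends them against the destination
// once the buffer is full, and flush() drains a partial buffer through the
// same staging scheme used for scanline tails.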

void qt_scale_image_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
                                         const uchar *srcPixels, int sbpl, int srch,
                                         const QRectF &targetRect,
                                         const QRectF &sourceRect,
                                         const QRect &clip,
                                         int const_alpha)
{
    if (const_alpha == 0)
        return;

    qt_scale_image_16bit<quint32>(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip,
                                  Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint32>(blend_8_pixels_argb32_on_rgb16_neon, const_alpha));
}

// qblendfunctions.cpp
void qt_scale_image_rgb16_on_rgb16(uchar *destPixels, int dbpl,
                                   const uchar *srcPixels, int sbpl, int srch,
                                   const QRectF &targetRect,
                                   const QRectF &sourceRect,
                                   const QRect &clip,
                                   int const_alpha);

void qt_scale_image_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
                                        const uchar *srcPixels, int sbpl, int srch,
                                        const QRectF &targetRect,
                                        const QRectF &sourceRect,
                                        const QRect &clip,
                                        int const_alpha)
{
    if (const_alpha == 0)
        return;

    if (const_alpha == 256) {
        // opaque: a plain scaled copy, handled by the generic version
        qt_scale_image_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip, const_alpha);
        return;
    }

    qt_scale_image_16bit<quint16>(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip,
                                  Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint16>(blend_8_pixels_rgb16_on_rgb16_neon, const_alpha));
}

// qblendfunctions.cpp
void qt_transform_image_rgb16_on_rgb16(uchar *destPixels, int dbpl,
                                       const uchar *srcPixels, int sbpl,
                                       const QRectF &targetRect,
                                       const QRectF &sourceRect,
                                       const QRect &clip,
                                       const QTransform &targetRectTransform,
                                       int const_alpha);

void qt_transform_image_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
                                            const uchar *srcPixels, int sbpl,
                                            const QRectF &targetRect,
                                            const QRectF &sourceRect,
                                            const QRect &clip,
                                            const QTransform &targetRectTransform,
                                            int const_alpha)
{
    if (const_alpha == 0)
        return;

    if (const_alpha == 256) {
        qt_transform_image_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, targetRect, sourceRect, clip, targetRectTransform, const_alpha);
        return;
    }

    qt_transform_image(reinterpret_cast<quint16 *>(destPixels), dbpl,
                       reinterpret_cast<const quint16 *>(srcPixels), sbpl, targetRect, sourceRect, clip, targetRectTransform,
                       Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint16>(blend_8_pixels_rgb16_on_rgb16_neon, const_alpha));
}

void qt_transform_image_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
                                             const uchar *srcPixels, int sbpl,
                                             const QRectF &targetRect,
                                             const QRectF &sourceRect,
                                             const QRect &clip,
                                             const QTransform &targetRectTransform,
                                             int const_alpha)
{
    if (const_alpha == 0)
        return;

    qt_transform_image(reinterpret_cast<quint16 *>(destPixels), dbpl,
                       reinterpret_cast<const quint32 *>(srcPixels), sbpl, targetRect, sourceRect, clip, targetRectTransform,
                       Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint32>(blend_8_pixels_argb32_on_rgb16_neon, const_alpha));
}
679 "vld1.16 { d0, d1 }, [%[SRC]]\n\t"
683 "vshrn.u16 d4, q0, #8\n\t"
684 "vshrn.u16 d3, q0, #3\n\t"
685 "vsli.u16 q0, q0, #5\n\t"
686 "vsri.u8 d4, d4, #5\n\t"
687 "vsri.u8 d3, d3, #6\n\t"
688 "vshrn.u16 d2, q0, #2\n\t"
694 "vst4.8 { d2, d3, d4, d5 }, [%[DST]]"
695 : : [DST]
"r" (
dst), [SRC]
"r" (
src)
696 :
"memory",
"r2",
"d0",
"d1",
"d2",
"d3",
"d4",
"d5"

static inline void convert_rgb16_to_argb32(quint32 *dst, const quint16 *src, int count)
{
    int i = 0;
    for (; i < count - 7; i += 8)
        convert_8_pixels_rgb16_to_argb32(&dst[i], &src[i]);

    if (i < count) {
        const int tail = count - i;

        quint16 srcBuffer[8];
        quint32 dstBuffer[8];

        for (int j = 0; j < tail; ++j)
            srcBuffer[j] = src[i + j];

        convert_8_pixels_rgb16_to_argb32(dstBuffer, srcBuffer);

        for (int j = 0; j < tail; ++j)
            dst[i + j] = dstBuffer[j];
    }
}
728 "vld4.8 { d0, d1, d2, d3 }, [%[SRC]]\n\t"
731 "vshll.u8 q14, d2, #8\n\t"
732 "vshll.u8 q8, d1, #8\n\t"
733 "vshll.u8 q9, d0, #8\n\t"
734 "vsri.u16 q14, q8, #5\n\t"
735 "vsri.u16 q14, q9, #11\n\t"
737 "vst1.16 { d28, d29 }, [%[DST]]"
738 : : [DST]
"r" (
dst), [SRC]
"r" (
src)
739 :
"memory",
"d0",
"d1",
"d2",
"d3",
"d16",
"d17",
"d18",
"d19",
"d28",
"d29"

static inline void convert_argb32_to_rgb16(quint16 *dst, const quint32 *src, int count)
{
    int i = 0;
    for (; i < count - 7; i += 8)
        convert_8_pixels_argb32_to_rgb16(&dst[i], &src[i]);

    if (i < count) {
        const int tail = count - i;

        quint32 srcBuffer[8];
        quint16 dstBuffer[8];

        for (int j = 0; j < tail; ++j)
            srcBuffer[j] = src[i + j];

        convert_8_pixels_argb32_to_rgb16(dstBuffer, srcBuffer);

        for (int j = 0; j < tail; ++j)
            dst[i + j] = dstBuffer[j];
    }
}
#endif // ENABLE_PIXMAN_DRAWHELPERS

void QT_FASTCALL comp_func_solid_SourceOver_neon(uint *destPixels, int length, uint color, uint const_alpha)
{
    if ((color & 0xff000000) == 0xff000000 && const_alpha == 255) {
        qt_memfill32(destPixels, color, length);
    } else {
        if (const_alpha != 255)
            color = BYTE_MUL(color, const_alpha);

        const quint32 minusAlphaOfColor = qAlpha(~color);
        int x = 0;

        uint32_t *dst = (uint32_t *) destPixels;
        const uint32x4_t colorVector = vdupq_n_u32(color);
        uint16x8_t half = vdupq_n_u16(0x80);
        const uint16x8_t minusAlphaOfColorVector = vdupq_n_u16(minusAlphaOfColor);

        for (; x < length - 3; x += 4) {
            uint32x4_t dstVector = vld1q_u32(&dst[x]);

            const uint8x16_t dst8 = vreinterpretq_u8_u32(dstVector);

            const uint8x8_t dst8_low = vget_low_u8(dst8);
            const uint8x8_t dst8_high = vget_high_u8(dst8);

            const uint16x8_t dst16_low = vmovl_u8(dst8_low);
            const uint16x8_t dst16_high = vmovl_u8(dst8_high);

            const uint16x8_t result16_low = qvbyte_mul_u16(dst16_low, minusAlphaOfColorVector, half);
            const uint16x8_t result16_high = qvbyte_mul_u16(dst16_high, minusAlphaOfColorVector, half);

            const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
            const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

            uint32x4_t blendedPixels = vcombine_u32(result32_low, result32_high);
            uint32x4_t colorPlusBlendedPixels = vaddq_u32(colorVector, blendedPixels);
            vst1q_u32(&dst[x], colorPlusBlendedPixels);
        }

        SIMD_EPILOGUE(x, length, 3)
            destPixels[x] = color + BYTE_MUL(destPixels[x], minusAlphaOfColor);
    }
}
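
// Per pixel, the loop above computes
//     dst[x] = color + BYTE_MUL(dst[x], 255 - qAlpha(color))
// (the same expression as the scalar epilogue): since the color is
// premultiplied, source-over with a constant source reduces to scaling the
// destination by the inverse alpha and adding the color.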

void QT_FASTCALL comp_func_Plus_neon(uint *dst, const uint *src, int length, uint const_alpha)
{
    if (const_alpha == 255) {
        uint *const end = dst + length;
        uint *const neonEnd = end - 3;

        while (dst < neonEnd) {
            // saturating add of 16 channel bytes at once
            uint8x16_t vs = vld1q_u8((const uint8_t *)src);
            const uint8x16_t vd = vld1q_u8((uint8_t *)dst);
            vs = vqaddq_u8(vs, vd);
            vst1q_u8((uint8_t *)dst, vs);
            src += 4;
            dst += 4;
        }

        while (dst != end) {
            *dst = comp_func_Plus_one_pixel(*dst, *src);
            ++dst;
            ++src;
        }
    } else {
        int x = 0;
        const int one_minus_const_alpha = 255 - const_alpha;
        const uint16x8_t constAlphaVector = vdupq_n_u16(const_alpha);
        const uint16x8_t oneMinusconstAlphaVector = vdupq_n_u16(one_minus_const_alpha);

        const uint16x8_t half = vdupq_n_u16(0x80);
        for (; x < length - 3; x += 4) {
            const uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
            const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
            uint8x16_t dst8 = vld1q_u8((uint8_t *)&dst[x]);
            uint8x16_t result = vqaddq_u8(dst8, src8);

            uint16x8_t result_low = vmovl_u8(vget_low_u8(result));
            uint16x8_t result_high = vmovl_u8(vget_high_u8(result));

            uint16x8_t dst_low = vmovl_u8(vget_low_u8(dst8));
            uint16x8_t dst_high = vmovl_u8(vget_high_u8(dst8));

            result_low = qvinterpolate_pixel_255(result_low, constAlphaVector, dst_low, oneMinusconstAlphaVector, half);
            result_high = qvinterpolate_pixel_255(result_high, constAlphaVector, dst_high, oneMinusconstAlphaVector, half);

            const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result_low));
            const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result_high));
            vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
        }

        SIMD_EPILOGUE(x, length, 3)
            dst[x] = comp_func_Plus_one_pixel_const_alpha(dst[x], src[x], const_alpha, one_minus_const_alpha);
    }
}
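
// With constant alpha, the Plus mode above evaluates, per channel,
//     dst[x] = INTERPOLATE_PIXEL_255(qMin(dst[x] + src[x], 255), const_alpha,
//                                    dst[x], 255 - const_alpha)
// i.e. a saturating add (vqaddq_u8) blended against the original destination
// by const_alpha.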

#if defined(ENABLE_PIXMAN_DRAWHELPERS)

static const int tileSize = 32;

extern "C" void qt_rotate90_16_neon(quint16 *dst, const quint16 *src, int sstride, int dstride, int count);

void qt_memrotate90_16_neon(const uchar *srcPixels, int w, int h,
                            int sstride,
                            uchar *destPixels, int dstride)
{
    const ushort *src = (const ushort *)srcPixels;
    ushort *dest = (ushort *)destPixels;

    sstride /= sizeof(ushort);
    dstride /= sizeof(ushort);

    const int pack = sizeof(quint32) / sizeof(ushort);
    const int unaligned =
        qMin(uint((quintptr(dest) & (sizeof(quint32) - 1)) / sizeof(ushort)), uint(h));
    const int restX = w % tileSize;
    const int restY = (h - unaligned) % tileSize;
    const int unoptimizedY = restY % pack;
    const int numTilesX = w / tileSize + (restX > 0);
    const int numTilesY = (h - unaligned) / tileSize + (restY >= pack);

    for (int tx = 0; tx < numTilesX; ++tx) {
        const int startx = w - tx * tileSize - 1;
        const int stopx = qMax(startx - tileSize, 0);

        if (unaligned) {
            for (int x = startx; x >= stopx; --x) {
                ushort *d = dest + (w - x - 1) * dstride;
                for (int y = 0; y < unaligned; ++y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }

        for (int ty = 0; ty < numTilesY; ++ty) {
            const int starty = ty * tileSize + unaligned;
            const int stopy = qMin(starty + tileSize, h - unoptimizedY);

            int x = startx;
            // eight columns at a time through the assembler kernel
            for (; x >= stopx + 7; x -= 8) {
                ushort *d = dest + (w - x - 1) * dstride + starty;
                const ushort *s = &src[starty * sstride + x - 7];
                qt_rotate90_16_neon(d, s, sstride * 2, dstride * 2, stopy - starty);
            }

            // leftover columns, packed two pixels per 32-bit store
            for (; x >= stopx; --x) {
                quint32 *d = reinterpret_cast<quint32 *>(dest + (w - x - 1) * dstride + starty);
                for (int y = starty; y < stopy; y += pack) {
                    quint32 c = src[y * sstride + x];
                    for (int i = 1; i < pack; ++i) {
                        const int shift = (sizeof(int) * 8 / pack * i);
                        const ushort color = src[(y + i) * sstride + x];
                        c |= color << shift;
                    }
                    *d++ = c;
                }
            }
        }

        if (unoptimizedY) {
            const int starty = h - unoptimizedY;
            for (int x = startx; x >= stopx; --x) {
                ushort *d = dest + (w - x - 1) * dstride + starty;
                for (int y = starty; y < h; ++y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }
    }
}

void qt_memrotate270_16_neon(const uchar *srcPixels, int w, int h,
                             int sstride,
                             uchar *destPixels, int dstride)
{
    const ushort *src = (const ushort *)srcPixels;
    ushort *dest = (ushort *)destPixels;

    sstride /= sizeof(ushort);
    dstride /= sizeof(ushort);

    const int pack = sizeof(quint32) / sizeof(ushort);
    const int unaligned =
        qMin(uint((quintptr(dest) & (sizeof(quint32) - 1)) / sizeof(ushort)), uint(h));
    const int restX = w % tileSize;
    const int restY = (h - unaligned) % tileSize;
    const int unoptimizedY = restY % pack;
    const int numTilesX = w / tileSize + (restX > 0);
    const int numTilesY = (h - unaligned) / tileSize + (restY >= pack);

    for (int tx = 0; tx < numTilesX; ++tx) {
        const int startx = tx * tileSize;
        const int stopx = qMin(startx + tileSize, w);

        if (unaligned) {
            for (int x = startx; x < stopx; ++x) {
                ushort *d = dest + x * dstride;
                for (int y = h - 1; y >= h - unaligned; --y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }

        for (int ty = 0; ty < numTilesY; ++ty) {
            const int starty = h - 1 - unaligned - ty * tileSize;
            const int stopy = qMax(starty - tileSize, unoptimizedY);

            int x = startx;
            // the same assembler kernel walks backwards via negative strides
            for (; x < stopx - 7; x += 8) {
                ushort *d = dest + x * dstride + h - 1 - starty;
                const ushort *s = &src[starty * sstride + x];
                qt_rotate90_16_neon(d + 7 * dstride, s, -sstride * 2, -dstride * 2, starty - stopy);
            }

            for (; x < stopx; ++x) {
                quint32 *d = reinterpret_cast<quint32 *>(dest + x * dstride + h - 1 - starty);
                for (int y = starty; y > stopy; y -= pack) {
                    quint32 c = src[y * sstride + x];
                    for (int i = 1; i < pack; ++i) {
                        const int shift = (sizeof(int) * 8 / pack * i);
                        const ushort color = src[(y - i) * sstride + x];
                        c |= color << shift;
                    }
                    *d++ = c;
                }
            }
        }

        if (unoptimizedY) {
            const int starty = unoptimizedY - 1;
            for (int x = startx; x < stopx; ++x) {
                ushort *d = dest + x * dstride + h - 1 - starty;
                for (int y = starty; y >= 0; --y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }
    }
}

#endif // ENABLE_PIXMAN_DRAWHELPERS

class QSimdNeon
{
public:
    typedef int32x4_t Int32x4;
    typedef float32x4_t Float32x4;

    union Vect_buffer_i { Int32x4 v; int i[4]; };
    union Vect_buffer_f { Float32x4 v; float f[4]; };

    static inline Float32x4 v_dup(double x) { return vdupq_n_f32(float(x)); }
    static inline Float32x4 v_dup(float x) { return vdupq_n_f32(x); }
    static inline Int32x4 v_dup(int x) { return vdupq_n_s32(x); }
    static inline Int32x4 v_dup(uint x) { return vdupq_n_s32(x); }

    static inline Float32x4 v_add(Float32x4 a, Float32x4 b) { return vaddq_f32(a, b); }
    static inline Int32x4 v_add(Int32x4 a, Int32x4 b) { return vaddq_s32(a, b); }

    static inline Float32x4 v_max(Float32x4 a, Float32x4 b) { return vmaxq_f32(a, b); }
    static inline Float32x4 v_min(Float32x4 a, Float32x4 b) { return vminq_f32(a, b); }
    static inline Int32x4 v_min_16(Int32x4 a, Int32x4 b) { return vminq_s32(a, b); }

    static inline Int32x4 v_and(Int32x4 a, Int32x4 b) { return vandq_s32(a, b); }

    static inline Float32x4 v_sub(Float32x4 a, Float32x4 b) { return vsubq_f32(a, b); }
    static inline Int32x4 v_sub(Int32x4 a, Int32x4 b) { return vsubq_s32(a, b); }

    static inline Float32x4 v_mul(Float32x4 a, Float32x4 b) { return vmulq_f32(a, b); }

    static inline Float32x4 v_sqrt(Float32x4 x) { Float32x4 y = vrsqrteq_f32(x); y = vmulq_f32(y, vrsqrtsq_f32(x, vmulq_f32(y, y))); return vmulq_f32(x, y); }
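
    // v_sqrt computes sqrt(x) as x * (1 / sqrt(x)): vrsqrteq_f32 gives a rough
    // reciprocal square-root estimate y, and vrsqrtsq_f32(x, y*y) = (3 - x*y*y) / 2
    // supplies one Newton-Raphson refinement step before the final multiply by x.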

    static inline Int32x4 v_toInt(Float32x4 x) { return vcvtq_s32_f32(x); }

    static inline Int32x4 v_greaterOrEqual(Float32x4 a, Float32x4 b) { return vreinterpretq_s32_u32(vcgeq_f32(a, b)); }
};

const uint * QT_FASTCALL qt_fetch_radial_gradient_neon(uint *buffer, const Operator *op, const QSpanData *data,
                                                       int y, int x, int length)
{
    return qt_fetch_radial_gradient_template<QRadialFetchSimd<QSimdNeon>, uint>(buffer, op, data, y, x, length);
}

#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN

static inline uint32x4_t vrgba2argb(uint32x4_t srcVector)
{
#if defined(Q_PROCESSOR_ARM_64)
    const uint8x16_t rgbaMask = { 2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15 };
    srcVector = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(srcVector), rgbaMask));
#else
    // no vqtbl1q_u8 on 32-bit: shuffle the two halves separately
    const uint8x8_t rgbaMask = { 2, 1, 0, 3, 6, 5, 4, 7 };
    const uint8x8_t low = vtbl1_u8(vreinterpret_u8_u32(vget_low_u32(srcVector)), rgbaMask);
    const uint8x8_t high = vtbl1_u8(vreinterpret_u8_u32(vget_high_u32(srcVector)), rgbaMask);
    srcVector = vcombine_u32(vreinterpret_u32_u8(low), vreinterpret_u32_u8(high));
#endif
    return srcVector;
}
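
// Scalar sketch of the same shuffle (illustrative only, not part of the
// original file): the { 2, 1, 0, 3 } table swaps bytes 0 and 2 of every
// pixel, converting between RGBA and ARGB memory order on little-endian.
static inline quint32 rgba2argb_sketch(quint32 x)
{
    return (x & 0xff00ff00) | ((x << 16) & 0x00ff0000) | ((x >> 16) & 0x000000ff);
}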

template<bool RGBA>
static inline void convertARGBToARGB32PM_neon(uint *buffer, const uint *src, int count)
{
    int i = 0;
    const uint8x8_t shuffleMask = { 3, 3, 3, 3, 7, 7, 7, 7 };
    const uint32x4_t blendMask = vdupq_n_u32(0xff000000);

    for (; i < count - 3; i += 4) {
        uint32x4_t srcVector = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(srcVector, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // no vaddvq_u32 on 32-bit: pairwise-add twice instead
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (alphaSum != 255 * 4) {
                if (RGBA)
                    srcVector = vrgba2argb(srcVector);
                const uint8x8_t s1 = vreinterpret_u8_u32(vget_low_u32(srcVector));
                const uint8x8_t s2 = vreinterpret_u8_u32(vget_high_u32(srcVector));
                const uint8x8_t alpha1 = vtbl1_u8(s1, shuffleMask);
                const uint8x8_t alpha2 = vtbl1_u8(s2, shuffleMask);
                uint16x8_t src1 = vmull_u8(s1, alpha1);
                uint16x8_t src2 = vmull_u8(s2, alpha2);
                src1 = vsraq_n_u16(src1, src1, 8);
                src2 = vsraq_n_u16(src2, src2, 8);
                const uint8x8_t d1 = vrshrn_n_u16(src1, 8);
                const uint8x8_t d2 = vrshrn_n_u16(src2, 8);
                // keep the original alpha byte, take the scaled color channels
                const uint32x4_t d = vbslq_u32(blendMask, srcVector, vreinterpretq_u32_u8(vcombine_u8(d1, d2)));
                vst1q_u32(buffer + i, d);
            } else {
                // fully opaque group of four: at most a channel-order shuffle
                if (RGBA)
                    vst1q_u32(buffer + i, vrgba2argb(srcVector));
                else if (buffer != src)
                    vst1q_u32(buffer + i, srcVector);
            }
        } else {
            // fully transparent group of four
            vst1q_u32(buffer + i, vdupq_n_u32(0));
        }
    }

    SIMD_EPILOGUE(i, count, 3)
        buffer[i] = qPremultiply(RGBA ? RGBA2ARGB(src[i]) : src[i]);
}
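
// Premultiplication math used above, per channel c with alpha a (both 0..255):
// vmull_u8 forms t = c * a, vsraq_n_u16(t, t, 8) gives t + (t >> 8), and
// vrshrn_n_u16(.., 8) adds the rounding constant and shifts, so the result is
// (t + (t >> 8) + 0x80) >> 8, the usual rounded division by 255. The source
// alpha byte is then re-inserted through the 0xff000000 blend mask.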

template<bool RGBA, bool Premultiply>
static inline void convertARGBToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count)
{
    int i = 0;
    const uint8x8_t shuffleMask = { 3, 3, 3, 3, 7, 7, 7, 7 };
    const uint64x2_t blendMask = vdupq_n_u64(Q_UINT64_C(0xffff000000000000));

    for (; i < count - 3; i += 4) {
        uint32x4_t vs32 = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(vs32, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (!RGBA)
                vs32 = vrgba2argb(vs32);
            const uint8x16_t vs8 = vreinterpretq_u8_u32(vs32);
            // zipping a vector with itself widens each byte c to c * 257,
            // the exact 8-bit to 16-bit scaling
            const uint8x16x2_t v = vzipq_u8(vs8, vs8);
            if (Premultiply && alphaSum != 255 * 4) {
                const uint8x8_t s1 = vreinterpret_u8_u32(vget_low_u32(vs32));
                const uint8x8_t s2 = vreinterpret_u8_u32(vget_high_u32(vs32));
                const uint8x8_t alpha1 = vtbl1_u8(s1, shuffleMask);
                const uint8x8_t alpha2 = vtbl1_u8(s2, shuffleMask);
                uint16x8_t src1 = vmull_u8(s1, alpha1);
                uint16x8_t src2 = vmull_u8(s2, alpha2);
                // convert from the 0..(255*255) range to the 0..(255*257) range
                src1 = vsraq_n_u16(src1, src1, 7);
                src2 = vsraq_n_u16(src2, src2, 7);
                // restore the alpha words from the trivial widening
                const uint64x2_t d1 = vbslq_u64(blendMask, vreinterpretq_u64_u8(v.val[0]), vreinterpretq_u64_u16(src1));
                const uint64x2_t d2 = vbslq_u64(blendMask, vreinterpretq_u64_u8(v.val[1]), vreinterpretq_u64_u16(src2));

                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u64(d1));
                buffer += 2;
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u64(d2));
                buffer += 2;
            } else {
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u8(v.val[0]));
                buffer += 2;
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u8(v.val[1]));
                buffer += 2;
            }
        } else {
            vst1q_u16((uint16_t *)buffer, vdupq_n_u16(0));
            buffer += 2;
            vst1q_u16((uint16_t *)buffer, vdupq_n_u16(0));
            buffer += 2;
        }
    }

    SIMD_EPILOGUE(i, count, 3) {
        uint s = src[i];
        if (RGBA)
            s = RGBA2ARGB(s);
        QRgba64 pixel = QRgba64::fromArgb32(s);
        if (Premultiply)
            pixel = pixel.premultiplied();
        *buffer++ = pixel;
    }
}

static inline float32x4_t reciprocal_mul_ps(float32x4_t a, float mul)
{
    // rough estimate of 1/a, refined below
    float32x4_t ia = vrecpeq_f32(a);
    // one Newton-Raphson step, with mul folded in: ia ~= mul / a
    ia = vmulq_f32(vrecpsq_f32(a, ia), vmulq_n_f32(ia, mul));
    return ia;
}
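
// vrecpeq_f32 alone is only accurate to a few bits; vrecpsq_f32(a, ia)
// returns (2 - a * ia), so the extra multiply performs a Newton-Raphson step.
// Folding `mul` into that multiply yields mul / a at no extra cost; the
// unpremultiply code below calls reciprocal_mul_ps(a, 255.0f) for 255 / alpha.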

template<bool RGBA, bool RGBx>
static inline void convertARGBFromARGB32PM_neon(uint *buffer, const uint *src, int count)
{
    int i = 0;
    const uint32x4_t alphaMask = vdupq_n_u32(0xff000000);

    for (; i < count - 3; i += 4) {
        uint32x4_t srcVector = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(srcVector, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (alphaSum != 255 * 4) {
                if (RGBA)
                    srcVector = vrgba2argb(srcVector);
                const float32x4_t a = vcvtq_f32_u32(alphaVector);
                const float32x4_t ia = reciprocal_mul_ps(a, 255.0f);
                // unpack 4x(4xU8) to 4x(4xF32)
                uint16x8_t tmp1 = vmovl_u8(vget_low_u8(vreinterpretq_u8_u32(srcVector)));
                uint16x8_t tmp3 = vmovl_u8(vget_high_u8(vreinterpretq_u8_u32(srcVector)));
                float32x4_t src1 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp1)));
                float32x4_t src2 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp1)));
                float32x4_t src3 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp3)));
                float32x4_t src4 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp3)));
                // multiply each pixel by 255/alpha of that pixel
                src1 = vmulq_lane_f32(src1, vget_low_f32(ia), 0);
                src2 = vmulq_lane_f32(src2, vget_low_f32(ia), 1);
                src3 = vmulq_lane_f32(src3, vget_high_f32(ia), 0);
                src4 = vmulq_lane_f32(src4, vget_high_f32(ia), 1);
                // pack back to 4x(4xU8), via 16 bits with rounding
                tmp1 = vcombine_u16(vrshrn_n_u32(vcvtq_n_u32_f32(src1, 1), 1),
                                    vrshrn_n_u32(vcvtq_n_u32_f32(src2, 1), 1));
                tmp3 = vcombine_u16(vrshrn_n_u32(vcvtq_n_u32_f32(src3, 1), 1),
                                    vrshrn_n_u32(vcvtq_n_u32_f32(src4, 1), 1));
                uint32x4_t dstVector = vreinterpretq_u32_u8(vcombine_u8(vmovn_u16(tmp1), vmovn_u16(tmp3)));
                // zero out pixels whose alpha was zero
#if defined(Q_PROCESSOR_ARM_64)
                uint32x4_t srcVectorAlphaMask = vceqzq_u32(alphaVector);
#else
                uint32x4_t srcVectorAlphaMask = vceqq_u32(alphaVector, vdupq_n_u32(0));
#endif
                dstVector = vbicq_u32(dstVector, srcVectorAlphaMask);
                // force alpha to 0xff (RGBx) or restore the original alpha
                if (RGBx)
                    dstVector = vorrq_u32(alphaMask, dstVector);
                else
                    dstVector = vbslq_u32(alphaMask, srcVector, dstVector);
                vst1q_u32(&buffer[i], dstVector);
            } else {
                // fully opaque group: at most a channel-order shuffle
                if (RGBA)
                    vst1q_u32(&buffer[i], vrgba2argb(srcVector));
                else if (buffer != src)
                    vst1q_u32(&buffer[i], srcVector);
            }
        } else {
            if (RGBx)
                vst1q_u32(&buffer[i], alphaMask);
            else
                vst1q_u32(&buffer[i], vdupq_n_u32(0));
        }
    }

    SIMD_EPILOGUE(i, count, 3) {
        uint v = qUnpremultiply(src[i]);
        if (RGBx)
            v = 0xff000000 | v;
        if (RGBA)
            v = ARGB2RGBA(v);
        buffer[i] = v;
    }
}
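
// Scalar sketch of the unpremultiply step vectorized above (illustrative
// only; it truncates where the NEON code rounds): scale each color channel
// by 255 / alpha, mapping alpha == 0 to fully transparent black. For valid
// premultiplied input each channel is <= alpha, so no clamping is needed.
static inline quint32 unpremultiply_sketch(quint32 p)
{
    const quint32 a = p >> 24;
    if (a == 0)
        return 0;
    const quint32 r = ((p >> 16) & 0xff) * 255u / a;
    const quint32 g = ((p >> 8) & 0xff) * 255u / a;
    const quint32 b = (p & 0xff) * 255u / a;
    return (a << 24) | (r << 16) | (g << 8) | b;
}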

// The QPixelLayout store helpers (wrapper bodies elided here) each forward
// to one instantiation; d is the destination scanline as a uint *:
    convertARGBFromARGB32PM_neon<false, true>(d, src, count);   // ARGB order, alpha forced to 0xff
    convertARGBFromARGB32PM_neon<false, false>(d, src, count);  // ARGB order, alpha preserved
    convertARGBFromARGB32PM_neon<true, false>(d, src, count);   // RGBA order, alpha preserved
    convertARGBFromARGB32PM_neon<true, true>(d, src, count);    // RGBA order, alpha forced to 0xff