aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHao Chen <chenhao@loongson.cn>2024-01-03 10:14:39 +0800
committerlibyuv LUCI CQ <libyuv-scoped@luci-project-accounts.iam.gserviceaccount.com>2024-01-03 19:15:56 +0000
commitee53a66c5cfe6723f924818cbed92abe01749b83 (patch)
treefa33e2e86381ce3f716dfb084bd4f9d6d3f9a140
parent7da8059c262c237d6673e4717c6d29911051adff (diff)
downloadlibyuv-ee53a66c5cfe6723f924818cbed92abe01749b83.tar.gz
Fix compilation errors.
Fix the narrowing conversion error from ‘long unsigned int’ to ‘long long int’ that occurs when using the new compiler on the LoongArch platform.

Bug: libyuv:913
Change-Id: Ic535946a2453bc48840bab05355854670c52114f
Reviewed-on: https://chromium-review.googlesource.com/c/libyuv/libyuv/+/5161066
Commit-Queue: Frank Barchard <fbarchard@chromium.org>
Reviewed-by: Frank Barchard <fbarchard@chromium.org>
-rw-r--r--source/row_lasx.cc21
-rw-r--r--source/row_lsx.cc28
2 files changed, 24 insertions, 25 deletions
diff --git a/source/row_lasx.cc b/source/row_lasx.cc
index 1082ad80..0814ef1c 100644
--- a/source/row_lasx.cc
+++ b/source/row_lasx.cc
@@ -543,8 +543,7 @@ void I422ToARGB4444Row_LASX(const uint8_t* src_y,
__m256i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg;
__m256i vec_ubvr, vec_ugvg;
__m256i const_0x80 = __lasx_xvldi(0x80);
- __m256i alpha = {0xF000F000F000F000, 0xF000F000F000F000, 0xF000F000F000F000,
- 0xF000F000F000F000};
+ __m256i alpha = (__m256i)v4u64{0xF000F000F000F000, 0xF000F000F000F000, 0xF000F000F000F000, 0xF000F000F000F000};
__m256i mask = {0x00F000F000F000F0, 0x00F000F000F000F0, 0x00F000F000F000F0,
0x00F000F000F000F0};
@@ -595,8 +594,8 @@ void I422ToARGB1555Row_LASX(const uint8_t* src_y,
__m256i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg;
__m256i vec_ubvr, vec_ugvg;
__m256i const_0x80 = __lasx_xvldi(0x80);
- __m256i alpha = {0x8000800080008000, 0x8000800080008000, 0x8000800080008000,
- 0x8000800080008000};
+ __m256i alpha = (__m256i)v4u64{0x8000800080008000, 0x8000800080008000,
+ 0x8000800080008000, 0x8000800080008000};
YUVTORGB_SETUP(yuvconstants, vec_ub, vec_vr, vec_ug, vec_vg, vec_yg, vec_yb);
vec_ubvr = __lasx_xvilvl_h(vec_ub, vec_vr);
@@ -799,7 +798,7 @@ void ARGBToUVRow_LASX(const uint8_t* src_argb0,
0x0009000900090009, 0x0009000900090009};
__m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002,
0x0000000700000003};
- __m256i const_0x8080 = {0x8080808080808080, 0x8080808080808080,
+ __m256i const_0x8080 = (__m256i)v4u64{0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
@@ -1037,7 +1036,7 @@ void ARGBToUV444Row_LASX(const uint8_t* src_argb,
__m256i const_38 = __lasx_xvldi(38);
__m256i const_94 = __lasx_xvldi(94);
__m256i const_18 = __lasx_xvldi(18);
- __m256i const_0x8080 = {0x8080808080808080, 0x8080808080808080,
+ __m256i const_0x8080 = (__m256i)v4u64{0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
__m256i control = {0x0000000400000000, 0x0000000500000001, 0x0000000600000002,
0x0000000700000003};
@@ -1609,7 +1608,7 @@ void ARGB1555ToUVRow_LASX(const uint8_t* src_argb1555,
__m256i const_38 = __lasx_xvldi(0x413);
__m256i const_94 = __lasx_xvldi(0x42F);
__m256i const_18 = __lasx_xvldi(0x409);
- __m256i const_8080 = {0x8080808080808080, 0x8080808080808080,
+ __m256i const_8080 = (__m256i)v4u64{0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
@@ -1726,7 +1725,7 @@ void RGB565ToUVRow_LASX(const uint8_t* src_rgb565,
__m256i const_38 = __lasx_xvldi(0x413);
__m256i const_94 = __lasx_xvldi(0x42F);
__m256i const_18 = __lasx_xvldi(0x409);
- __m256i const_8080 = {0x8080808080808080, 0x8080808080808080,
+ __m256i const_8080 = (__m256i)v4u64{0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
@@ -1793,7 +1792,7 @@ void RGB24ToUVRow_LASX(const uint8_t* src_rgb24,
__m256i const_38 = __lasx_xvldi(0x413);
__m256i const_94 = __lasx_xvldi(0x42F);
__m256i const_18 = __lasx_xvldi(0x409);
- __m256i const_8080 = {0x8080808080808080, 0x8080808080808080,
+ __m256i const_8080 = (__m256i)v4u64{0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
__m256i shuff0_b = {0x15120F0C09060300, 0x00000000001E1B18,
0x15120F0C09060300, 0x00000000001E1B18};
@@ -1856,7 +1855,7 @@ void RAWToUVRow_LASX(const uint8_t* src_raw,
__m256i const_38 = __lasx_xvldi(0x413);
__m256i const_94 = __lasx_xvldi(0x42F);
__m256i const_18 = __lasx_xvldi(0x409);
- __m256i const_8080 = {0x8080808080808080, 0x8080808080808080,
+ __m256i const_8080 = (__m256i)v4u64{0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
__m256i shuff0_r = {0x15120F0C09060300, 0x00000000001E1B18,
0x15120F0C09060300, 0x00000000001E1B18};
@@ -2242,7 +2241,7 @@ void ARGBToUVJRow_LASX(const uint8_t* src_argb,
__m256i const_21 = __lasx_xvldi(0x415);
__m256i const_53 = __lasx_xvldi(0x435);
__m256i const_10 = __lasx_xvldi(0x40A);
- __m256i const_8080 = {0x8080808080808080, 0x8080808080808080,
+ __m256i const_8080 = (__m256i)v4u64{0x8080808080808080, 0x8080808080808080,
0x8080808080808080, 0x8080808080808080};
__m256i shuff = {0x1614060412100200, 0x1E1C0E0C1A180A08, 0x1715070513110301,
0x1F1D0F0D1B190B09};
diff --git a/source/row_lsx.cc b/source/row_lsx.cc
index e626072a..fa088c9e 100644
--- a/source/row_lsx.cc
+++ b/source/row_lsx.cc
@@ -565,7 +565,7 @@ void I422ToARGB4444Row_LSX(const uint8_t* src_y,
__m128i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg;
__m128i vec_ubvr, vec_ugvg;
__m128i const_80 = __lsx_vldi(0x80);
- __m128i alpha = {0xF000F000F000F000, 0xF000F000F000F000};
+ __m128i alpha = (__m128i)v2u64{0xF000F000F000F000, 0xF000F000F000F000};
__m128i mask = {0x00F000F000F000F0, 0x00F000F000F000F0};
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
@@ -612,7 +612,7 @@ void I422ToARGB1555Row_LSX(const uint8_t* src_y,
__m128i vec_yb, vec_yg, vec_ub, vec_vr, vec_ug, vec_vg;
__m128i vec_ubvr, vec_ugvg;
__m128i const_80 = __lsx_vldi(0x80);
- __m128i alpha = {0x8000800080008000, 0x8000800080008000};
+ __m128i alpha = (__m128i)v2u64{0x8000800080008000, 0x8000800080008000};
YUVTORGB_SETUP(yuvconstants, vec_vr, vec_ub, vec_vg, vec_ug, vec_yg, vec_yb);
vec_ubvr = __lsx_vilvl_h(vec_ub, vec_vr);
@@ -792,7 +792,7 @@ void ARGBToUVRow_LSX(const uint8_t* src_argb0,
__m128i const_0x26 = {0x0013001300130013, 0x0013001300130013};
__m128i const_0x5E = {0x002f002f002f002f, 0x002f002f002f002f};
__m128i const_0x12 = {0x0009000900090009, 0x0009000900090009};
- __m128i const_0x8080 = {0x8080808080808080, 0x8080808080808080};
+ __m128i const_0x8080 = (__m128i)v2u64{0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_argb0, 0, src_argb0, 16, src_argb0, 32, src_argb0,
48, src0, src1, src2, src3);
@@ -991,7 +991,7 @@ void ARGBToUV444Row_LSX(const uint8_t* src_argb,
__m128i const_38 = __lsx_vldi(38);
__m128i const_94 = __lsx_vldi(94);
__m128i const_18 = __lsx_vldi(18);
- __m128i const_0x8080 = {0x8080808080808080, 0x8080808080808080};
+ __m128i const_0x8080 = (__m128i)v2u64{0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
src0, src1, src2, src3);
@@ -1533,7 +1533,7 @@ void ARGB1555ToUVRow_LSX(const uint8_t* src_argb1555,
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
- __m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
+ __m128i const_8080 = (__m128i)v2u64{0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_argb1555, 0, src_argb1555, 16, next_argb1555, 0,
@@ -1642,7 +1642,7 @@ void RGB565ToUVRow_LSX(const uint8_t* src_rgb565,
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
- __m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
+ __m128i const_8080 = (__m128i)v2u64{0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_rgb565, 0, src_rgb565, 16, next_rgb565, 0,
@@ -1703,7 +1703,7 @@ void RGB24ToUVRow_LSX(const uint8_t* src_rgb24,
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
- __m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
+ __m128i const_8080 = (__m128i)v2u64{0x8080808080808080, 0x8080808080808080};
__m128i shuff0_b = {0x15120F0C09060300, 0x00000000001E1B18};
__m128i shuff1_b = {0x0706050403020100, 0x1D1A1714110A0908};
__m128i shuff0_g = {0x1613100D0A070401, 0x00000000001F1C19};
@@ -1756,7 +1756,7 @@ void RAWToUVRow_LSX(const uint8_t* src_raw,
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
- __m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
+ __m128i const_8080 = (__m128i)v2u64{0x8080808080808080, 0x8080808080808080};
__m128i shuff0_r = {0x15120F0C09060300, 0x00000000001E1B18};
__m128i shuff1_r = {0x0706050403020100, 0x1D1A1714110A0908};
__m128i shuff0_g = {0x1613100D0A070401, 0x00000000001F1C19};
@@ -1991,7 +1991,7 @@ void BGRAToUVRow_LSX(const uint8_t* src_bgra,
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
- __m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
+ __m128i const_8080 = (__m128i)v2u64{0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_bgra, 0, src_bgra, 16, src_bgra, 32, src_bgra, 48,
@@ -2039,7 +2039,7 @@ void ABGRToUVRow_LSX(const uint8_t* src_abgr,
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
- __m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
+ __m128i const_8080 = (__m128i)v2u64{0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_abgr, 0, src_abgr, 16, src_abgr, 32, src_abgr, 48,
@@ -2087,7 +2087,7 @@ void RGBAToUVRow_LSX(const uint8_t* src_rgba,
__m128i const_38 = __lsx_vldi(0x413);
__m128i const_94 = __lsx_vldi(0x42F);
__m128i const_18 = __lsx_vldi(0x409);
- __m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
+ __m128i const_8080 = (__m128i)v2u64{0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_rgba, 0, src_rgba, 16, src_rgba, 32, src_rgba, 48,
@@ -2136,7 +2136,7 @@ void ARGBToUVJRow_LSX(const uint8_t* src_argb,
__m128i const_21 = __lsx_vldi(0x415);
__m128i const_53 = __lsx_vldi(0x435);
__m128i const_10 = __lsx_vldi(0x40A);
- __m128i const_8080 = {0x8080808080808080, 0x8080808080808080};
+ __m128i const_8080 = (__m128i)v2u64{0x8080808080808080, 0x8080808080808080};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb, 32, src_argb, 48,
@@ -2514,7 +2514,7 @@ void ARGBBlendRow_LSX(const uint8_t* src_argb,
__m128i const_256 = __lsx_vldi(0x500);
__m128i zero = __lsx_vldi(0);
__m128i alpha = __lsx_vldi(0xFF);
- __m128i control = {0xFF000000FF000000, 0xFF000000FF000000};
+ __m128i control = (__m128i)v2u64{0xFF000000FF000000, 0xFF000000FF000000};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, src_argb, 0, src_argb, 16, src_argb1, 0, src_argb1, 16,
@@ -2560,7 +2560,7 @@ void ARGBQuantizeRow_LSX(uint8_t* dst_argb,
__m128i vec_offset = __lsx_vreplgr2vr_b(interval_offset);
__m128i vec_scale = __lsx_vreplgr2vr_w(scale);
__m128i zero = __lsx_vldi(0);
- __m128i control = {0xFF000000FF000000, 0xFF000000FF000000};
+ __m128i control = (__m128i)v2u64{0xFF000000FF000000, 0xFF000000FF000000};
for (x = 0; x < len; x++) {
DUP4_ARG2(__lsx_vld, dst_argb, 0, dst_argb, 16, dst_argb, 32, dst_argb, 48,