diff options
author | Jose Dapena Paz <jdapena@igalia.com> | 2020-09-09 16:13:03 +0200 |
---|---|---|
committer | Jonathan Wright <jonathan.wright@arm.com> | 2020-09-15 19:07:21 +0000 |
commit | d5148db386ceb4a608058320071cbed890bd6ad2 (patch) | |
tree | 4fb259f4240d45e729b6c857ebba31788e8c4a83 | |
parent | 408fab0d8ec3d5aaf274607d688064326d91f725 (diff) |
GCC: fix NEON type casts
GCC9+ is more strict with the type casts, so it does not accept
implicit casting from int64x1_t to int64_t or similar. And it also
rejects implicit casting from signed to unsigned vectors.
Bug: 819194
Change-Id: I35d269108645e8eb0bd8cffbfaf69a70f82d766f
-rw-r--r-- | simd/arm/common/jidctfst-neon.c | 12 | ||||
-rw-r--r-- | simd/arm/common/jidctint-neon.c | 34 | ||||
-rw-r--r-- | simd/arm/common/jidctred-neon.c | 8 |
3 files changed, 32 insertions, 22 deletions
diff --git a/simd/arm/common/jidctfst-neon.c b/simd/arm/common/jidctfst-neon.c index c926e6d..87806fd 100644 --- a/simd/arm/common/jidctfst-neon.c +++ b/simd/arm/common/jidctfst-neon.c @@ -84,8 +84,8 @@ void jsimd_idct_ifast_neon(void *dct_table, bitmap = vorrq_s16(bitmap, row6); bitmap = vorrq_s16(bitmap, row7); - int64_t left_ac_bitmap = vreinterpret_s64_s16(vget_low_s16(bitmap)); - int64_t right_ac_bitmap = vreinterpret_s64_s16(vget_high_s16(bitmap)); + int64_t left_ac_bitmap = vgetq_lane_s64(vreinterpretq_s64_s16(bitmap), 0); + int64_t right_ac_bitmap = vgetq_lane_s64(vreinterpretq_s64_s16(bitmap), 1); if (left_ac_bitmap == 0 && right_ac_bitmap == 0) { /* All AC coefficients are zero. */ @@ -405,13 +405,13 @@ void jsimd_idct_ifast_neon(void *dct_table, vqshrn_n_s16(col7, PASS1_BITS + 3)); /* Clamp to range [0-255]. */ uint8x16_t cols_01 = vreinterpretq_u8_s8( - vaddq_s8(cols_01_s8, vdupq_n_u8(CENTERJSAMPLE))); + vaddq_s8(cols_01_s8, vreinterpretq_s8_u8(vdupq_n_u8(CENTERJSAMPLE)))); uint8x16_t cols_45 = vreinterpretq_u8_s8( - vaddq_s8(cols_45_s8, vdupq_n_u8(CENTERJSAMPLE))); + vaddq_s8(cols_45_s8, vreinterpretq_s8_u8(vdupq_n_u8(CENTERJSAMPLE)))); uint8x16_t cols_23 = vreinterpretq_u8_s8( - vaddq_s8(cols_23_s8, vdupq_n_u8(CENTERJSAMPLE))); + vaddq_s8(cols_23_s8, vreinterpretq_s8_u8(vdupq_n_u8(CENTERJSAMPLE)))); uint8x16_t cols_67 = vreinterpretq_u8_s8( - vaddq_s8(cols_67_s8, vdupq_n_u8(CENTERJSAMPLE))); + vaddq_s8(cols_67_s8, vreinterpretq_s8_u8(vdupq_n_u8(CENTERJSAMPLE)))); /* Transpose block ready for store. 
*/ uint32x4x2_t cols_0415 = vzipq_u32(vreinterpretq_u32_u8(cols_01), diff --git a/simd/arm/common/jidctint-neon.c b/simd/arm/common/jidctint-neon.c index 4ee9eb0..0fd4a36 100644 --- a/simd/arm/common/jidctint-neon.c +++ b/simd/arm/common/jidctint-neon.c @@ -215,13 +215,13 @@ void jsimd_idct_islow_neon(void *dct_table, int16x4_t bitmap = vorr_s16(row7, row6); bitmap = vorr_s16(bitmap, row5); bitmap = vorr_s16(bitmap, row4); - int64_t bitmap_rows_4567 = vreinterpret_s64_s16(bitmap); + int64_t bitmap_rows_4567 = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0); if (bitmap_rows_4567 == 0) { bitmap = vorr_s16(bitmap, row3); bitmap = vorr_s16(bitmap, row2); bitmap = vorr_s16(bitmap, row1); - int64_t left_ac_bitmap = vreinterpret_s64_s16(bitmap); + int64_t left_ac_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0); if (left_ac_bitmap == 0) { int16x4_t dcval = vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS); @@ -267,18 +267,18 @@ void jsimd_idct_islow_neon(void *dct_table, bitmap = vorr_s16(row7, row6); bitmap = vorr_s16(bitmap, row5); bitmap = vorr_s16(bitmap, row4); - bitmap_rows_4567 = vreinterpret_s64_s16(bitmap); + bitmap_rows_4567 = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0); bitmap = vorr_s16(bitmap, row3); bitmap = vorr_s16(bitmap, row2); bitmap = vorr_s16(bitmap, row1); - int64_t right_ac_bitmap = vreinterpret_s64_s16(bitmap); + int64_t right_ac_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0); /* Initialise to non-zero value: defaults to regular second pass. 
*/ int64_t right_ac_dc_bitmap = 1; if (right_ac_bitmap == 0) { bitmap = vorr_s16(bitmap, row0); - right_ac_dc_bitmap = vreinterpret_s64_s16(bitmap); + right_ac_dc_bitmap = vget_lane_s64(vreinterpret_s64_s16(bitmap), 0); if (right_ac_dc_bitmap != 0) { int16x4_t dcval = vshl_n_s16(vmul_s16(row0, quant_row0), PASS1_BITS); @@ -631,10 +631,14 @@ static inline void jsimd_idct_islow_pass2_regular(int16_t *workspace, int8x8_t cols_46_s8 = vqrshrn_n_s16(cols_46_s16, DESCALE_P2 - 16); int8x8_t cols_57_s8 = vqrshrn_n_s16(cols_57_s16, DESCALE_P2 - 16); /* Clamp to range [0-255]. */ - uint8x8_t cols_02_u8 = vadd_u8(cols_02_s8, vdup_n_u8(CENTERJSAMPLE)); - uint8x8_t cols_13_u8 = vadd_u8(cols_13_s8, vdup_n_u8(CENTERJSAMPLE)); - uint8x8_t cols_46_u8 = vadd_u8(cols_46_s8, vdup_n_u8(CENTERJSAMPLE)); - uint8x8_t cols_57_u8 = vadd_u8(cols_57_s8, vdup_n_u8(CENTERJSAMPLE)); + uint8x8_t cols_02_u8 = vadd_u8(vreinterpret_u8_s8(cols_02_s8), + vdup_n_u8(CENTERJSAMPLE)); + uint8x8_t cols_13_u8 = vadd_u8(vreinterpret_u8_s8(cols_13_s8), + vdup_n_u8(CENTERJSAMPLE)); + uint8x8_t cols_46_u8 = vadd_u8(vreinterpret_u8_s8(cols_46_s8), + vdup_n_u8(CENTERJSAMPLE)); + uint8x8_t cols_57_u8 = vadd_u8(vreinterpret_u8_s8(cols_57_s8), + vdup_n_u8(CENTERJSAMPLE)); /* Transpose 4x8 block and store to memory. */ /* Zipping adjacent columns together allows us to store 16-bit elements. */ @@ -723,10 +727,14 @@ static inline void jsimd_idct_islow_pass2_sparse(int16_t *workspace, int8x8_t cols_46_s8 = vqrshrn_n_s16(cols_46_s16, DESCALE_P2 - 16); int8x8_t cols_57_s8 = vqrshrn_n_s16(cols_57_s16, DESCALE_P2 - 16); /* Clamp to range [0-255]. 
*/ - uint8x8_t cols_02_u8 = vadd_u8(cols_02_s8, vdup_n_u8(CENTERJSAMPLE)); - uint8x8_t cols_13_u8 = vadd_u8(cols_13_s8, vdup_n_u8(CENTERJSAMPLE)); - uint8x8_t cols_46_u8 = vadd_u8(cols_46_s8, vdup_n_u8(CENTERJSAMPLE)); - uint8x8_t cols_57_u8 = vadd_u8(cols_57_s8, vdup_n_u8(CENTERJSAMPLE)); + uint8x8_t cols_02_u8 = vadd_u8(vreinterpret_u8_s8(cols_02_s8), + vdup_n_u8(CENTERJSAMPLE)); + uint8x8_t cols_13_u8 = vadd_u8(vreinterpret_u8_s8(cols_13_s8), + vdup_n_u8(CENTERJSAMPLE)); + uint8x8_t cols_46_u8 = vadd_u8(vreinterpret_u8_s8(cols_46_s8), + vdup_n_u8(CENTERJSAMPLE)); + uint8x8_t cols_57_u8 = vadd_u8(vreinterpret_u8_s8(cols_57_s8), + vdup_n_u8(CENTERJSAMPLE)); /* Transpose 4x8 block and store to memory. */ /* Zipping adjacent columns together allow us to store 16-bit elements. */ diff --git a/simd/arm/common/jidctred-neon.c b/simd/arm/common/jidctred-neon.c index 3c9393b..ed4232c 100644 --- a/simd/arm/common/jidctred-neon.c +++ b/simd/arm/common/jidctred-neon.c @@ -218,8 +218,8 @@ void jsimd_idct_4x4_neon(void *dct_table, bitmap = vorrq_s16(bitmap, row6); bitmap = vorrq_s16(bitmap, row7); - int64_t left_ac_bitmap = vreinterpret_s64_s16(vget_low_s16(bitmap)); - int64_t right_ac_bitmap = vreinterpret_s64_s16(vget_high_s16(bitmap)); + int64_t left_ac_bitmap = vgetq_lane_s64(vreinterpretq_s64_s16(bitmap), 0); + int64_t right_ac_bitmap = vgetq_lane_s64(vreinterpretq_s64_s16(bitmap), 1); /* Load constants for IDCT computation. */ const int16x4x3_t consts = vld1_s16_x3(jsimd_idct_4x4_neon_consts); @@ -453,7 +453,9 @@ void jsimd_idct_4x4_neon(void *dct_table, /* Interleaving store completes the transpose. */ uint8x8x2_t output_0123 = vzip_u8(vqmovun_s16(output_cols_02), vqmovun_s16(output_cols_13)); - uint16x4x2_t output_01_23 = { output_0123.val[0], output_0123.val[1] }; + uint16x4x2_t output_01_23 = { vreinterpret_u16_u8(output_0123.val[0]), + vreinterpret_u16_u8(output_0123.val[1]) + }; /* Store 4x4 block to memory. */ JSAMPROW outptr0 = output_buf[0] + output_col; |