Commit 9c43d81b authored by Linfeng Zhang

Refactor highbd idct 4x4 and 8x8 x86 functions

BUG=webm:1412

Change-Id: I221dff34dd5f71b390b5e043d0a137ccb0a01dec
parent a83e1f1d
...@@ -754,6 +754,24 @@ INSTANTIATE_TEST_CASE_P(SSSE3, PartialIDctTest, ...@@ -754,6 +754,24 @@ INSTANTIATE_TEST_CASE_P(SSSE3, PartialIDctTest,
#if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH #if HAVE_SSE4_1 && CONFIG_VP9_HIGHBITDEPTH
const PartialInvTxfmParam sse4_1_partial_idct_tests[] = { const PartialInvTxfmParam sse4_1_partial_idct_tests[] = {
make_tuple(
&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
&highbd_wrapper<vpx_highbd_idct8x8_64_add_sse4_1>, TX_8X8, 64, 8, 2),
make_tuple(
&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
&highbd_wrapper<vpx_highbd_idct8x8_64_add_sse4_1>, TX_8X8, 64, 10, 2),
make_tuple(
&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
&highbd_wrapper<vpx_highbd_idct8x8_64_add_sse4_1>, TX_8X8, 64, 12, 2),
make_tuple(
&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
&highbd_wrapper<vpx_highbd_idct8x8_12_add_sse4_1>, TX_8X8, 12, 8, 2),
make_tuple(
&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
&highbd_wrapper<vpx_highbd_idct8x8_12_add_sse4_1>, TX_8X8, 12, 10, 2),
make_tuple(
&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
&highbd_wrapper<vpx_highbd_idct8x8_12_add_sse4_1>, TX_8X8, 12, 12, 2),
make_tuple( make_tuple(
&vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>, &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
&highbd_wrapper<vpx_highbd_idct4x4_16_add_sse4_1>, TX_4X4, 16, 8, 2), &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse4_1>, TX_4X4, 16, 8, 2),
......
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
#include <emmintrin.h> // SSE2
#include "./vpx_dsp_rtcd.h" #include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/highbd_inv_txfm_sse2.h" #include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h" #include "vpx_dsp/x86/inv_txfm_sse2.h"
...@@ -72,73 +74,20 @@ static INLINE void highbd_idct4_small_sse2(__m128i *const io) { ...@@ -72,73 +74,20 @@ static INLINE void highbd_idct4_small_sse2(__m128i *const io) {
io[3] = _mm_sub_epi32(step[0], step[3]); // step[0] - step[3] io[3] = _mm_sub_epi32(step[0], step[3]); // step[0] - step[3]
} }
// Split |in| into absolute values zero-extended to 64 bits (out[0] holds
// lanes 0/1, out[1] holds lanes 2/3) plus the matching 64-bit sign masks
// in sign[0]/sign[1], so a later multiply can restore the signs.
static INLINE void abs_extend_64bit_sse2(const __m128i in,
                                         __m128i *const out /*out[2]*/,
                                         __m128i *const sign /*sign[2]*/) {
  const __m128i mask = _mm_srai_epi32(in, 31);  // 0 or -1 per 32-bit lane
  const __m128i abs32 = _mm_sub_epi32(_mm_xor_si128(in, mask), mask);
  sign[1] = _mm_unpackhi_epi32(mask, mask);   // 64-bit sign of lanes 2, 3
  sign[0] = _mm_unpacklo_epi32(mask, mask);   // 64-bit sign of lanes 0, 1
  out[1] = _mm_unpackhi_epi32(abs32, abs32);  // |lane 2|, |lane 3|
  out[0] = _mm_unpacklo_epi32(abs32, abs32);  // |lane 0|, |lane 1|
}
// Unsigned 32x32->64 multiply of |in| (even lanes) by cospi, then re-apply
// the saved 64-bit sign masks via two's-complement negation
// (x ^ -1) - (-1) == -x; (x ^ 0) - 0 == x.
static INLINE __m128i multiply_apply_sign_sse2(const __m128i in,
                                               const __m128i sign,
                                               const __m128i cospi) {
  const __m128i prod = _mm_mul_epu32(in, cospi);
  return _mm_sub_epi64(_mm_xor_si128(prod, sign), sign);
}
static INLINE void highbd_idct4_large_sse2(__m128i *const io) { static INLINE void highbd_idct4_large_sse2(__m128i *const io) {
const __m128i cospi_p16_p16 = __m128i temp[2], sign[2], step[4];
_mm_setr_epi32((int)cospi_16_64 << 2, 0, (int)cospi_16_64 << 2, 0);
const __m128i cospi_p08_p08 =
_mm_setr_epi32((int)cospi_8_64 << 2, 0, (int)cospi_8_64 << 2, 0);
const __m128i cospi_p24_p24 =
_mm_setr_epi32((int)cospi_24_64 << 2, 0, (int)cospi_24_64 << 2, 0);
__m128i temp1[4], temp2[4], step[4], sign1[4], sign2[4];
transpose_32bit_4x4(io, io); transpose_32bit_4x4(io, io);
// stage 1 // stage 1
temp1[0] = _mm_add_epi32(io[0], io[2]); // input[0] + input[2] temp[0] = _mm_add_epi32(io[0], io[2]); // input[0] + input[2]
temp2[0] = _mm_sub_epi32(io[0], io[2]); // input[0] - input[2] abs_extend_64bit_sse2(temp[0], temp, sign);
abs_extend_64bit_sse2(temp1[0], temp1, sign1); step[0] = multiplication_round_shift_sse2(temp, sign, (int)cospi_16_64);
abs_extend_64bit_sse2(temp2[0], temp2, sign2); temp[0] = _mm_sub_epi32(io[0], io[2]); // input[0] - input[2]
temp1[0] = multiply_apply_sign_sse2(temp1[0], sign1[0], cospi_p16_p16); abs_extend_64bit_sse2(temp[0], temp, sign);
temp1[1] = multiply_apply_sign_sse2(temp1[1], sign1[1], cospi_p16_p16); step[1] = multiplication_round_shift_sse2(temp, sign, (int)cospi_16_64);
temp2[0] = multiply_apply_sign_sse2(temp2[0], sign2[0], cospi_p16_p16); highbd_multiplication_and_add_sse2(io[1], io[3], (int)cospi_24_64,
temp2[1] = multiply_apply_sign_sse2(temp2[1], sign2[1], cospi_p16_p16); (int)cospi_8_64, &step[2], &step[3]);
temp1[0] = dct_const_round_shift_64bit(temp1[0]);
temp1[1] = dct_const_round_shift_64bit(temp1[1]);
temp2[0] = dct_const_round_shift_64bit(temp2[0]);
temp2[1] = dct_const_round_shift_64bit(temp2[1]);
step[0] = pack_4(temp1[0], temp1[1]);
step[1] = pack_4(temp2[0], temp2[1]);
abs_extend_64bit_sse2(io[1], temp1, sign1);
abs_extend_64bit_sse2(io[3], temp2, sign2);
temp1[2] = multiply_apply_sign_sse2(temp1[0], sign1[0], cospi_p08_p08);
temp1[3] = multiply_apply_sign_sse2(temp1[1], sign1[1], cospi_p08_p08);
temp1[0] = multiply_apply_sign_sse2(temp1[0], sign1[0], cospi_p24_p24);
temp1[1] = multiply_apply_sign_sse2(temp1[1], sign1[1], cospi_p24_p24);
temp2[2] = multiply_apply_sign_sse2(temp2[0], sign2[0], cospi_p24_p24);
temp2[3] = multiply_apply_sign_sse2(temp2[1], sign2[1], cospi_p24_p24);
temp2[0] = multiply_apply_sign_sse2(temp2[0], sign2[0], cospi_p08_p08);
temp2[1] = multiply_apply_sign_sse2(temp2[1], sign2[1], cospi_p08_p08);
temp1[0] = _mm_sub_epi64(temp1[0], temp2[0]); // [1]*cospi_24 - [3]*cospi_8
temp1[1] = _mm_sub_epi64(temp1[1], temp2[1]); // [1]*cospi_24 - [3]*cospi_8
temp2[0] = _mm_add_epi64(temp1[2], temp2[2]); // [1]*cospi_8 + [3]*cospi_24
temp2[1] = _mm_add_epi64(temp1[3], temp2[3]); // [1]*cospi_8 + [3]*cospi_24
temp1[0] = dct_const_round_shift_64bit(temp1[0]);
temp1[1] = dct_const_round_shift_64bit(temp1[1]);
temp2[0] = dct_const_round_shift_64bit(temp2[0]);
temp2[1] = dct_const_round_shift_64bit(temp2[1]);
step[2] = pack_4(temp1[0], temp1[1]);
step[3] = pack_4(temp2[0], temp2[1]);
// stage 2 // stage 2
io[0] = _mm_add_epi32(step[0], step[3]); // step[0] + step[3] io[0] = _mm_add_epi32(step[0], step[3]); // step[0] + step[3]
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
#include <smmintrin.h> #include <smmintrin.h> // SSE4.1
#include "./vpx_dsp_rtcd.h" #include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/highbd_inv_txfm_sse2.h" #include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
...@@ -17,25 +17,19 @@ ...@@ -17,25 +17,19 @@
#include "vpx_dsp/x86/transpose_sse2.h" #include "vpx_dsp/x86/transpose_sse2.h"
static INLINE void highbd_idct4(__m128i *const io) { static INLINE void highbd_idct4(__m128i *const io) {
const __m128i cospi_p16_p16 = __m128i temp[2], step[4];
_mm_setr_epi32((int)cospi_16_64 << 2, 0, (int)cospi_16_64 << 2, 0);
const __m128i cospi_p08_p08 =
_mm_setr_epi32((int)cospi_8_64 << 2, 0, (int)cospi_8_64 << 2, 0);
const __m128i cospi_p24_p24 =
_mm_setr_epi32((int)cospi_24_64 << 2, 0, (int)cospi_24_64 << 2, 0);
__m128i temp1[4], step[4];
transpose_32bit_4x4(io, io); transpose_32bit_4x4(io, io);
// stage 1 // stage 1
temp1[0] = _mm_add_epi32(io[0], io[2]); // input[0] + input[2] temp[0] = _mm_add_epi32(io[0], io[2]); // input[0] + input[2]
extend_64bit(temp1[0], temp1); extend_64bit(temp[0], temp);
step[0] = multiplication_round_shift(temp1, cospi_p16_p16); step[0] = multiplication_round_shift_sse4_1(temp, (int)cospi_16_64);
temp1[0] = _mm_sub_epi32(io[0], io[2]); // input[0] - input[2] temp[0] = _mm_sub_epi32(io[0], io[2]); // input[0] - input[2]
extend_64bit(temp1[0], temp1); extend_64bit(temp[0], temp);
step[1] = multiplication_round_shift(temp1, cospi_p16_p16); step[1] = multiplication_round_shift_sse4_1(temp, (int)cospi_16_64);
multiplication_and_add_2_ssse4_1(&io[1], &io[3], &cospi_p24_p24, highbd_multiplication_and_add_sse4_1(io[1], io[3], (int)cospi_24_64,
&cospi_p08_p08, &step[2], &step[3]); (int)cospi_8_64, &step[2], &step[3]);
// stage 2 // stage 2
io[0] = _mm_add_epi32(step[0], step[3]); // step[0] + step[3] io[0] = _mm_add_epi32(step[0], step[3]); // step[0] + step[3]
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
#include <smmintrin.h> #include <smmintrin.h> // SSE4.1
#include "./vpx_dsp_rtcd.h" #include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/highbd_inv_txfm_sse2.h" #include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
...@@ -18,20 +18,6 @@ ...@@ -18,20 +18,6 @@
#include "vpx_dsp/x86/transpose_sse2.h" #include "vpx_dsp/x86/transpose_sse2.h"
static void highbd_idct8x8_half1d(__m128i *const io) { static void highbd_idct8x8_half1d(__m128i *const io) {
const __m128i cp_4q_4q =
_mm_setr_epi32((int)cospi_4_64 << 2, 0, (int)cospi_4_64 << 2, 0);
const __m128i cp_8q_8q =
_mm_setr_epi32((int)cospi_8_64 << 2, 0, (int)cospi_8_64 << 2, 0);
const __m128i cp_12q_12q =
_mm_setr_epi32((int)cospi_12_64 << 2, 0, (int)cospi_12_64 << 2, 0);
const __m128i cp_16q_16q =
_mm_setr_epi32((int)cospi_16_64 << 2, 0, (int)cospi_16_64 << 2, 0);
const __m128i cp_20q_20q =
_mm_setr_epi32((int)cospi_20_64 << 2, 0, (int)cospi_20_64 << 2, 0);
const __m128i cp_24q_24q =
_mm_setr_epi32((int)cospi_24_64 << 2, 0, (int)cospi_24_64 << 2, 0);
const __m128i cp_28q_28q =
_mm_setr_epi32((int)cospi_28_64 << 2, 0, (int)cospi_28_64 << 2, 0);
__m128i temp1[4], temp2[4], step1[8], step2[8]; __m128i temp1[4], temp2[4], step1[8], step2[8];
transpose_32bit_4x4x2(io, io); transpose_32bit_4x4x2(io, io);
...@@ -41,20 +27,20 @@ static void highbd_idct8x8_half1d(__m128i *const io) { ...@@ -41,20 +27,20 @@ static void highbd_idct8x8_half1d(__m128i *const io) {
step1[2] = io[4]; step1[2] = io[4];
step1[1] = io[2]; step1[1] = io[2];
step1[3] = io[6]; step1[3] = io[6];
multiplication_and_add_2_ssse4_1(&io[1], &io[7], &cp_28q_28q, &cp_4q_4q, highbd_multiplication_and_add_sse4_1(io[1], io[7], (int)cospi_28_64,
&step1[4], &step1[7]); (int)cospi_4_64, &step1[4], &step1[7]);
multiplication_and_add_2_ssse4_1(&io[5], &io[3], &cp_12q_12q, &cp_20q_20q, highbd_multiplication_and_add_sse4_1(io[5], io[3], (int)cospi_12_64,
&step1[5], &step1[6]); (int)cospi_20_64, &step1[5], &step1[6]);
// stage 2 // stage 2
temp2[0] = _mm_add_epi32(step1[0], step1[2]); temp2[0] = _mm_add_epi32(step1[0], step1[2]);
extend_64bit(temp2[0], temp1); extend_64bit(temp2[0], temp1);
step2[0] = multiplication_round_shift(temp1, cp_16q_16q); step2[0] = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
temp2[0] = _mm_sub_epi32(step1[0], step1[2]); temp2[0] = _mm_sub_epi32(step1[0], step1[2]);
extend_64bit(temp2[0], temp1); extend_64bit(temp2[0], temp1);
step2[1] = multiplication_round_shift(temp1, cp_16q_16q); step2[1] = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
multiplication_and_add_2_ssse4_1(&step1[1], &step1[3], &cp_24q_24q, &cp_8q_8q, highbd_multiplication_and_add_sse4_1(step1[1], step1[3], (int)cospi_24_64,
&step2[2], &step2[3]); (int)cospi_8_64, &step2[2], &step2[3]);
step2[4] = _mm_add_epi32(step1[4], step1[5]); step2[4] = _mm_add_epi32(step1[4], step1[5]);
step2[5] = _mm_sub_epi32(step1[4], step1[5]); step2[5] = _mm_sub_epi32(step1[4], step1[5]);
step2[6] = _mm_sub_epi32(step1[7], step1[6]); step2[6] = _mm_sub_epi32(step1[7], step1[6]);
...@@ -68,38 +54,17 @@ static void highbd_idct8x8_half1d(__m128i *const io) { ...@@ -68,38 +54,17 @@ static void highbd_idct8x8_half1d(__m128i *const io) {
step1[4] = step2[4]; step1[4] = step2[4];
temp2[0] = _mm_sub_epi32(step2[6], step2[5]); temp2[0] = _mm_sub_epi32(step2[6], step2[5]);
extend_64bit(temp2[0], temp1); extend_64bit(temp2[0], temp1);
step1[5] = multiplication_round_shift(temp1, cp_16q_16q); step1[5] = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
temp2[0] = _mm_add_epi32(step2[6], step2[5]); temp2[0] = _mm_add_epi32(step2[6], step2[5]);
extend_64bit(temp2[0], temp1); extend_64bit(temp2[0], temp1);
step1[6] = multiplication_round_shift(temp1, cp_16q_16q); step1[6] = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
step1[7] = step2[7]; step1[7] = step2[7];
// stage 4 // stage 4
io[0] = _mm_add_epi32(step1[0], step1[7]); highbd_idct8_stage4(step1, io);
io[1] = _mm_add_epi32(step1[1], step1[6]);
io[2] = _mm_add_epi32(step1[2], step1[5]);
io[3] = _mm_add_epi32(step1[3], step1[4]);
io[4] = _mm_sub_epi32(step1[3], step1[4]);
io[5] = _mm_sub_epi32(step1[2], step1[5]);
io[6] = _mm_sub_epi32(step1[1], step1[6]);
io[7] = _mm_sub_epi32(step1[0], step1[7]);
} }
static void highbd_idct8x8_12_half1d(__m128i *const io) { static void highbd_idct8x8_12_half1d(__m128i *const io) {
const __m128i cp_28q_28q =
_mm_setr_epi32((int)cospi_28_64 << 2, 0, (int)cospi_28_64 << 2, 0);
const __m128i cp_4q_4q =
_mm_setr_epi32((int)cospi_4_64 << 2, 0, (int)cospi_4_64 << 2, 0);
const __m128i cp_n20q_n20q =
_mm_setr_epi32(-(int)cospi_20_64 * 4, 0, -(int)cospi_20_64 * 4, 0);
const __m128i cp_12q_12q =
_mm_setr_epi32((int)cospi_12_64 << 2, 0, (int)cospi_12_64 << 2, 0);
const __m128i cp_16q_16q =
_mm_setr_epi32((int)cospi_16_64 << 2, 0, (int)cospi_16_64 << 2, 0);
const __m128i cp_8q_8q =
_mm_setr_epi32((int)cospi_8_64 << 2, 0, (int)cospi_8_64 << 2, 0);
const __m128i cp_24q_24q =
_mm_setr_epi32((int)cospi_24_64 << 2, 0, (int)cospi_24_64 << 2, 0);
__m128i temp1[4], temp2[4], step1[8], step2[8]; __m128i temp1[4], temp2[4], step1[8], step2[8];
transpose_32bit_4x4(io, io); transpose_32bit_4x4(io, io);
...@@ -108,18 +73,18 @@ static void highbd_idct8x8_12_half1d(__m128i *const io) { ...@@ -108,18 +73,18 @@ static void highbd_idct8x8_12_half1d(__m128i *const io) {
step1[0] = io[0]; step1[0] = io[0];
step1[1] = io[2]; step1[1] = io[2];
extend_64bit(io[1], temp1); extend_64bit(io[1], temp1);
step1[4] = multiplication_round_shift(temp1, cp_28q_28q); step1[4] = multiplication_round_shift_sse4_1(temp1, (int)cospi_28_64);
step1[7] = multiplication_round_shift(temp1, cp_4q_4q); step1[7] = multiplication_round_shift_sse4_1(temp1, (int)cospi_4_64);
extend_64bit(io[3], temp1); extend_64bit(io[3], temp1);
step1[5] = multiplication_round_shift(temp1, cp_n20q_n20q); step1[5] = multiplication_round_shift_sse4_1(temp1, -(int)cospi_20_64);
step1[6] = multiplication_round_shift(temp1, cp_12q_12q); step1[6] = multiplication_round_shift_sse4_1(temp1, (int)cospi_12_64);
// stage 2 // stage 2
extend_64bit(step1[0], temp1); extend_64bit(step1[0], temp1);
step2[0] = multiplication_round_shift(temp1, cp_16q_16q); step2[0] = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
extend_64bit(step1[1], temp1); extend_64bit(step1[1], temp1);
step2[2] = multiplication_round_shift(temp1, cp_24q_24q); step2[2] = multiplication_round_shift_sse4_1(temp1, (int)cospi_24_64);
step2[3] = multiplication_round_shift(temp1, cp_8q_8q); step2[3] = multiplication_round_shift_sse4_1(temp1, (int)cospi_8_64);
step2[4] = _mm_add_epi32(step1[4], step1[5]); step2[4] = _mm_add_epi32(step1[4], step1[5]);
step2[5] = _mm_sub_epi32(step1[4], step1[5]); step2[5] = _mm_sub_epi32(step1[4], step1[5]);
step2[6] = _mm_sub_epi32(step1[7], step1[6]); step2[6] = _mm_sub_epi32(step1[7], step1[6]);
...@@ -133,21 +98,14 @@ static void highbd_idct8x8_12_half1d(__m128i *const io) { ...@@ -133,21 +98,14 @@ static void highbd_idct8x8_12_half1d(__m128i *const io) {
step1[4] = step2[4]; step1[4] = step2[4];
temp2[0] = _mm_sub_epi32(step2[6], step2[5]); temp2[0] = _mm_sub_epi32(step2[6], step2[5]);
extend_64bit(temp2[0], temp1); extend_64bit(temp2[0], temp1);
step1[5] = multiplication_round_shift(temp1, cp_16q_16q); step1[5] = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
temp2[0] = _mm_add_epi32(step2[6], step2[5]); temp2[0] = _mm_add_epi32(step2[6], step2[5]);
extend_64bit(temp2[0], temp1); extend_64bit(temp2[0], temp1);
step1[6] = multiplication_round_shift(temp1, cp_16q_16q); step1[6] = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
step1[7] = step2[7]; step1[7] = step2[7];
// stage 4 // stage 4
io[0] = _mm_add_epi32(step1[0], step1[7]); highbd_idct8_stage4(step1, io);
io[1] = _mm_add_epi32(step1[1], step1[6]);
io[2] = _mm_add_epi32(step1[2], step1[5]);
io[3] = _mm_add_epi32(step1[3], step1[4]);
io[4] = _mm_sub_epi32(step1[3], step1[4]);
io[5] = _mm_sub_epi32(step1[2], step1[5]);
io[6] = _mm_sub_epi32(step1[1], step1[6]);
io[7] = _mm_sub_epi32(step1[0], step1[7]);
} }
void vpx_highbd_idct8x8_64_add_sse4_1(const tran_low_t *input, uint16_t *dest, void vpx_highbd_idct8x8_64_add_sse4_1(const tran_low_t *input, uint16_t *dest,
...@@ -210,20 +168,14 @@ void vpx_highbd_idct8x8_64_add_sse4_1(const tran_low_t *input, uint16_t *dest, ...@@ -210,20 +168,14 @@ void vpx_highbd_idct8x8_64_add_sse4_1(const tran_low_t *input, uint16_t *dest,
io[6] = io[10]; io[6] = io[10];
io[7] = io[11]; io[7] = io[11];
highbd_idct8x8_half1d(io); highbd_idct8x8_half1d(io);
io[8] = temp[0]; io[8] = temp[0];
io[9] = temp[1]; io[9] = temp[1];
io[10] = temp[2]; io[10] = temp[2];
io[11] = temp[3]; io[11] = temp[3];
highbd_idct8x8_half1d(&io[8]); highbd_idct8x8_half1d(&io[8]);
io[0] = wraplow_16bit_shift5(io[0], io[8], _mm_set1_epi32(16)); highbd_idct8x8_final_round(io);
io[1] = wraplow_16bit_shift5(io[1], io[9], _mm_set1_epi32(16));
io[2] = wraplow_16bit_shift5(io[2], io[10], _mm_set1_epi32(16));
io[3] = wraplow_16bit_shift5(io[3], io[11], _mm_set1_epi32(16));
io[4] = wraplow_16bit_shift5(io[4], io[12], _mm_set1_epi32(16));
io[5] = wraplow_16bit_shift5(io[5], io[13], _mm_set1_epi32(16));
io[6] = wraplow_16bit_shift5(io[6], io[14], _mm_set1_epi32(16));
io[7] = wraplow_16bit_shift5(io[7], io[15], _mm_set1_epi32(16));
} }
recon_and_store_8(io, dest, stride, bd); recon_and_store_8(io, dest, stride, bd);
...@@ -266,14 +218,7 @@ void vpx_highbd_idct8x8_12_add_sse4_1(const tran_low_t *input, uint16_t *dest, ...@@ -266,14 +218,7 @@ void vpx_highbd_idct8x8_12_add_sse4_1(const tran_low_t *input, uint16_t *dest,
io[11] = temp[3]; io[11] = temp[3];
highbd_idct8x8_12_half1d(&io[8]); highbd_idct8x8_12_half1d(&io[8]);
io[0] = wraplow_16bit_shift5(io[0], io[8], _mm_set1_epi32(16)); highbd_idct8x8_final_round(io);
io[1] = wraplow_16bit_shift5(io[1], io[9], _mm_set1_epi32(16));
io[2] = wraplow_16bit_shift5(io[2], io[10], _mm_set1_epi32(16));
io[3] = wraplow_16bit_shift5(io[3], io[11], _mm_set1_epi32(16));
io[4] = wraplow_16bit_shift5(io[4], io[12], _mm_set1_epi32(16));
io[5] = wraplow_16bit_shift5(io[5], io[13], _mm_set1_epi32(16));
io[6] = wraplow_16bit_shift5(io[6], io[14], _mm_set1_epi32(16));
io[7] = wraplow_16bit_shift5(io[7], io[15], _mm_set1_epi32(16));
} }
recon_and_store_8(io, dest, stride, bd); recon_and_store_8(io, dest, stride, bd);
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#define VPX_DSP_X86_HIGHBD_INV_TXFM_SSE2_H_ #define VPX_DSP_X86_HIGHBD_INV_TXFM_SSE2_H_
#include <emmintrin.h> // SSE2 #include <emmintrin.h> // SSE2
#include "./vpx_config.h" #include "./vpx_config.h"
#include "vpx/vpx_integer.h" #include "vpx/vpx_integer.h"
#include "vpx_dsp/inv_txfm.h" #include "vpx_dsp/inv_txfm.h"
...@@ -44,9 +45,8 @@ static INLINE __m128i wraplow_16bit_shift5(const __m128i in0, const __m128i in1, ...@@ -44,9 +45,8 @@ static INLINE __m128i wraplow_16bit_shift5(const __m128i in0, const __m128i in1,
} }
static INLINE __m128i dct_const_round_shift_64bit(const __m128i in) { static INLINE __m128i dct_const_round_shift_64bit(const __m128i in) {
const __m128i t = _mm_add_epi64( const __m128i t =
in, _mm_add_epi64(in, pair_set_epi32(DCT_CONST_ROUNDING << 2, 0));
_mm_setr_epi32(DCT_CONST_ROUNDING << 2, 0, DCT_CONST_ROUNDING << 2, 0));
return _mm_srli_si128(t, 2); return _mm_srli_si128(t, 2);
} }
...@@ -56,6 +56,94 @@ static INLINE __m128i pack_4(const __m128i in0, const __m128i in1) { ...@@ -56,6 +56,94 @@ static INLINE __m128i pack_4(const __m128i in0, const __m128i in1) {
return _mm_unpacklo_epi32(t0, t1); // 0, 1, 2, 3 return _mm_unpacklo_epi32(t0, t1); // 0, 1, 2, 3
} }
// Split |in| into absolute values zero-extended to 64 bits (out[0] holds
// lanes 0/1, out[1] holds lanes 2/3) plus the matching 64-bit sign masks
// in sign[0]/sign[1], so a later multiply can restore the signs.
static INLINE void abs_extend_64bit_sse2(const __m128i in,
                                         __m128i *const out /*out[2]*/,
                                         __m128i *const sign /*sign[2]*/) {
  const __m128i mask = _mm_srai_epi32(in, 31);  // 0 or -1 per 32-bit lane
  const __m128i abs32 = _mm_sub_epi32(_mm_xor_si128(in, mask), mask);
  sign[1] = _mm_unpackhi_epi32(mask, mask);   // 64-bit sign of lanes 2, 3
  sign[0] = _mm_unpacklo_epi32(mask, mask);   // 64-bit sign of lanes 0, 1
  out[1] = _mm_unpackhi_epi32(abs32, abs32);  // |lane 2|, |lane 3|
  out[0] = _mm_unpacklo_epi32(abs32, abs32);  // |lane 0|, |lane 1|
}
// Multiply the unsigned magnitudes |in| (even lanes) by cospi with
// _mm_mul_epu32, then restore the original signs using the saved 64-bit
// masks: (x ^ -1) - (-1) negates, (x ^ 0) - 0 is the identity.
// Note: cospi must be non negative.
static INLINE __m128i multiply_apply_sign_sse2(const __m128i in,
                                               const __m128i sign,
                                               const __m128i cospi) {
  const __m128i prod = _mm_mul_epu32(in, cospi);
  return _mm_sub_epi64(_mm_xor_si128(prod, sign), sign);
}
// Multiply both 64-bit-extended halves by the constant c (pre-scaled by 4 to
// match dct_const_round_shift_64bit), round-shift, and repack the four
// results into one vector of 32-bit lanes.
// Note: c must be non negative.
static INLINE __m128i multiplication_round_shift_sse2(
    const __m128i *const in /*in[2]*/, const __m128i *const sign /*sign[2]*/,
    const int c) {
  const __m128i pair_c = pair_set_epi32(c << 2, 0);
  __m128i lo, hi;

  lo = dct_const_round_shift_64bit(
      multiply_apply_sign_sse2(in[0], sign[0], pair_c));
  hi = dct_const_round_shift_64bit(
      multiply_apply_sign_sse2(in[1], sign[1], pair_c));
  return pack_4(lo, hi);
}
// Butterfly multiply for the highbd idct:
//   *out0 = round_shift(in0 * c0 - in1 * c1)
//   *out1 = round_shift(in0 * c1 + in1 * c0)
// Constants are pre-scaled by 4 to match dct_const_round_shift_64bit.
// Note: c0 and c1 must be non negative.
static INLINE void highbd_multiplication_and_add_sse2(
    const __m128i in0, const __m128i in1, const int c0, const int c1,
    __m128i *const out0, __m128i *const out1) {
  const __m128i pair_c0 = pair_set_epi32(c0 << 2, 0);
  const __m128i pair_c1 = pair_set_epi32(c1 << 2, 0);
  // tempX[0..1] hold the 64-bit magnitudes of inX; [2..3] receive products.
  __m128i temp1[4], temp2[4], sign1[4], sign2[4];

  abs_extend_64bit_sse2(in0, temp1, sign1);
  abs_extend_64bit_sse2(in1, temp2, sign2);
  // Order matters: the c1 products below read tempX[0..1] before the c0
  // products overwrite those same slots.
  temp1[2] = multiply_apply_sign_sse2(temp1[0], sign1[0], pair_c1);  // in0*c1
  temp1[3] = multiply_apply_sign_sse2(temp1[1], sign1[1], pair_c1);
  temp1[0] = multiply_apply_sign_sse2(temp1[0], sign1[0], pair_c0);  // in0*c0
  temp1[1] = multiply_apply_sign_sse2(temp1[1], sign1[1], pair_c0);
  temp2[2] = multiply_apply_sign_sse2(temp2[0], sign2[0], pair_c0);  // in1*c0
  temp2[3] = multiply_apply_sign_sse2(temp2[1], sign2[1], pair_c0);
  temp2[0] = multiply_apply_sign_sse2(temp2[0], sign2[0], pair_c1);  // in1*c1
  temp2[1] = multiply_apply_sign_sse2(temp2[1], sign2[1], pair_c1);
  temp1[0] = _mm_sub_epi64(temp1[0], temp2[0]);  // in0*c0 - in1*c1
  temp1[1] = _mm_sub_epi64(temp1[1], temp2[1]);
  temp2[0] = _mm_add_epi64(temp1[2], temp2[2]);  // in0*c1 + in1*c0
  temp2[1] = _mm_add_epi64(temp1[3], temp2[3]);
  temp1[0] = dct_const_round_shift_64bit(temp1[0]);
  temp1[1] = dct_const_round_shift_64bit(temp1[1]);
  temp2[0] = dct_const_round_shift_64bit(temp2[0]);
  temp2[1] = dct_const_round_shift_64bit(temp2[1]);
  *out0 = pack_4(temp1[0], temp1[1]);
  *out1 = pack_4(temp2[0], temp2[1]);
}
// idct8 stage 4 butterflies:
//   out[i]     = in[i] + in[7 - i]
//   out[7 - i] = in[i] - in[7 - i]
// Assumes |in| and |out| are distinct arrays (callers pass step1 and io).
static INLINE void highbd_idct8_stage4(const __m128i *const in,
                                       __m128i *const out) {
  int i;

  for (i = 0; i < 4; i++) {
    out[i] = _mm_add_epi32(in[i], in[7 - i]);
    out[7 - i] = _mm_sub_epi32(in[i], in[7 - i]);
  }
}
// Final rounding for the 8x8 idct: fold the two half results (io[0..7] and
// io[8..15]) into 16-bit output lanes with a round-shift by 5 (rounding
// constant 16 == 1 << 4).
static INLINE void highbd_idct8x8_final_round(__m128i *const io) {
  int i;

  for (i = 0; i < 8; i++) {
    io[i] = wraplow_16bit_shift5(io[i], io[i + 8], _mm_set1_epi32(16));
  }
}
static INLINE __m128i add_clamp(const __m128i in0, const __m128i in1, static INLINE __m128i add_clamp(const __m128i in0, const __m128i in1,
const int bd) { const int bd) {
const __m128i zero = _mm_set1_epi16(0); const __m128i zero = _mm_set1_epi16(0);
......
...@@ -14,37 +14,38 @@ ...@@ -14,37 +14,38 @@
#include <smmintrin.h> // SSE4.1 #include <smmintrin.h> // SSE4.1
#include "./vpx_config.h" #include "./vpx_config.h"
#include "vpx/vpx_integer.h" #include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
#include "vpx_dsp/inv_txfm.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"
static INLINE __m128i multiplication_round_shift(const __m128i *const in, static INLINE __m128i multiplication_round_shift_sse4_1(
const __m128i cospi) { const __m128i *const in /*in[2]*/, const int c) {
const __m128i pair_c = pair_set_epi32(c << 2, 0);
__m128i t0, t1; __m128i t0, t1;
t0 = _mm_mul_epi32(in[0], cospi);
t1 = _mm_mul_epi32(in[1], cospi); t0 = _mm_mul_epi32(in[0], pair_c);
t1 = _mm_mul_epi32(in[1], pair_c);
t0 = dct_const_round_shift_64bit(t0); t0 = dct_const_round_shift_64bit(t0);
t1 = dct_const_round_shift_64bit(t1); t1 = dct_const_round_shift_64bit(t1);
return pack_4(t0, t1); return pack_4(t0, t1);
} }
static INLINE void multiplication_and_add_2_ssse4_1(const __m128i *const in0, static INLINE void highbd_multiplication_and_add_sse4_1(
const __m128i *const in1, const __m128i in0, const __m128i in1, const int c0, const int c1,
const __m128i *const cst0, __m128i *const out0, __m128i *const out1) {
const __m128i *const cst1, const __m128i pair_c0 = pair_set_epi32(c0 << 2, 0);
__m128i *const out0, const __m128i pair_c1 = pair_set_epi32(c1 << 2, 0);
__m128i *const out1) {
__m128i temp1[4], temp2[4]; __m128i temp1[4], temp2[4];
extend_64bit(*in0, temp1);
extend_64bit(*in1, temp2); extend_64bit(in0, temp1);
temp1[2] = _mm_mul_epi32(temp1[0], *cst1); extend_64bit(in1, temp2);
temp1[3] = _mm_mul_epi32(temp1[1], *cst1); temp1[2] = _mm_mul_epi32(temp1[0], pair_c1);
temp1[0] = _mm_mul_epi32(temp1[0], *cst0); temp1[3] = _mm_mul_epi32(temp1[1], pair_c1);
temp1[1] = _mm_mul_epi32(temp1[1], *cst0); temp1[0] = _mm_mul_epi32(temp1[0], pair_c0);
temp2[2] = _mm_mul_epi32(temp2[0], *cst0); temp1[1] = _mm_mul_epi32(temp1[1], pair_c0);
temp2[3] = _mm_mul_epi32(temp2[1], *cst0); temp2[2] = _mm_mul_epi32(temp2[0], pair_c0);
temp2[0] = _mm_mul_epi32(temp2[0], *cst1); temp2[3] = _mm_mul_epi32(temp2[1], pair_c0);
temp2[1] = _mm_mul_epi32(temp2[1], *cst1); temp2[0] = _mm_mul_epi32(temp2[0], pair_c1);
temp2[1] = _mm_mul_epi32(temp2[1], pair_c1);
temp1[0] = _mm_sub_epi64(temp1[0], temp2[0]); temp1[0] = _mm_sub_epi64(temp1[0], temp2[0]);
temp1[1] = _mm_sub_epi64(temp1[1], temp2[1]); temp1[1] = _mm_sub_epi64(temp1[1], temp2[1]);
temp2[0] = _mm_add_epi64(temp1[2], temp2[2]); temp2[0] = _mm_add_epi64(temp1[2], temp2[2]);
......
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
* be found in the AUTHORS file in the root of the source tree. * be found in the AUTHORS file in the root of the source tree.
*/ */
#include <emmintrin.h> // SSE2
#include "./vpx_dsp_rtcd.h" #include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h" #include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/transpose_sse2.h" #include "vpx_dsp/x86/transpose_sse2.h"
...@@ -146,72 +148,6 @@ void iadst4_sse2(__m128i *in) { ...@@ -146,72 +148,6 @@ void iadst4_sse2(__m128i *in) {
in[1] = _mm_packs_epi32(u[2], u[3]); in[1] = _mm_packs_epi32(u[2], u[3]);
} }
// Multiply elements by constants and add them together.
static INLINE void multiplication_and_add(
const __m128i *const in0, const __m128i *const in1,
const __m128i *const in2, const __m128i *const in3,
const __m128i *const cst0, const __m128i *const cst1,
const __m128i *const cst2, const __m128i *const cst3, __m128i *const res0,
__m128i *const res1, __m128i *const res2, __m128i *const res3) {
const __m128i lo_0 = _mm_unpacklo_epi16(*in0, *in1);
const __m128i hi_0 = _mm_unpackhi_epi16(*in0, *in1);
const __m128i lo_1 = _mm_unpacklo_epi16(*in2, *in3);
const __m128i hi_1 = _mm_unpackhi_epi16(*in2, *in3);
*res0 = idct_calc_wraplow_sse2(lo_0, hi_0, *cst0);
*res1 = idct_calc_wraplow_sse2(lo_0, hi_0, *cst1);
*res2 = idct_calc_wraplow_sse2(lo_1, hi_1, *cst2);