Commit 55c126a5 authored by Scott LaVarnway

vpx: [x86] add vpx_hadamard_16x16_avx2()

This version is ~1.91x faster than the sse2 version. When
highbitdepth is enabled, the speedup is ~1.74x.

Change-Id: I2b0e92ede9f55c6259ca07bf1f8c8a5d0d0955bd
parent 12df8407
test/hadamard_test.cc
@@ -268,6 +268,11 @@
 INSTANTIATE_TEST_CASE_P(SSE2, Hadamard16x16Test,
                         ::testing::Values(&vpx_hadamard_16x16_sse2));
 #endif  // HAVE_SSE2

+#if HAVE_AVX2
+INSTANTIATE_TEST_CASE_P(AVX2, Hadamard16x16Test,
+                        ::testing::Values(&vpx_hadamard_16x16_avx2));
+#endif  // HAVE_AVX2
+
 #if HAVE_VSX
 INSTANTIATE_TEST_CASE_P(VSX, Hadamard16x16Test,
                         ::testing::Values(&vpx_hadamard_16x16_vsx));
vpx_dsp/vpx_dsp.mk
@@ -289,6 +289,7 @@ endif
 # avg
 DSP_SRCS-yes += avg.c
 DSP_SRCS-$(HAVE_SSE2) += x86/avg_intrin_sse2.c
+DSP_SRCS-$(HAVE_AVX2) += x86/avg_intrin_avx2.c
 DSP_SRCS-$(HAVE_NEON) += arm/avg_neon.c
 DSP_SRCS-$(HAVE_NEON) += arm/hadamard_neon.c
 DSP_SRCS-$(HAVE_MSA) += mips/avg_msa.c
vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -769,7 +769,7 @@ if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") {
   specialize qw/vpx_hadamard_8x8 sse2 neon vsx/, "$ssse3_x86_64";

   add_proto qw/void vpx_hadamard_16x16/, "const int16_t *src_diff, int src_stride, tran_low_t *coeff";
-  specialize qw/vpx_hadamard_16x16 sse2 neon vsx/;
+  specialize qw/vpx_hadamard_16x16 avx2 sse2 neon vsx/;

   add_proto qw/int vpx_satd/, "const tran_low_t *coeff, int length";
   specialize qw/vpx_satd sse2 neon/;
@@ -778,7 +778,7 @@ if (vpx_config("CONFIG_VP9_ENCODER") eq "yes") {
   specialize qw/vpx_hadamard_8x8 sse2 neon msa vsx/, "$ssse3_x86_64";

   add_proto qw/void vpx_hadamard_16x16/, "const int16_t *src_diff, int src_stride, int16_t *coeff";
-  specialize qw/vpx_hadamard_16x16 sse2 neon msa vsx/;
+  specialize qw/vpx_hadamard_16x16 avx2 sse2 neon msa vsx/;

   add_proto qw/int vpx_satd/, "const int16_t *coeff, int length";
   specialize qw/vpx_satd sse2 neon msa/;
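The rtcd prototype above is the public entry point. A minimal calling sketch (hypothetical caller; assumes vpx_dsp_rtcd() has already initialized dispatch, so vpx_hadamard_16x16 resolves to the AVX2 version on capable CPUs):

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"

/* Hypothetical helper: transform one 16x16 residual block.  The coeff
 * buffer must hold 16 * 16 = 256 entries. */
static void transform_block_16x16(const int16_t *src_diff, int src_stride,
                                  tran_low_t *coeff) {
  vpx_hadamard_16x16(src_diff, src_stride, coeff);
}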
vpx_dsp/x86/avg_intrin_avx2.c (new file)
/*
* Copyright (c) 2017 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <immintrin.h>
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/x86/bitdepth_conversion_avx2.h"
#include "vpx_ports/mem.h"
// One pass of the 8-point Hadamard butterflies over eight rows, applied to
// two 8x8 blocks at once (one block per 128-bit lane).  The first pass
// (iter == 0) ends with an 8x8 transpose within each lane so the second
// pass (iter == 1) can reuse the same column butterflies.
static void hadamard_col8x2_avx2(__m256i *in, int iter) {
  __m256i a0 = in[0];
  __m256i a1 = in[1];
  __m256i a2 = in[2];
  __m256i a3 = in[3];
  __m256i a4 = in[4];
  __m256i a5 = in[5];
  __m256i a6 = in[6];
  __m256i a7 = in[7];

  // Stage 1: butterflies between adjacent rows.
  __m256i b0 = _mm256_add_epi16(a0, a1);
  __m256i b1 = _mm256_sub_epi16(a0, a1);
  __m256i b2 = _mm256_add_epi16(a2, a3);
  __m256i b3 = _mm256_sub_epi16(a2, a3);
  __m256i b4 = _mm256_add_epi16(a4, a5);
  __m256i b5 = _mm256_sub_epi16(a4, a5);
  __m256i b6 = _mm256_add_epi16(a6, a7);
  __m256i b7 = _mm256_sub_epi16(a6, a7);

  // Stage 2: butterflies at distance 2.
  a0 = _mm256_add_epi16(b0, b2);
  a1 = _mm256_add_epi16(b1, b3);
  a2 = _mm256_sub_epi16(b0, b2);
  a3 = _mm256_sub_epi16(b1, b3);
  a4 = _mm256_add_epi16(b4, b6);
  a5 = _mm256_add_epi16(b5, b7);
  a6 = _mm256_sub_epi16(b4, b6);
  a7 = _mm256_sub_epi16(b5, b7);

  if (iter == 0) {
    // Stage 3 butterflies at distance 4, followed by an 8x8 transpose
    // within each 128-bit lane (16-, 32-, then 64-bit interleaves).
    b0 = _mm256_add_epi16(a0, a4);
    b7 = _mm256_add_epi16(a1, a5);
    b3 = _mm256_add_epi16(a2, a6);
    b4 = _mm256_add_epi16(a3, a7);
    b2 = _mm256_sub_epi16(a0, a4);
    b6 = _mm256_sub_epi16(a1, a5);
    b1 = _mm256_sub_epi16(a2, a6);
    b5 = _mm256_sub_epi16(a3, a7);

    a0 = _mm256_unpacklo_epi16(b0, b1);
    a1 = _mm256_unpacklo_epi16(b2, b3);
    a2 = _mm256_unpackhi_epi16(b0, b1);
    a3 = _mm256_unpackhi_epi16(b2, b3);
    a4 = _mm256_unpacklo_epi16(b4, b5);
    a5 = _mm256_unpacklo_epi16(b6, b7);
    a6 = _mm256_unpackhi_epi16(b4, b5);
    a7 = _mm256_unpackhi_epi16(b6, b7);

    b0 = _mm256_unpacklo_epi32(a0, a1);
    b1 = _mm256_unpacklo_epi32(a4, a5);
    b2 = _mm256_unpackhi_epi32(a0, a1);
    b3 = _mm256_unpackhi_epi32(a4, a5);
    b4 = _mm256_unpacklo_epi32(a2, a3);
    b5 = _mm256_unpacklo_epi32(a6, a7);
    b6 = _mm256_unpackhi_epi32(a2, a3);
    b7 = _mm256_unpackhi_epi32(a6, a7);

    in[0] = _mm256_unpacklo_epi64(b0, b1);
    in[1] = _mm256_unpackhi_epi64(b0, b1);
    in[2] = _mm256_unpacklo_epi64(b2, b3);
    in[3] = _mm256_unpackhi_epi64(b2, b3);
    in[4] = _mm256_unpacklo_epi64(b4, b5);
    in[5] = _mm256_unpackhi_epi64(b4, b5);
    in[6] = _mm256_unpacklo_epi64(b6, b7);
    in[7] = _mm256_unpackhi_epi64(b6, b7);
  } else {
    // Stage 3 only; results are written back in Hadamard output order.
    in[0] = _mm256_add_epi16(a0, a4);
    in[7] = _mm256_add_epi16(a1, a5);
    in[3] = _mm256_add_epi16(a2, a6);
    in[4] = _mm256_add_epi16(a3, a7);
    in[2] = _mm256_sub_epi16(a0, a4);
    in[6] = _mm256_sub_epi16(a1, a5);
    in[1] = _mm256_sub_epi16(a2, a6);
    in[5] = _mm256_sub_epi16(a3, a7);
  }
}
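Per 16-bit lane, the butterfly network above computes the same three add/sub stages as this scalar sketch (a minimal illustration of the iter == 1 output order, not the repository's scalar code):

static void hadamard_col8_ref(const int16_t *in, int16_t *out) {
  const int16_t b0 = in[0] + in[1], b1 = in[0] - in[1];
  const int16_t b2 = in[2] + in[3], b3 = in[2] - in[3];
  const int16_t b4 = in[4] + in[5], b5 = in[4] - in[5];
  const int16_t b6 = in[6] + in[7], b7 = in[6] - in[7];
  const int16_t c0 = b0 + b2, c1 = b1 + b3, c2 = b0 - b2, c3 = b1 - b3;
  const int16_t c4 = b4 + b6, c5 = b5 + b7, c6 = b4 - b6, c7 = b5 - b7;
  out[0] = c0 + c4;  out[7] = c1 + c5;  out[3] = c2 + c6;  out[4] = c3 + c7;
  out[2] = c0 - c4;  out[6] = c1 - c5;  out[1] = c2 - c6;  out[5] = c3 - c7;
}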
static void hadamard_8x8x2_avx2(int16_t const *src_diff, int src_stride,
                                tran_low_t *coeff) {
  __m256i src[8];
  // Each load picks up one row of two horizontally adjacent 8x8 blocks:
  // columns 0-7 land in the lower 128-bit lane, columns 8-15 in the upper.
  src[0] = _mm256_loadu_si256((const __m256i *)src_diff);
  src[1] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[2] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[3] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[4] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[5] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[6] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));
  src[7] = _mm256_loadu_si256((const __m256i *)(src_diff += src_stride));

  // Column transform (with transpose), then row transform.
  hadamard_col8x2_avx2(src, 0);
  hadamard_col8x2_avx2(src, 1);

  // Store the left 8x8 block (lower lanes) as the first 64 coefficients.
  store_tran_low(_mm256_castsi256_si128(src[0]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[1]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[2]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[3]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[4]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[5]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[6]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[7]), coeff);
  coeff += 8;

  // Move the upper lanes down and store the right 8x8 block as the next 64.
  src[0] = _mm256_castsi128_si256(_mm256_extractf128_si256(src[0], 1));
  src[1] = _mm256_castsi128_si256(_mm256_extractf128_si256(src[1], 1));
  src[2] = _mm256_castsi128_si256(_mm256_extractf128_si256(src[2], 1));
  src[3] = _mm256_castsi128_si256(_mm256_extractf128_si256(src[3], 1));
  src[4] = _mm256_castsi128_si256(_mm256_extractf128_si256(src[4], 1));
  src[5] = _mm256_castsi128_si256(_mm256_extractf128_si256(src[5], 1));
  src[6] = _mm256_castsi128_si256(_mm256_extractf128_si256(src[6], 1));
  src[7] = _mm256_castsi128_si256(_mm256_extractf128_si256(src[7], 1));

  store_tran_low(_mm256_castsi256_si128(src[0]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[1]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[2]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[3]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[4]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[5]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[6]), coeff);
  coeff += 8;
  store_tran_low(_mm256_castsi256_si128(src[7]), coeff);
}
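In terms of output layout, the pair transform behaves like two independent 8x8 Hadamard transforms on horizontally adjacent blocks; a sketch of that equivalence, using the existing vpx_hadamard_8x8 dispatch symbol:

/* Illustrative equivalent: left block's 64 coefficients first, then the
 * right block's. */
static void hadamard_8x8x2_ref(int16_t const *src_diff, int src_stride,
                               tran_low_t *coeff) {
  vpx_hadamard_8x8(src_diff, src_stride, coeff);          /* left 8x8 */
  vpx_hadamard_8x8(src_diff + 8, src_stride, coeff + 64); /* right 8x8 */
}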
void vpx_hadamard_16x16_avx2(int16_t const *src_diff, int src_stride,
                             tran_low_t *coeff) {
  int idx;
  // First pass: four 8x8 Hadamard transforms (two per call), written to
  // coeff[0..63], [64..127], [128..191] and [192..255].
  for (idx = 0; idx < 2; ++idx) {
    int16_t const *src_ptr = src_diff + idx * 8 * src_stride;
    hadamard_8x8x2_avx2(src_ptr, src_stride, coeff + (idx * 64 * 2));
  }

  // Second pass: combine the four sub-blocks with one more butterfly
  // level, halving once so the results stay within 16-bit range.
  for (idx = 0; idx < 64; idx += 16) {
    const __m256i coeff0 = load_tran_low(coeff);
    const __m256i coeff1 = load_tran_low(coeff + 64);
    const __m256i coeff2 = load_tran_low(coeff + 128);
    const __m256i coeff3 = load_tran_low(coeff + 192);

    __m256i b0 = _mm256_add_epi16(coeff0, coeff1);
    __m256i b1 = _mm256_sub_epi16(coeff0, coeff1);
    __m256i b2 = _mm256_add_epi16(coeff2, coeff3);
    __m256i b3 = _mm256_sub_epi16(coeff2, coeff3);

    b0 = _mm256_srai_epi16(b0, 1);
    b1 = _mm256_srai_epi16(b1, 1);
    b2 = _mm256_srai_epi16(b2, 1);
    b3 = _mm256_srai_epi16(b3, 1);

    store_tran_low_256(_mm256_add_epi16(b0, b2), coeff);
    store_tran_low_256(_mm256_add_epi16(b1, b3), coeff + 64);
    store_tran_low_256(_mm256_sub_epi16(b0, b2), coeff + 128);
    store_tran_low_256(_mm256_sub_epi16(b1, b3), coeff + 192);

    coeff += 16;
  }
}
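Per coefficient, the second pass reduces to this scalar recurrence (a minimal sketch of the combine step; 16-bit intermediates mirror the vector arithmetic):

static void hadamard_16x16_combine_ref(tran_low_t *coeff) {
  int i;
  for (i = 0; i < 64; ++i) {
    const int16_t a0 = (int16_t)coeff[i];
    const int16_t a1 = (int16_t)coeff[i + 64];
    const int16_t a2 = (int16_t)coeff[i + 128];
    const int16_t a3 = (int16_t)coeff[i + 192];
    const int16_t b0 = (int16_t)((a0 + a1) >> 1); /* halve each butterfly */
    const int16_t b1 = (int16_t)((a0 - a1) >> 1);
    const int16_t b2 = (int16_t)((a2 + a3) >> 1);
    const int16_t b3 = (int16_t)((a2 - a3) >> 1);
    coeff[i] = b0 + b2;
    coeff[i + 64] = b1 + b3;
    coeff[i + 128] = b0 - b2;
    coeff[i + 192] = b1 - b3;
  }
}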
vpx_dsp/x86/bitdepth_conversion_avx2.h
@@ -27,4 +27,33 @@ static INLINE __m256i load_tran_low(const tran_low_t *a) {
 #endif
 }
// Store 8 16-bit values. If the destination is 32 bits then sign extend
// the values by multiplying by 1.
static INLINE void store_tran_low(__m128i a, tran_low_t *b) {
#if CONFIG_VP9_HIGHBITDEPTH
  // mulhi/mullo by 1 split each 16-bit lane into its sign bits and its
  // value; interleaving the two halves materializes the 32-bit extension.
  const __m128i one = _mm_set1_epi16(1);
  const __m128i a_hi = _mm_mulhi_epi16(a, one);
  const __m128i a_lo = _mm_mullo_epi16(a, one);
  const __m128i a_1 = _mm_unpacklo_epi16(a_lo, a_hi);
  const __m128i a_2 = _mm_unpackhi_epi16(a_lo, a_hi);
  _mm_store_si128((__m128i *)(b), a_1);
  _mm_store_si128((__m128i *)(b + 4), a_2);
#else
  _mm_store_si128((__m128i *)(b), a);
#endif
}
// 256-bit variant of the above.  The per-lane unpack order is the inverse
// of the per-lane pack in load_tran_low, so a load/store round trip
// preserves element order in memory.
static INLINE void store_tran_low_256(__m256i a, tran_low_t *b) {
#if CONFIG_VP9_HIGHBITDEPTH
  const __m256i one = _mm256_set1_epi16(1);
  const __m256i a_hi = _mm256_mulhi_epi16(a, one);
  const __m256i a_lo = _mm256_mullo_epi16(a, one);
  const __m256i a_1 = _mm256_unpacklo_epi16(a_lo, a_hi);
  const __m256i a_2 = _mm256_unpackhi_epi16(a_lo, a_hi);
  _mm256_storeu_si256((__m256i *)(b), a_1);
  _mm256_storeu_si256((__m256i *)(b + 8), a_2);
#else
  _mm256_storeu_si256((__m256i *)(b), a);
#endif
}
#endif  // VPX_DSP_X86_BITDEPTH_CONVERSION_AVX2_H_
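The multiply-by-one trick above works because _mm_mulhi_epi16(a, 1) yields each lane's sign bits (0 or -1) while _mm_mullo_epi16(a, 1) reproduces the value itself. A scalar sketch of one lane:

static INLINE int32_t sign_extend_16_to_32_ref(int16_t v) {
  const int16_t hi = (int16_t)(((int32_t)v * 1) >> 16); /* mulhi: 0 or -1 */
  const int16_t lo = (int16_t)(v * 1);                  /* mullo: v */
  return (int32_t)(((uint32_t)(uint16_t)hi << 16) | (uint16_t)lo);
}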