Commit 9a71811d authored by Linfeng Zhang's avatar Linfeng Zhang Committed by Gerrit Code Review
Browse files

Merge changes Id6a8c549,Ib1e0650b,Ic369dd86

* changes:
  Refactor x86/vpx_subpixel_8t_intrin_ssse3.c
  Add vpx_dsp/x86/mem_sse2.h
  Add transpose_8bit_{4x4,8x8}() x86 optimization
parents ffa3a3c4 6543213e
......@@ -580,6 +580,29 @@ TEST_P(ConvolveTest, DISABLED_Avg_Speed) {
UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);
}
// Speed benchmark for the scaled 8-tap convolution path. Disabled by default;
// run explicitly with --gtest_also_run_disabled_tests. Prints elapsed
// microseconds rather than asserting, so it never fails.
TEST_P(ConvolveTest, DISABLED_Scale_Speed) {
const uint8_t *const in = input();
uint8_t *const out = output();
// NOTE(review): EIGHTTAP kernel from VP9's filter table — assumes
// vp9_filter_kernels is visible in this translation unit.
const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
const int kNumTests = 5000000;
const int width = Width();
const int height = Height();
vpx_usec_timer timer;
// Constant input removes data-dependent timing variation between runs.
SetConstantInput(127);
vpx_usec_timer_start(&timer);
for (int n = 0; n < kNumTests; ++n) {
// shv8_[0]: scaled horizontal+vertical 8-tap function under test.
// Args 8, 16, 8, 16 are x/y subpel offsets and steps — presumably step 16
// means unit (1:1) scaling here; confirm against the convolve API.
UUT_->shv8_[0](in, kInputStride, out, kOutputStride, eighttap, 8, 16, 8, 16,
width, height);
}
vpx_usec_timer_mark(&timer);
const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
printf("convolve_scale_%dx%d_%d: %d us\n", width, height,
UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);
}
TEST_P(ConvolveTest, Copy) {
uint8_t *const in = input();
uint8_t *const out = output();
......
......@@ -50,7 +50,6 @@ DSP_SRCS-yes += intrapred.c
DSP_SRCS-$(HAVE_SSE) += x86/intrapred_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/intrapred_sse2.asm
DSP_SRCS-$(HAVE_SSSE3) += x86/intrapred_ssse3.asm
DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_ssse3.asm
DSP_SRCS-$(HAVE_VSX) += ppc/intrapred_vsx.c
ifeq ($(CONFIG_VP9_HIGHBITDEPTH),yes)
......@@ -89,6 +88,7 @@ DSP_SRCS-yes += vpx_filter.h
DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/convolve.h
DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/vpx_asm_stubs.c
DSP_SRCS-$(HAVE_SSSE3) += x86/convolve_ssse3.h
DSP_SRCS-$(HAVE_SSE2) += x86/vpx_subpixel_8t_sse2.asm
DSP_SRCS-$(HAVE_SSE2) += x86/vpx_subpixel_bilinear_sse2.asm
DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_ssse3.asm
......@@ -386,6 +386,7 @@ DSP_SRCS-$(HAVE_VSX) += ppc/transpose_vsx.h
DSP_SRCS-$(HAVE_VSX) += ppc/bitdepth_conversion_vsx.h
# X86 utilities
DSP_SRCS-$(HAVE_SSE2) += x86/mem_sse2.h
DSP_SRCS-$(HAVE_SSE2) += x86/transpose_sse2.h
DSP_SRCS-no += $(DSP_SRCS_REMOVE-yes)
......
/*
* Copyright (c) 2017 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VPX_DSP_X86_CONVOLVE_SSSE3_H_
#define VPX_DSP_X86_CONVOLVE_SSSE3_H_
#include <tmmintrin.h> // SSSE3
#include "./vpx_config.h"
// Duplicate the four adjacent coefficient pairs of an 8-tap filter across a
// whole register each: f[i] repeats the byte pair {filter[2i], filter[2i+1]}
// in every 16-bit lane, ready for _mm_maddubs_epi16.
// Only the low byte of each int16_t coefficient is kept (taps are assumed to
// fit in 8 bits).
static INLINE void shuffle_filter_ssse3(const int16_t *const filter,
                                        __m128i *const f) {
  const __m128i coeffs = _mm_load_si128((const __m128i *)filter);
  int i;

  // The shuffle mask for pair i selects source bytes 4*i and 4*i + 2 — the
  // low bytes of coefficients 2*i and 2*i + 1 (little endian) — replicated
  // into all eight 16-bit lanes.
  for (i = 0; i < 4; ++i) {
    const short mask = (short)(((4 * i + 2) << 8) | (4 * i));
    f[i] = _mm_shuffle_epi8(coeffs, _mm_set1_epi16(mask));
  }
}
// Apply an 8-tap filter to 8 output pixels and round/shift back to pixel
// range. s[i] holds the interleaved source byte pairs for taps (2i, 2i+1);
// f[i] holds the matching duplicated coefficient pair (see
// shuffle_filter_ssse3()). Returns eight 16-bit results in [0, 255] range
// after the >> 7 (callers pack to bytes).
static INLINE __m128i convolve8_8_ssse3(const __m128i *const s,
const __m128i *const f) {
// multiply 2 adjacent elements with the filter and add the result
const __m128i k_64 = _mm_set1_epi16(1 << 6);  // rounding constant, 0.5 in Q7
const __m128i x0 = _mm_maddubs_epi16(s[0], f[0]);
const __m128i x1 = _mm_maddubs_epi16(s[1], f[1]);
const __m128i x2 = _mm_maddubs_epi16(s[2], f[2]);
const __m128i x3 = _mm_maddubs_epi16(s[3], f[3]);
// add and saturate the results together
// The min/max of the two middle partial sums fixes the order in which the
// saturating adds are applied, making any intermediate 16-bit saturation
// deterministic — presumably so the result matches the reference
// implementation bit-exactly; do not reorder these adds.
const __m128i min_x2x1 = _mm_min_epi16(x2, x1);
const __m128i max_x2x1 = _mm_max_epi16(x2, x1);
__m128i temp = _mm_adds_epi16(x0, x3);
temp = _mm_adds_epi16(temp, min_x2x1);
temp = _mm_adds_epi16(temp, max_x2x1);
// round and shift by 7 bit each 16 bit
temp = _mm_adds_epi16(temp, k_64);
temp = _mm_srai_epi16(temp, 7);
return temp;
}
#endif // VPX_DSP_X86_CONVOLVE_SSSE3_H_
/*
* Copyright (c) 2017 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VPX_DSP_X86_MEM_SSE2_H_
#define VPX_DSP_X86_MEM_SSE2_H_
#include <emmintrin.h>  // SSE2
#include <string.h>     // memcpy

#include "./vpx_config.h"
// Load four 4-byte rows into the low 32 bits of d[0..3].
// The rows live at s + i * stride with no alignment guarantee, so the 32-bit
// loads go through memcpy: dereferencing (const int *)(s + i * stride)
// directly would be undefined behavior (misaligned access and a strict
// aliasing violation). Compilers lower these memcpy calls to single 32-bit
// loads on x86, so there is no performance cost.
static INLINE void load_8bit_4x4(const uint8_t *const s, const ptrdiff_t stride,
                                 __m128i *const d) {
  int a0, a1, a2, a3;
  memcpy(&a0, s + 0 * stride, sizeof(a0));
  memcpy(&a1, s + 1 * stride, sizeof(a1));
  memcpy(&a2, s + 2 * stride, sizeof(a2));
  memcpy(&a3, s + 3 * stride, sizeof(a3));
  d[0] = _mm_cvtsi32_si128(a0);
  d[1] = _mm_cvtsi32_si128(a1);
  d[2] = _mm_cvtsi32_si128(a2);
  d[3] = _mm_cvtsi32_si128(a3);
}
// Load eight 4-byte rows into d[0..7] as two stacked 4x4 loads.
static INLINE void load_8bit_4x8(const uint8_t *const s, const ptrdiff_t stride,
                                 __m128i *const d) {
  const uint8_t *src = s;
  load_8bit_4x4(src, stride, d);      // rows 0..3
  src += 4 * stride;
  load_8bit_4x4(src, stride, d + 4);  // rows 4..7
}
// Load four 8-byte rows into the low 64 bits of d[0..3].
static INLINE void load_8bit_8x4(const uint8_t *const s, const ptrdiff_t stride,
                                 __m128i *const d) {
  int row;
  for (row = 0; row < 4; ++row) {
    d[row] = _mm_loadl_epi64((const __m128i *)(s + row * stride));
  }
}
// Load eight 8-byte rows into d[0..7] as two stacked 8x4 loads.
static INLINE void load_8bit_8x8(const uint8_t *const s, const ptrdiff_t stride,
                                 __m128i *const d) {
  const uint8_t *src = s;
  load_8bit_8x4(src, stride, d);      // rows 0..3
  src += 4 * stride;
  load_8bit_8x4(src, stride, d + 4);  // rows 4..7
}
// Load eight 16-byte rows into d[0..7]. Rows must be 16-byte aligned
// (aligned _mm_load_si128); use loadu_8bit_16x8() otherwise.
static INLINE void load_8bit_16x8(const uint8_t *const s,
                                  const ptrdiff_t stride, __m128i *const d) {
  int row;
  for (row = 0; row < 8; ++row) {
    d[row] = _mm_load_si128((const __m128i *)(s + row * stride));
  }
}
// Load eight 16-byte rows into d[0..7], with no alignment requirement
// (unaligned _mm_loadu_si128).
static INLINE void loadu_8bit_16x8(const uint8_t *const s,
                                   const ptrdiff_t stride, __m128i *const d) {
  int row;
  for (row = 0; row < 8; ++row) {
    d[row] = _mm_loadu_si128((const __m128i *)(s + row * stride));
  }
}
// Store the low 4 bytes of s[0..3] to four rows at d + i * stride.
// The destinations have no alignment guarantee, so the 32-bit stores go
// through memcpy: writing through (int *)(d + i * stride) directly would be
// undefined behavior (misaligned access and a strict aliasing violation).
// Compilers lower these memcpy calls to single 32-bit stores on x86.
static INLINE void store_8bit_4x4(const __m128i *const s, uint8_t *const d,
                                  const ptrdiff_t stride) {
  const int a0 = _mm_cvtsi128_si32(s[0]);
  const int a1 = _mm_cvtsi128_si32(s[1]);
  const int a2 = _mm_cvtsi128_si32(s[2]);
  const int a3 = _mm_cvtsi128_si32(s[3]);
  memcpy(d + 0 * stride, &a0, sizeof(a0));
  memcpy(d + 1 * stride, &a1, sizeof(a1));
  memcpy(d + 2 * stride, &a2, sizeof(a2));
  memcpy(d + 3 * stride, &a3, sizeof(a3));
}
// Store the low 8 bytes of s[0..7] to eight rows at d + i * stride.
static INLINE void store_8bit_8x8(const __m128i *const s, uint8_t *const d,
                                  const ptrdiff_t stride) {
  int row;
  for (row = 0; row < 8; ++row) {
    _mm_storel_epi64((__m128i *)(d + row * stride), s[row]);
  }
}
#endif // VPX_DSP_X86_MEM_SSE2_H_
......@@ -11,7 +11,86 @@
#ifndef VPX_DSP_X86_TRANSPOSE_SSE2_H_
#define VPX_DSP_X86_TRANSPOSE_SSE2_H_
#include "./vpx_dsp_rtcd.h"
#include <emmintrin.h> // SSE2
#include "./vpx_config.h"
// Transpose a 4x4 byte block. in[i] holds row i in its low 4 bytes; the
// returned register holds the transposed block in its low 16 bytes:
//   00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
// (rc = source row r, column c).
static INLINE __m128i transpose_8bit_4x4(const __m128i *const in) {
  // Interleave rows 0/1 and rows 2/3 at byte granularity:
  //   rows01: 00 10 01 11 02 12 03 13
  //   rows23: 20 30 21 31 22 32 23 33
  const __m128i rows01 = _mm_unpacklo_epi8(in[0], in[1]);
  const __m128i rows23 = _mm_unpacklo_epi8(in[2], in[3]);

  // Interleaving those two at 16-bit granularity completes the transpose.
  return _mm_unpacklo_epi16(rows01, rows23);
}
// Transpose an 8x8 byte block. in[i] holds source row i in its low 8 bytes;
// on return out[j] holds source column j in its low 8 bytes (the upper 8
// bytes hold a duplicate of the same data).
// Notation below: rc = source row r, column c.
static INLINE void transpose_8bit_8x8(const __m128i *const in,
                                      __m128i *const out) {
  // Stage 1: interleave adjacent row pairs at byte granularity.
  //   w0: 00 10 01 11 02 12 03 13 04 14 05 15 06 16 07 17
  //   w1: 20 30 21 31 ... 27 37
  //   w2: 40 50 41 51 ... 47 57
  //   w3: 60 70 61 71 ... 67 77
  const __m128i w0 = _mm_unpacklo_epi8(in[0], in[1]);
  const __m128i w1 = _mm_unpacklo_epi8(in[2], in[3]);
  const __m128i w2 = _mm_unpacklo_epi8(in[4], in[5]);
  const __m128i w3 = _mm_unpacklo_epi8(in[6], in[7]);

  // Stage 2: interleave at 16-bit granularity.
  //   x0: 00 10 20 30 01 11 21 31 02 12 22 32 03 13 23 33
  //   x1: 04 14 24 34 05 15 25 35 06 16 26 36 07 17 27 37
  //   x2: 40 50 60 70 41 51 61 71 42 52 62 72 43 53 63 73
  //   x3: 44 54 64 74 45 55 65 75 46 56 66 76 47 57 67 77
  const __m128i x0 = _mm_unpacklo_epi16(w0, w1);
  const __m128i x1 = _mm_unpackhi_epi16(w0, w1);
  const __m128i x2 = _mm_unpacklo_epi16(w2, w3);
  const __m128i x3 = _mm_unpackhi_epi16(w2, w3);

  // Stage 3: interleave at 32-bit granularity. Each y register now holds two
  // complete output columns:
  //   y0: col 0 | col 1    y1: col 2 | col 3
  //   y2: col 4 | col 5    y3: col 6 | col 7
  const __m128i y0 = _mm_unpacklo_epi32(x0, x2);
  const __m128i y1 = _mm_unpackhi_epi32(x0, x2);
  const __m128i y2 = _mm_unpacklo_epi32(x1, x3);
  const __m128i y3 = _mm_unpackhi_epi32(x1, x3);

  // Stage 4: split each y into its 64-bit halves; unpacking a register with
  // itself duplicates the chosen half into both lanes of the output.
  out[0] = _mm_unpacklo_epi64(y0, y0);
  out[1] = _mm_unpackhi_epi64(y0, y0);
  out[2] = _mm_unpacklo_epi64(y1, y1);
  out[3] = _mm_unpackhi_epi64(y1, y1);
  out[4] = _mm_unpacklo_epi64(y2, y2);
  out[5] = _mm_unpackhi_epi64(y2, y2);
  out[6] = _mm_unpacklo_epi64(y3, y3);
  out[7] = _mm_unpackhi_epi64(y3, y3);
}
static INLINE void transpose_16bit_4x4(const __m128i *const in,
__m128i *const out) {
......
This diff is collapsed.
......@@ -327,12 +327,12 @@ cglobal filter_block1d16_%1, 6, 6, 14, LOCAL_VARS_SIZE, \
%endm
INIT_XMM ssse3
SUBPIX_HFILTER16 h8
SUBPIX_HFILTER16 h8_avg
SUBPIX_HFILTER8 h8
SUBPIX_HFILTER8 h8_avg
SUBPIX_HFILTER4 h8
SUBPIX_HFILTER4 h8_avg
SUBPIX_HFILTER16 h8 ; vpx_filter_block1d16_h8_ssse3
SUBPIX_HFILTER16 h8_avg ; vpx_filter_block1d16_h8_avg_ssse3
SUBPIX_HFILTER8 h8 ; vpx_filter_block1d8_h8_ssse3
SUBPIX_HFILTER8 h8_avg ; vpx_filter_block1d8_h8_avg_ssse3
SUBPIX_HFILTER4 h8 ; vpx_filter_block1d4_h8_ssse3
SUBPIX_HFILTER4 h8_avg ; vpx_filter_block1d4_h8_avg_ssse3
;-------------------------------------------------------------------------------
......@@ -795,9 +795,9 @@ cglobal filter_block1d16_%1, 6, NUM_GENERAL_REG_USED, 16, LOCAL_VARS_SIZE, \
%endm
INIT_XMM ssse3
SUBPIX_VFILTER16 v8
SUBPIX_VFILTER16 v8_avg
SUBPIX_VFILTER v8, 8
SUBPIX_VFILTER v8_avg, 8
SUBPIX_VFILTER v8, 4
SUBPIX_VFILTER v8_avg, 4
SUBPIX_VFILTER16 v8 ; vpx_filter_block1d16_v8_ssse3
SUBPIX_VFILTER16 v8_avg ; vpx_filter_block1d16_v8_avg_ssse3
SUBPIX_VFILTER v8, 8 ; vpx_filter_block1d8_v8_ssse3
SUBPIX_VFILTER v8_avg, 8 ; vpx_filter_block1d8_v8_avg_ssse3
SUBPIX_VFILTER v8, 4 ; vpx_filter_block1d4_v8_ssse3
SUBPIX_VFILTER v8_avg, 4 ; vpx_filter_block1d4_v8_avg_ssse3
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment