Commit f1b09c04 authored by Parag Salasakar, committed by Gerrit Code Review

Merge "mips msa vp9 convolve8 avg horiz optimization"

parents 5df6c045 b8c1cdcd

@@ -1818,7 +1818,7 @@ INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest, ::testing::Values(
 #if HAVE_MSA
 const ConvolveFunctions convolve8_msa(
     vp9_convolve_copy_msa, vp9_convolve_avg_msa,
-    vp9_convolve8_horiz_msa, vp9_convolve8_avg_horiz_c,
+    vp9_convolve8_horiz_msa, vp9_convolve8_avg_horiz_msa,
     vp9_convolve8_vert_msa, vp9_convolve8_avg_vert_msa,
     vp9_convolve8_msa, vp9_convolve8_avg_c, 0);

/*
* Copyright (c) 2015 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "./vp9_rtcd.h"
#include "vp9/common/mips/msa/vp9_convolve_msa.h"
static void common_hz_8t_and_aver_dst_4x4_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter) {
v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
v16u8 dst0, dst1, dst2, dst3, res2, res3;
v16u8 mask0, mask1, mask2, mask3;
v8i16 filt, res0, res1;
mask0 = LD_UB(&mc_filt_mask_arr[16]);
src -= 3;
/* rearranging filter */
filt = LD_SH(filter);
SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
LD_SB4(src, src_stride, src0, src1, src2, src3);
XORI_B4_128_SB(src0, src1, src2, src3);
HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
filt0, filt1, filt2, filt3, res0, res1);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
SRARI_H2_SH(res0, res1, FILTER_BITS);
SAT_SH2_SH(res0, res1, 7);
PCKEV_B2_UB(res0, res0, res1, res1, res2, res3);
ILVR_W2_UB(dst1, dst0, dst3, dst2, dst0, dst2);
XORI_B2_128_UB(res2, res3);
AVER_UB2_UB(res2, dst0, res3, dst2, res2, res3);
ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
}
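
/* 8-tap horizontal filter + destination averaging for a 4x8 block. */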
static void common_hz_8t_and_aver_dst_4x8_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter) {
v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
v16u8 mask0, mask1, mask2, mask3, res0, res1, res2, res3;
v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
v8i16 filt, vec0, vec1, vec2, vec3;
mask0 = LD_UB(&mc_filt_mask_arr[16]);
src -= 3;
/* rearranging filter */
filt = LD_SH(filter);
SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
LD_SB4(src, src_stride, src0, src1, src2, src3);
XORI_B4_128_SB(src0, src1, src2, src3);
src += (4 * src_stride);
LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
filt0, filt1, filt2, filt3, vec0, vec1);
LD_SB4(src, src_stride, src0, src1, src2, src3);
XORI_B4_128_SB(src0, src1, src2, src3);
HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
filt0, filt1, filt2, filt3, vec2, vec3);
SRARI_H4_SH(vec0, vec1, vec2, vec3, FILTER_BITS);
SAT_SH4_SH(vec0, vec1, vec2, vec3, 7);
PCKEV_B4_UB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, res0, res1, res2,
res3);
ILVR_D2_UB(res1, res0, res3, res2, res0, res2);
XORI_B2_128_UB(res0, res2);
ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst2, dst4,
dst6);
ILVR_D2_UB(dst2, dst0, dst6, dst4, dst0, dst4);
AVER_UB2_UB(res0, dst0, res2, dst4, res0, res2);
ST4x8_UB(res0, res2, dst, dst_stride);
}
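
/* Dispatch 4-pixel-wide 8-tap cases to the 4x4 or 4x8 kernel above. */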
static void common_hz_8t_and_aver_dst_4w_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter,
int32_t height) {
if (4 == height) {
common_hz_8t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter);
} else if (8 == height) {
common_hz_8t_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride, filter);
}
}
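
/* 8-tap horizontal filter + destination averaging, 8 pixels wide,
 * processing four rows per loop iteration. */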
static void common_hz_8t_and_aver_dst_8w_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter,
int32_t height) {
int32_t loop_cnt;
v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
v16u8 mask0, mask1, mask2, mask3, dst0, dst1, dst2, dst3;
v8i16 filt, out0, out1, out2, out3;
mask0 = LD_UB(&mc_filt_mask_arr[0]);
src -= 3;
/* rearranging filter */
filt = LD_SH(filter);
SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = (height >> 2); loop_cnt--;) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
XORI_B4_128_SB(src0, src1, src2, src3);
src += (4 * src_stride);
HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
mask3, filt0, filt1, filt2, filt3, out0, out1,
out2, out3);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
SAT_SH4_SH(out0, out1, out2, out3, 7);
CONVERT_UB_AVG_ST8x4_UB(out0, out1, out2, out3, dst0, dst1, dst2, dst3,
dst, dst_stride);
dst += (4 * dst_stride);
}
}
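
/* 8-tap horizontal filter + destination averaging, 16 pixels wide,
 * processing two rows per loop iteration (each row as two 8-pixel halves). */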
static void common_hz_8t_and_aver_dst_16w_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter,
int32_t height) {
int32_t loop_cnt;
v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
v16u8 mask0, mask1, mask2, mask3, dst0, dst1;
v8i16 filt, out0, out1, out2, out3;
v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v8i16 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
mask0 = LD_UB(&mc_filt_mask_arr[0]);
src -= 3;
/* rearranging filter */
filt = LD_SH(filter);
SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = height >> 1; loop_cnt--;) {
LD_SB2(src, src_stride, src0, src2);
LD_SB2(src + 8, src_stride, src1, src3);
src += (2 * src_stride);
XORI_B4_128_SB(src0, src1, src2, src3);
VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8, vec12);
VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9, vec13);
VSHF_B4_SH(src2, src2, mask0, mask1, mask2, mask3, vec2, vec6, vec10,
vec14);
VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11,
vec15);
DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
vec2, vec3);
DOTP_SB4_SH(vec8, vec9, vec10, vec11, filt2, filt2, filt2, filt2, vec8,
vec9, vec10, vec11);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1,
vec2, vec3);
DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt3, filt3, filt3, filt3, vec8,
vec9, vec10, vec11);
ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, out1,
out2, out3);
LD_UB2(dst, dst_stride, dst0, dst1);
SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
SAT_SH4_SH(out0, out1, out2, out3, 7);
PCKEV_XORI128_AVG_ST_UB(out1, out0, dst0, dst);
dst += dst_stride;
PCKEV_XORI128_AVG_ST_UB(out3, out2, dst1, dst);
dst += dst_stride;
}
}
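
/* 8-tap horizontal filter + destination averaging, 32 pixels wide,
 * one row per loop iteration. */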
static void common_hz_8t_and_aver_dst_32w_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter,
int32_t height) {
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
v16u8 dst1, dst2, mask0, mask1, mask2, mask3;
v8i16 filt, out0, out1, out2, out3;
v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v8i16 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
mask0 = LD_UB(&mc_filt_mask_arr[0]);
src -= 3;
/* rearranging filter */
filt = LD_SH(filter);
SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = height; loop_cnt--;) {
src0 = LD_SB(src);
src2 = LD_SB(src + 16);
src3 = LD_SB(src + 24);
src1 = __msa_sldi_b(src2, src0, 8);
src += src_stride;
XORI_B4_128_SB(src0, src1, src2, src3);
VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8, vec12);
VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9, vec13);
VSHF_B4_SH(src2, src2, mask0, mask1, mask2, mask3, vec2, vec6, vec10,
vec14);
VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11,
vec15);
DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
vec2, vec3);
DOTP_SB4_SH(vec8, vec9, vec10, vec11, filt2, filt2, filt2, filt2, vec8,
vec9, vec10, vec11);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1,
vec2, vec3);
DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt3, filt3, filt3, filt3, vec8,
vec9, vec10, vec11);
ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, out1,
out2, out3);
SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
SAT_SH4_SH(out0, out1, out2, out3, 7);
LD_UB2(dst, 16, dst1, dst2);
PCKEV_XORI128_AVG_ST_UB(out1, out0, dst1, dst);
PCKEV_XORI128_AVG_ST_UB(out3, out2, dst2, dst + 16);
dst += dst_stride;
}
}
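
/* 8-tap horizontal filter + destination averaging, 64 pixels wide,
 * one row per iteration, processed as two 32-pixel halves. */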
static void common_hz_8t_and_aver_dst_64w_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter,
int32_t height) {
uint32_t loop_cnt, cnt;
v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;
v16u8 dst1, dst2, mask0, mask1, mask2, mask3;
v8i16 filt, out0, out1, out2, out3;
v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v8i16 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
mask0 = LD_UB(&mc_filt_mask_arr[0]);
src -= 3;
/* rearranging filter */
filt = LD_SH(filter);
SPLATI_H4_SB(filt, 0, 1, 2, 3, filt0, filt1, filt2, filt3);
mask1 = mask0 + 2;
mask2 = mask0 + 4;
mask3 = mask0 + 6;
for (loop_cnt = height; loop_cnt--;) {
for (cnt = 0; cnt < 2; ++cnt) {
src0 = LD_SB(&src[cnt << 5]);
src2 = LD_SB(&src[16 + (cnt << 5)]);
src3 = LD_SB(&src[24 + (cnt << 5)]);
src1 = __msa_sldi_b(src2, src0, 8);
XORI_B4_128_SB(src0, src1, src2, src3);
VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8,
vec12);
VSHF_B4_SH(src1, src1, mask0, mask1, mask2, mask3, vec1, vec5, vec9,
vec13);
VSHF_B4_SH(src2, src2, mask0, mask1, mask2, mask3, vec2, vec6, vec10,
vec14);
VSHF_B4_SH(src3, src3, mask0, mask1, mask2, mask3, vec3, vec7, vec11,
vec15);
DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0,
vec1, vec2, vec3);
DOTP_SB4_SH(vec8, vec9, vec10, vec11, filt2, filt2, filt2, filt2, vec8,
vec9, vec10, vec11);
DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0,
vec1, vec2, vec3);
DPADD_SB4_SH(vec12, vec13, vec14, vec15, filt3, filt3, filt3, filt3, vec8,
vec9, vec10, vec11);
ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, out1,
out2, out3);
SRARI_H4_SH(out0, out1, out2, out3, FILTER_BITS);
SAT_SH4_SH(out0, out1, out2, out3, 7);
LD_UB2(&dst[cnt << 5], 16, dst1, dst2);
PCKEV_XORI128_AVG_ST_UB(out1, out0, dst1, &dst[cnt << 5]);
PCKEV_XORI128_AVG_ST_UB(out3, out2, dst2, &dst[16 + (cnt << 5)]);
}
src += src_stride;
dst += dst_stride;
}
}
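
/* 2-tap (bilinear) horizontal filter + destination averaging for a 4x4
 * block; both taps are applied with a single unsigned-byte dot product,
 * and results are clamped to 255 before packing. */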
static void common_hz_2t_and_aver_dst_4x4_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter) {
v16i8 src0, src1, src2, src3, mask;
v16u8 filt0, dst0, dst1, dst2, dst3, vec0, vec1, res0, res1;
v8u16 vec2, vec3, const255, filt;
mask = LD_SB(&mc_filt_mask_arr[16]);
/* rearranging filter */
filt = LD_UH(filter);
filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
const255 = (v8u16)__msa_ldi_h(255);
LD_SB4(src, src_stride, src0, src1, src2, src3);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3);
SRARI_H2_UH(vec2, vec3, FILTER_BITS);
MIN_UH2_UH(vec2, vec3, const255);
PCKEV_B2_UB(vec2, vec2, vec3, vec3, res0, res1);
ILVR_W2_UB(dst1, dst0, dst3, dst2, dst0, dst2);
AVER_UB2_UB(res0, dst0, res1, dst2, res0, res1);
ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
}
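
/* 2-tap (bilinear) horizontal filter + destination averaging for a 4x8
 * block. */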
static void common_hz_2t_and_aver_dst_4x8_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter) {
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
v16u8 filt0, vec0, vec1, vec2, vec3, res0, res1, res2, res3;
v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
v8u16 vec4, vec5, vec6, vec7, const255, filt;
mask = LD_SB(&mc_filt_mask_arr[16]);
/* rearranging filter */
filt = LD_UH(filter);
filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
const255 = (v8u16)__msa_ldi_h(255);
LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7);
VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);
VSHF_B2_UB(src4, src5, src6, src7, mask, mask, vec2, vec3);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5,
vec6, vec7);
SRARI_H4_UH(vec4, vec5, vec6, vec7, FILTER_BITS);
MIN_UH4_UH(vec4, vec5, vec6, vec7, const255);
PCKEV_B4_UB(vec4, vec4, vec5, vec5, vec6, vec6, vec7, vec7, res0, res1, res2,
res3);
ILVR_W4_UB(dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6, dst0, dst2, dst4,
dst6);
AVER_UB4_UB(res0, dst0, res1, dst2, res2, dst4, res3, dst6, res0, res1, res2,
res3);
ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, dst_stride);
dst += (4 * dst_stride);
ST4x4_UB(res2, res3, 0, 1, 0, 1, dst, dst_stride);
}
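
/* Dispatch 4-pixel-wide bilinear cases to the 4x4 or 4x8 kernel above. */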
static void common_hz_2t_and_aver_dst_4w_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter,
int32_t height) {
if (4 == height) {
common_hz_2t_and_aver_dst_4x4_msa(src, src_stride, dst, dst_stride, filter);
} else if (8 == height) {
common_hz_2t_and_aver_dst_4x8_msa(src, src_stride, dst, dst_stride, filter);
}
}
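
/* 2-tap (bilinear) horizontal filter + destination averaging for an 8x4
 * block. */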
static void common_hz_2t_and_aver_dst_8x4_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter) {
v16i8 src0, src1, src2, src3, mask;
v16u8 filt0, dst0, dst1, dst2, dst3;
v8u16 vec0, vec1, vec2, vec3, const255, filt;
mask = LD_SB(&mc_filt_mask_arr[0]);
/* rearranging filter */
filt = LD_UH(filter);
filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
const255 = (v8u16)__msa_ldi_h(255);
LD_SB4(src, src_stride, src0, src1, src2, src3);
VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
vec2, vec3);
SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3,
dst, dst_stride);
}
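
/* 2-tap (bilinear) horizontal filter + destination averaging, 8 pixels
 * wide, for heights that are a multiple of 8 (8 rows always, 8 more when
 * height is 16). */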
static void common_hz_2t_and_aver_dst_8x8mult_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter,
int32_t height) {
v16i8 src0, src1, src2, src3, mask;
v16u8 filt0, dst0, dst1, dst2, dst3;
v8u16 vec0, vec1, vec2, vec3, const255, filt;
mask = LD_SB(&mc_filt_mask_arr[0]);
/* rearranging filter */
filt = LD_UH(filter);
filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
const255 = (v8u16)__msa_ldi_h(255);
LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
vec2, vec3);
SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3,
dst, dst_stride);
dst += (4 * dst_stride);
VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
vec2, vec3);
SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3,
dst, dst_stride);
dst += (4 * dst_stride);
if (16 == height) {
LD_SB4(src, src_stride, src0, src1, src2, src3);
src += (4 * src_stride);
VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
vec2, vec3);
SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
LD_SB4(src, src_stride, src0, src1, src2, src3);
PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3,
dst, dst_stride);
dst += (4 * dst_stride);
VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1);
VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1,
vec2, vec3);
SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
MIN_UH4_UH(vec0, vec1, vec2, vec3, const255);
PCKEV_AVG_ST8x4_UB(vec0, dst0, vec1, dst1, vec2, dst2, vec3, dst3,
dst, dst_stride);
}
}
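
/* Dispatch 8-pixel-wide bilinear cases to the 8x4 or 8x8/8x16 kernel
 * above. */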
static void common_hz_2t_and_aver_dst_8w_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter,
int32_t height) {
if (4 == height) {
common_hz_2t_and_aver_dst_8x4_msa(src, src_stride, dst, dst_stride, filter);
} else {
common_hz_2t_and_aver_dst_8x8mult_msa(src, src_stride, dst, dst_stride,
filter, height);
}
}
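
/* 2-tap (bilinear) horizontal filter + destination averaging, 16 pixels
 * wide, four rows at a time. */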
static void common_hz_2t_and_aver_dst_16w_msa(const uint8_t *src,
int32_t src_stride,
uint8_t *dst,
int32_t dst_stride,
int8_t *filter,
int32_t height) {
uint32_t loop_cnt;
v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;
v16u8 filt0, dst0, dst1, dst2, dst3;
v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
v8u16 res0, res1, res2, res3, res4, res5, res6, res7, const255, filt;
mask = LD_SB(&mc_filt_mask_arr[0]);
/* rearranging filter */
filt = LD_UH(filter);
filt0 = (v16u8)__msa_splati_h((v8i16)filt, 0);
const255 = (v8u16)__msa_ldi_h(255);
LD_SB4(src, src_stride, src0, src2, src4, src6);
LD_SB4(src + 8, src_stride, src1, src3, src5, src7);
src += (4 * src_stride);
VSHF_B2_UB(src0, src0, src1, src1, mask, mask, vec0, vec1);
VSHF_B2_UB(src2, src2, src3, src3, mask, mask, vec2, vec3);
VSHF_B2_UB(src4, src4, src5, src5, mask, mask, vec4, vec5);
VSHF_B2_UB(src6, src6, src7, src7, mask, mask, vec6, vec7);
DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, res0, res1,
res2, res3);
DOTP_UB4_UH(vec4, vec5, vec6, vec7, filt0, filt0, filt0, filt0, res4, res5,
res6, res7);
SRARI_H4_UH(res0, res1, res2, res3, FILTER_BITS);
SRARI_H4_UH(res4, res5, res6, res7, FILTER_BITS);
LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
MIN_UH4_UH(res0, res1, res2, res3, const255);
MIN_UH4_UH(res4, res5, res6, res7, const255);
PCKEV_AVG_ST_UB(res1, res0, dst0, dst);
dst += dst_stride;
PCKEV_AVG_ST_UB(res3, res2, dst1, dst);
dst += dst_stride;