Commit 3e702502 authored by James Zern, committed by Gerrit Code Review

Merge "Add vpx_highbd_idct16x16_10_add_neon()"

parents c7342f35 06200817
@@ -486,6 +486,15 @@ const PartialInvTxfmParam neon_partial_idct_tests[] = {
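// Tuple fields (PartialInvTxfmParam): forward transform, reference (full)
// inverse, partial inverse under test, tx size, last nonzero coefficient,
// bit depth, bytes per pixel.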
make_tuple(
&vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
&highbd_wrapper<vpx_highbd_idct16x16_38_add_neon>, TX_16X16, 38, 12, 2),
make_tuple(
&vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
&highbd_wrapper<vpx_highbd_idct16x16_10_add_neon>, TX_16X16, 10, 8, 2),
make_tuple(
&vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
&highbd_wrapper<vpx_highbd_idct16x16_10_add_neon>, TX_16X16, 10, 10, 2),
make_tuple(
&vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
&highbd_wrapper<vpx_highbd_idct16x16_10_add_neon>, TX_16X16, 10, 12, 2),
make_tuple(
&vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
&highbd_wrapper<vpx_highbd_idct16x16_1_add_neon>, TX_16X16, 1, 8, 2),
......
@@ -33,6 +33,19 @@ static INLINE void highbd_idct16x16_add_wrap_low_8x2(const int64x2x2_t *const t,
d1->val[1] = vcombine_s32(t32[3].val[0], t32[3].val[1]);
}
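
// Single-column (4-lane) counterpart of highbd_idct16x16_add_wrap_low_8x2():
// round-shift the 64-bit butterfly results down by DCT_CONST_BITS and narrow
// them back to two int32x4_t vectors.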
static INLINE void highbd_idct16x16_add_wrap_low_4x2(const int64x2x2_t *const t,
int32x4_t *const d0,
int32x4_t *const d1) {
int32x2x2_t t32[2];
t32[0].val[0] = vrshrn_n_s64(t[0].val[0], DCT_CONST_BITS);
t32[0].val[1] = vrshrn_n_s64(t[0].val[1], DCT_CONST_BITS);
t32[1].val[0] = vrshrn_n_s64(t[1].val[0], DCT_CONST_BITS);
t32[1].val[1] = vrshrn_n_s64(t[1].val[1], DCT_CONST_BITS);
*d0 = vcombine_s32(t32[0].val[0], t32[0].val[1]);
*d1 = vcombine_s32(t32[1].val[0], t32[1].val[1]);
}
static INLINE int32x4x2_t
highbd_idct16x16_add_wrap_low_8x1(const int64x2x2_t *const t) {
int32x2x2_t t32[2];
@@ -47,6 +60,14 @@ highbd_idct16x16_add_wrap_low_8x1(const int64x2x2_t *const t) {
return d;
}
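
// Same rounding narrow for a single int64x2x2_t, producing one int32x4_t.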
static INLINE int32x4_t highbd_idct16x16_add_wrap_low_4x1(const int64x2x2_t t) {
int32x2x2_t t32;
t32.val[0] = vrshrn_n_s64(t.val[0], DCT_CONST_BITS);
t32.val[1] = vrshrn_n_s64(t.val[1], DCT_CONST_BITS);
return vcombine_s32(t32.val[0], t32.val[1]);
}
static INLINE void highbd_idct_cospi_2_30(const int32x4x2_t s0,
const int32x4x2_t s1,
const int32x4_t cospi_2_30_10_22,
@@ -336,6 +357,27 @@ static INLINE void highbd_idct_cospi_8_24_q_kernel(
vget_low_s32(cospi_0_8_16_24), 1);
}
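
// 4-lane version of highbd_idct_cospi_8_24_q_kernel(): the 64-bit butterfly
// t[0] = s0 * cospi_24 - s1 * cospi_8, t[1] = s1 * cospi_24 + s0 * cospi_8.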
static INLINE void highbd_idct_cospi_8_24_d_kernel(
const int32x4_t s0, const int32x4_t s1, const int32x4_t cospi_0_8_16_24,
int64x2x2_t *const t) {
t[0].val[0] =
vmull_lane_s32(vget_low_s32(s0), vget_high_s32(cospi_0_8_16_24), 1);
t[0].val[1] =
vmull_lane_s32(vget_high_s32(s0), vget_high_s32(cospi_0_8_16_24), 1);
t[1].val[0] =
vmull_lane_s32(vget_low_s32(s1), vget_high_s32(cospi_0_8_16_24), 1);
t[1].val[1] =
vmull_lane_s32(vget_high_s32(s1), vget_high_s32(cospi_0_8_16_24), 1);
t[0].val[0] = vmlsl_lane_s32(t[0].val[0], vget_low_s32(s1),
vget_low_s32(cospi_0_8_16_24), 1);
t[0].val[1] = vmlsl_lane_s32(t[0].val[1], vget_high_s32(s1),
vget_low_s32(cospi_0_8_16_24), 1);
t[1].val[0] = vmlal_lane_s32(t[1].val[0], vget_low_s32(s0),
vget_low_s32(cospi_0_8_16_24), 1);
t[1].val[1] = vmlal_lane_s32(t[1].val[1], vget_high_s32(s0),
vget_low_s32(cospi_0_8_16_24), 1);
}
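
// Runs the kernel above and narrows both results back to 32 bits.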
static INLINE void highbd_idct_cospi_8_24_q(const int32x4x2_t s0,
const int32x4x2_t s1,
const int32x4_t cospi_0_8_16_24,
@@ -347,6 +389,17 @@ static INLINE void highbd_idct_cospi_8_24_q(const int32x4x2_t s0,
highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
}
static INLINE void highbd_idct_cospi_8_24_d(const int32x4_t s0,
const int32x4_t s1,
const int32x4_t cospi_0_8_16_24,
int32x4_t *const d0,
int32x4_t *const d1) {
int64x2x2_t t[2];
highbd_idct_cospi_8_24_d_kernel(s0, s1, cospi_0_8_16_24, t);
highbd_idct16x16_add_wrap_low_4x2(t, d0, d1);
}
static INLINE void highbd_idct_cospi_8_24_neg_q(const int32x4x2_t s0,
const int32x4x2_t s1,
const int32x4_t cospi_0_8_16_24,
@@ -362,6 +415,19 @@ static INLINE void highbd_idct_cospi_8_24_neg_q(const int32x4x2_t s0,
highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
}
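
// As highbd_idct_cospi_8_24_d(), but negating the second result before it is
// narrowed; the subtraction from zero stands in for vnegq_s64(), which is
// AArch64-only.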
static INLINE void highbd_idct_cospi_8_24_neg_d(const int32x4_t s0,
const int32x4_t s1,
const int32x4_t cospi_0_8_16_24,
int32x4_t *const d0,
int32x4_t *const d1) {
int64x2x2_t t[2];
highbd_idct_cospi_8_24_d_kernel(s0, s1, cospi_0_8_16_24, t);
t[1].val[0] = vsubq_s64(vdupq_n_s64(0), t[1].val[0]);
t[1].val[1] = vsubq_s64(vdupq_n_s64(0), t[1].val[1]);
highbd_idct16x16_add_wrap_low_4x2(t, d0, d1);
}
static INLINE void highbd_idct_cospi_16_16_q(const int32x4x2_t s0,
const int32x4x2_t s1,
const int32x4_t cospi_0_8_16_24,
@@ -396,8 +462,30 @@ static INLINE void highbd_idct_cospi_16_16_q(const int32x4x2_t s0,
highbd_idct16x16_add_wrap_low_8x2(t, d0, d1);
}
-static INLINE void highbd_idct16x16_add_stage7(const int32x4x2_t *const step2,
-                                               int32x4x2_t *const out) {
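
// 4-lane version of highbd_idct_cospi_16_16_q(): t[2] holds the shared
// s1 * cospi_16 product, so d0 = (s1 - s0) * cospi_16 and
// d1 = (s1 + s0) * cospi_16.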
static INLINE void highbd_idct_cospi_16_16_d(const int32x4_t s0,
const int32x4_t s1,
const int32x4_t cospi_0_8_16_24,
int32x4_t *const d0,
int32x4_t *const d1) {
int64x2x2_t t[3];
t[2].val[0] =
vmull_lane_s32(vget_low_s32(s1), vget_high_s32(cospi_0_8_16_24), 0);
t[2].val[1] =
vmull_lane_s32(vget_high_s32(s1), vget_high_s32(cospi_0_8_16_24), 0);
t[0].val[0] = vmlsl_lane_s32(t[2].val[0], vget_low_s32(s0),
vget_high_s32(cospi_0_8_16_24), 0);
t[0].val[1] = vmlsl_lane_s32(t[2].val[1], vget_high_s32(s0),
vget_high_s32(cospi_0_8_16_24), 0);
t[1].val[0] = vmlal_lane_s32(t[2].val[0], vget_low_s32(s0),
vget_high_s32(cospi_0_8_16_24), 0);
t[1].val[1] = vmlal_lane_s32(t[2].val[1], vget_high_s32(s0),
vget_high_s32(cospi_0_8_16_24), 0);
highbd_idct16x16_add_wrap_low_4x2(t, d0, d1);
}
static INLINE void highbd_idct16x16_add_stage7_dual(
const int32x4x2_t *const step2, int32x4x2_t *const out) {
out[0].val[0] = vaddq_s32(step2[0].val[0], step2[15].val[0]);
out[0].val[1] = vaddq_s32(step2[0].val[1], step2[15].val[1]);
out[1].val[0] = vaddq_s32(step2[1].val[0], step2[14].val[0]);
@@ -432,6 +520,26 @@ static INLINE void highbd_idct16x16_add_stage7(const int32x4x2_t *const step2,
out[15].val[1] = vsubq_s32(step2[0].val[1], step2[15].val[1]);
}
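
// Stage 7 butterfly for a single 4-lane column: out[i] = step2[i] +
// step2[15 - i] for the first eight outputs and step2[15 - i] - step2[i] for
// the rest.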
static INLINE void highbd_idct16x16_add_stage7(const int32x4_t *const step2,
int32x4_t *const out) {
out[0] = vaddq_s32(step2[0], step2[15]);
out[1] = vaddq_s32(step2[1], step2[14]);
out[2] = vaddq_s32(step2[2], step2[13]);
out[3] = vaddq_s32(step2[3], step2[12]);
out[4] = vaddq_s32(step2[4], step2[11]);
out[5] = vaddq_s32(step2[5], step2[10]);
out[6] = vaddq_s32(step2[6], step2[9]);
out[7] = vaddq_s32(step2[7], step2[8]);
out[8] = vsubq_s32(step2[7], step2[8]);
out[9] = vsubq_s32(step2[6], step2[9]);
out[10] = vsubq_s32(step2[5], step2[10]);
out[11] = vsubq_s32(step2[4], step2[11]);
out[12] = vsubq_s32(step2[3], step2[12]);
out[13] = vsubq_s32(step2[2], step2[13]);
out[14] = vsubq_s32(step2[1], step2[14]);
out[15] = vsubq_s32(step2[0], step2[15]);
}
static INLINE void highbd_idct16x16_store_pass1(const int32x4x2_t *const out,
int32_t *output) {
// Save the result into output
@@ -745,7 +853,7 @@ static void highbd_idct16x16_256_add_half1d(const int32_t *input,
step2[15] = step1[15];
// stage 7
-  highbd_idct16x16_add_stage7(step2, out);
+  highbd_idct16x16_add_stage7_dual(step2, out);
if (output) {
highbd_idct16x16_store_pass1(out, output);
@@ -765,6 +873,15 @@ static INLINE int32x4x2_t highbd_idct_cospi_lane0_dual(const int32x4x2_t s,
return highbd_idct16x16_add_wrap_low_8x1(t);
}
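
// Multiplies a single 4-lane vector by lane 0 of coef in 64-bit precision,
// then rounds and narrows back to 32 bits.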
static INLINE int32x4_t highbd_idct_cospi_lane0(const int32x4_t s,
const int32x2_t coef) {
int64x2x2_t t;
t.val[0] = vmull_lane_s32(vget_low_s32(s), coef, 0);
t.val[1] = vmull_lane_s32(vget_high_s32(s), coef, 0);
return highbd_idct16x16_add_wrap_low_4x1(t);
}
static INLINE int32x4x2_t highbd_idct_cospi_lane1_dual(const int32x4x2_t s,
const int32x2_t coef) {
int64x2x2_t t[2];
@@ -776,6 +893,15 @@ static INLINE int32x4x2_t highbd_idct_cospi_lane1_dual(const int32x4x2_t s,
return highbd_idct16x16_add_wrap_low_8x1(t);
}
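
// As highbd_idct_cospi_lane0(), but multiplying by lane 1 of coef.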
static INLINE int32x4_t highbd_idct_cospi_lane1(const int32x4_t s,
const int32x2_t coef) {
int64x2x2_t t;
t.val[0] = vmull_lane_s32(vget_low_s32(s), coef, 1);
t.val[1] = vmull_lane_s32(vget_high_s32(s), coef, 1);
return highbd_idct16x16_add_wrap_low_4x1(t);
}
static INLINE int32x4x2_t highbd_idct_add_dual(const int32x4x2_t s0,
const int32x4x2_t s1) {
int32x4x2_t t;
@@ -938,9 +1064,274 @@ static void highbd_idct16x16_38_add_half1d(const int32_t *input,
step2[14] = step1[14];
step2[15] = step1[15];
// stage 7
highbd_idct16x16_add_stage7_dual(step2, out);
if (output) {
highbd_idct16x16_store_pass1(out, output);
} else {
highbd_idct16x16_add_store(out, dest, stride, bd);
}
}
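
// With eob <= 10, only the top-left 4x4 corner of the 16x16 coefficient block
// can contain nonzero values, so pass 1 only transforms the first four rows.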
void highbd_idct16x16_10_add_half1d_pass1(const tran_low_t *input,
int32_t *output) {
const int32x4_t cospi_0_8_16_24 = vld1q_s32(kCospi32 + 0);
const int32x4_t cospi_4_12_20N_28 = vld1q_s32(kCospi32 + 4);
const int32x4_t cospi_2_30_10_22 = vld1q_s32(kCospi32 + 8);
const int32x4_t cospi_6_26_14_18N = vld1q_s32(kCospi32 + 12);
int32x4_t in[4], step1[16], step2[16], out[16];
// Load input (4x4)
in[0] = vld1q_s32(input);
input += 16;
in[1] = vld1q_s32(input);
input += 16;
in[2] = vld1q_s32(input);
input += 16;
in[3] = vld1q_s32(input);
// Transpose
transpose_s32_4x4(&in[0], &in[1], &in[2], &in[3]);
// stage 1
step1[0] = in[0 / 2];
step1[4] = in[4 / 2];
step1[8] = in[2 / 2];
step1[12] = in[6 / 2];
// stage 2
step2[0] = step1[0];
step2[4] = step1[4];
step2[8] = highbd_idct_cospi_lane1(step1[8], vget_low_s32(cospi_2_30_10_22));
step2[11] =
highbd_idct_cospi_lane1(step1[12], vget_low_s32(cospi_6_26_14_18N));
step2[12] =
highbd_idct_cospi_lane0(step1[12], vget_low_s32(cospi_6_26_14_18N));
step2[15] = highbd_idct_cospi_lane0(step1[8], vget_low_s32(cospi_2_30_10_22));
// stage 3
step1[0] = step2[0];
step1[4] =
highbd_idct_cospi_lane1(step2[4], vget_high_s32(cospi_4_12_20N_28));
step1[7] = highbd_idct_cospi_lane0(step2[4], vget_low_s32(cospi_4_12_20N_28));
step1[8] = step2[8];
step1[9] = step2[8];
step1[10] = step2[11];
step1[11] = step2[11];
step1[12] = step2[12];
step1[13] = step2[12];
step1[14] = step2[15];
step1[15] = step2[15];
// stage 4
step2[0] = step2[1] =
highbd_idct_cospi_lane0(step1[0], vget_high_s32(cospi_0_8_16_24));
step2[4] = step1[4];
step2[5] = step1[4];
step2[6] = step1[7];
step2[7] = step1[7];
step2[8] = step1[8];
highbd_idct_cospi_8_24_d(step1[14], step1[9], cospi_0_8_16_24, &step2[9],
&step2[14]);
highbd_idct_cospi_8_24_neg_d(step1[13], step1[10], cospi_0_8_16_24,
&step2[13], &step2[10]);
step2[11] = step1[11];
step2[12] = step1[12];
step2[15] = step1[15];
// stage 5
step1[0] = step2[0];
step1[1] = step2[1];
step1[2] = step2[1];
step1[3] = step2[0];
step1[4] = step2[4];
highbd_idct_cospi_16_16_d(step2[5], step2[6], cospi_0_8_16_24, &step1[5],
&step1[6]);
step1[7] = step2[7];
step1[8] = vaddq_s32(step2[8], step2[11]);
step1[9] = vaddq_s32(step2[9], step2[10]);
step1[10] = vsubq_s32(step2[9], step2[10]);
step1[11] = vsubq_s32(step2[8], step2[11]);
step1[12] = vsubq_s32(step2[15], step2[12]);
step1[13] = vsubq_s32(step2[14], step2[13]);
step1[14] = vaddq_s32(step2[14], step2[13]);
step1[15] = vaddq_s32(step2[15], step2[12]);
// stage 6
step2[0] = vaddq_s32(step1[0], step1[7]);
step2[1] = vaddq_s32(step1[1], step1[6]);
step2[2] = vaddq_s32(step1[2], step1[5]);
step2[3] = vaddq_s32(step1[3], step1[4]);
step2[4] = vsubq_s32(step1[3], step1[4]);
step2[5] = vsubq_s32(step1[2], step1[5]);
step2[6] = vsubq_s32(step1[1], step1[6]);
step2[7] = vsubq_s32(step1[0], step1[7]);
highbd_idct_cospi_16_16_d(step1[10], step1[13], cospi_0_8_16_24, &step2[10],
&step2[13]);
highbd_idct_cospi_16_16_d(step1[11], step1[12], cospi_0_8_16_24, &step2[11],
&step2[12]);
step2[8] = step1[8];
step2[9] = step1[9];
step2[14] = step1[14];
step2[15] = step1[15];
// stage 7
highbd_idct16x16_add_stage7(step2, out);
// pass 1: save the result into output
vst1q_s32(output, out[0]);
output += 4;
vst1q_s32(output, out[1]);
output += 4;
vst1q_s32(output, out[2]);
output += 4;
vst1q_s32(output, out[3]);
output += 4;
vst1q_s32(output, out[4]);
output += 4;
vst1q_s32(output, out[5]);
output += 4;
vst1q_s32(output, out[6]);
output += 4;
vst1q_s32(output, out[7]);
output += 4;
vst1q_s32(output, out[8]);
output += 4;
vst1q_s32(output, out[9]);
output += 4;
vst1q_s32(output, out[10]);
output += 4;
vst1q_s32(output, out[11]);
output += 4;
vst1q_s32(output, out[12]);
output += 4;
vst1q_s32(output, out[13]);
output += 4;
vst1q_s32(output, out[14]);
output += 4;
vst1q_s32(output, out[15]);
}
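
// Pass 2 loads a 4x8 block of the pass-1 intermediate, runs the 16-point
// transform on those eight columns, and either stores the result to output
// (when non-NULL) or adds it to dest.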
void highbd_idct16x16_10_add_half1d_pass2(const int32_t *input,
int32_t *const output,
uint16_t *const dest,
const int stride, const int bd) {
const int32x4_t cospi_0_8_16_24 = vld1q_s32(kCospi32 + 0);
const int32x4_t cospi_4_12_20N_28 = vld1q_s32(kCospi32 + 4);
const int32x4_t cospi_2_30_10_22 = vld1q_s32(kCospi32 + 8);
const int32x4_t cospi_6_26_14_18N = vld1q_s32(kCospi32 + 12);
int32x4x2_t in[4], step1[16], step2[16], out[16];
// Load input (4x8)
in[0].val[0] = vld1q_s32(input);
input += 4;
in[0].val[1] = vld1q_s32(input);
input += 4;
in[1].val[0] = vld1q_s32(input);
input += 4;
in[1].val[1] = vld1q_s32(input);
input += 4;
in[2].val[0] = vld1q_s32(input);
input += 4;
in[2].val[1] = vld1q_s32(input);
input += 4;
in[3].val[0] = vld1q_s32(input);
input += 4;
in[3].val[1] = vld1q_s32(input);
// Transpose
transpose_s32_4x8(&in[0].val[0], &in[0].val[1], &in[1].val[0], &in[1].val[1],
&in[2].val[0], &in[2].val[1], &in[3].val[0], &in[3].val[1]);
// stage 1
step1[0] = in[0 / 2];
step1[4] = in[4 / 2];
step1[8] = in[2 / 2];
step1[12] = in[6 / 2];
// stage 2
step2[0] = step1[0];
step2[4] = step1[4];
step2[8] =
highbd_idct_cospi_lane1_dual(step1[8], vget_low_s32(cospi_2_30_10_22));
step2[11] =
highbd_idct_cospi_lane1_dual(step1[12], vget_low_s32(cospi_6_26_14_18N));
step2[12] =
highbd_idct_cospi_lane0_dual(step1[12], vget_low_s32(cospi_6_26_14_18N));
step2[15] =
highbd_idct_cospi_lane0_dual(step1[8], vget_low_s32(cospi_2_30_10_22));
// stage 3
step1[0] = step2[0];
step1[4] =
highbd_idct_cospi_lane1_dual(step2[4], vget_high_s32(cospi_4_12_20N_28));
step1[7] =
highbd_idct_cospi_lane0_dual(step2[4], vget_low_s32(cospi_4_12_20N_28));
step1[8] = step2[8];
step1[9] = step2[8];
step1[10] = step2[11];
step1[11] = step2[11];
step1[12] = step2[12];
step1[13] = step2[12];
step1[14] = step2[15];
step1[15] = step2[15];
// stage 4
step2[0] = step2[1] =
highbd_idct_cospi_lane0_dual(step1[0], vget_high_s32(cospi_0_8_16_24));
step2[4] = step1[4];
step2[5] = step1[4];
step2[6] = step1[7];
step2[7] = step1[7];
step2[8] = step1[8];
highbd_idct_cospi_8_24_q(step1[14], step1[9], cospi_0_8_16_24, &step2[9],
&step2[14]);
highbd_idct_cospi_8_24_neg_q(step1[13], step1[10], cospi_0_8_16_24,
&step2[13], &step2[10]);
step2[11] = step1[11];
step2[12] = step1[12];
step2[15] = step1[15];
// stage 5
step1[0] = step2[0];
step1[1] = step2[1];
step1[2] = step2[1];
step1[3] = step2[0];
step1[4] = step2[4];
highbd_idct_cospi_16_16_q(step2[5], step2[6], cospi_0_8_16_24, &step1[5],
&step1[6]);
step1[7] = step2[7];
step1[8] = highbd_idct_add_dual(step2[8], step2[11]);
step1[9] = highbd_idct_add_dual(step2[9], step2[10]);
step1[10] = highbd_idct_sub_dual(step2[9], step2[10]);
step1[11] = highbd_idct_sub_dual(step2[8], step2[11]);
step1[12] = highbd_idct_sub_dual(step2[15], step2[12]);
step1[13] = highbd_idct_sub_dual(step2[14], step2[13]);
step1[14] = highbd_idct_add_dual(step2[14], step2[13]);
step1[15] = highbd_idct_add_dual(step2[15], step2[12]);
// stage 6
step2[0] = highbd_idct_add_dual(step1[0], step1[7]);
step2[1] = highbd_idct_add_dual(step1[1], step1[6]);
step2[2] = highbd_idct_add_dual(step1[2], step1[5]);
step2[3] = highbd_idct_add_dual(step1[3], step1[4]);
step2[4] = highbd_idct_sub_dual(step1[3], step1[4]);
step2[5] = highbd_idct_sub_dual(step1[2], step1[5]);
step2[6] = highbd_idct_sub_dual(step1[1], step1[6]);
step2[7] = highbd_idct_sub_dual(step1[0], step1[7]);
highbd_idct_cospi_16_16_q(step1[10], step1[13], cospi_0_8_16_24, &step2[10],
&step2[13]);
highbd_idct_cospi_16_16_q(step1[11], step1[12], cospi_0_8_16_24, &step2[11],
&step2[12]);
step2[8] = step1[8];
step2[9] = step1[9];
step2[14] = step1[14];
step2[15] = step1[15];
// stage 7
highbd_idct16x16_add_stage7_dual(step2, out);
if (output) {
highbd_idct16x16_store_pass1(out, output);
} else {
@@ -1026,6 +1417,42 @@ void vpx_highbd_idct16x16_38_add_neon(const tran_low_t *input, uint8_t *dest8,
}
}
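
// For bd == 8, the 16-bit helpers from idct16x16_add_neon.c (made non-static
// by this change) are reused with highbd_flag = 1 so that they operate on the
// uint16_t destination.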
void vpx_highbd_idct16x16_10_add_neon(const tran_low_t *input, uint8_t *dest8,
int stride, int bd) {
uint16_t *dest = CONVERT_TO_SHORTPTR(dest8);
if (bd == 8) {
int16_t row_idct_output[4 * 16];
// pass 1
// Parallel idct on the upper 4 rows
idct16x16_10_add_half1d_pass1(input, row_idct_output);
// pass 2
// Parallel idct to get the left 8 columns
idct16x16_10_add_half1d_pass2(row_idct_output, NULL, dest, stride, 1);
// Parallel idct to get the right 8 columns
idct16x16_10_add_half1d_pass2(row_idct_output + 4 * 8, NULL, dest + 8,
stride, 1);
} else {
int32_t row_idct_output[4 * 16];
// pass 1
// Parallel idct on the upper 4 rows
highbd_idct16x16_10_add_half1d_pass1(input, row_idct_output);
// pass 2
// Parallel idct to get the left 8 columns
highbd_idct16x16_10_add_half1d_pass2(row_idct_output, NULL, dest, stride,
bd);
// Parallel idct to get the right 8 columns
highbd_idct16x16_10_add_half1d_pass2(row_idct_output + 4 * 8, NULL,
dest + 8, stride, bd);
}
}
static INLINE void highbd_idct16x16_1_add_pos_kernel(uint16_t **dest,
const int stride,
const int16x8_t res,
......
@@ -484,8 +484,7 @@ void idct16x16_38_add_half1d(const void *const input, int16_t *const output,
}
}
-static void idct16x16_10_add_half1d_pass1(const tran_low_t *input,
-                                          int16_t *output) {
+void idct16x16_10_add_half1d_pass1(const tran_low_t *input, int16_t *output) {
const int16x8_t cospis0 = vld1q_s16(kCospi);
const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
const int16x8_t cospisd0 = vaddq_s16(cospis0, cospis0);
@@ -638,8 +637,9 @@ static void idct16x16_10_add_half1d_pass1(const tran_low_t *input,
vst1_s16(output, out[15]);
}
-static void idct16x16_10_add_half1d_pass2(const int16_t *input, int16_t *output,
-                                          uint8_t *dest, const int stride) {
+void idct16x16_10_add_half1d_pass2(const int16_t *input, int16_t *const output,
+                                   void *const dest, const int stride,
+                                   const int highbd_flag) {
const int16x8_t cospis0 = vld1q_s16(kCospi);
const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
const int16x8_t cospisd0 = vaddq_s16(cospis0, cospis0);
@@ -751,27 +751,16 @@ static void idct16x16_10_add_half1d_pass2(const int16_t *input, int16_t *output,
step2[15] = step1[15];
// stage 7
-  out[0] = vaddq_s16(step2[0], step2[15]);
-  out[1] = vaddq_s16(step2[1], step2[14]);
-  out[2] = vaddq_s16(step2[2], step2[13]);
-  out[3] = vaddq_s16(step2[3], step2[12]);
-  out[4] = vaddq_s16(step2[4], step2[11]);
-  out[5] = vaddq_s16(step2[5], step2[10]);
-  out[6] = vaddq_s16(step2[6], step2[9]);
-  out[7] = vaddq_s16(step2[7], step2[8]);
-  out[8] = vsubq_s16(step2[7], step2[8]);
-  out[9] = vsubq_s16(step2[6], step2[9]);
-  out[10] = vsubq_s16(step2[5], step2[10]);
-  out[11] = vsubq_s16(step2[4], step2[11]);
-  out[12] = vsubq_s16(step2[3], step2[12]);
-  out[13] = vsubq_s16(step2[2], step2[13]);
-  out[14] = vsubq_s16(step2[1], step2[14]);
-  out[15] = vsubq_s16(step2[0], step2[15]);
+  idct16x16_add_stage7(step2, out);