/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <tmmintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/inv_txfm_ssse3.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"

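// _mm_mulhrs_epi16(a, b) computes ROUND_POWER_OF_TWO(a * b, 15). The
// cospi_*_64 constants are 14-bit fixed-point values, so pre-doubling them
// below makes the product round at bit 14, matching dct_const_round_shift().
// A "partial" butterfly is the degenerate case where the other butterfly input
// is zero (those coefficients are skipped by these reduced-eob paths); the
// negated constants used by some callers fold in the subtraction side of the
// original butterfly.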
static INLINE void partial_butterfly_ssse3(const __m128i in, const int c0,
                                           const int c1, __m128i *const out0,
                                           __m128i *const out1) {
  const __m128i cst0 = _mm_set1_epi16(2 * c0);
  const __m128i cst1 = _mm_set1_epi16(2 * c1);
  *out0 = _mm_mulhrs_epi16(in, cst0);
  *out1 = _mm_mulhrs_epi16(in, cst1);
}

static INLINE __m128i partial_butterfly_cospi16_ssse3(const __m128i in) {
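  // Both outputs of this butterfly are scaled by cospi_16_64, so a single
  // multiply serves; callers duplicate the result where needed.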
  const __m128i coef_pair = _mm_set1_epi16(2 * cospi_16_64);
  return _mm_mulhrs_epi16(in, coef_pair);
}

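// Only the upper-left 4x4 block of coefficients is loaded below; the rest of
// the 8x8 block is treated as zero on this reduced-coefficient path.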
void vpx_idct8x8_12_add_ssse3(const tran_low_t *input, uint8_t *dest,
                              int stride) {
  __m128i io[8];

  io[0] = load_input_data4(input + 0 * 8);
  io[1] = load_input_data4(input + 1 * 8);
  io[2] = load_input_data4(input + 2 * 8);
  io[3] = load_input_data4(input + 3 * 8);

  idct8x8_12_add_kernel_ssse3(io);
  write_buffer_8x8(io, dest, stride);
}

// Group the coefficient calculation into smaller functions to prevent register
// spills to the stack in the 32x32 idct optimizations:
// quarter_1: 0-7
// quarter_2: 8-15
// quarter_3_4: 16-23, 24-31
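// Each __m128i holds eight 16-bit values (one value per column of an 8-wide
// strip), so in[32]/out[32] describe one 8x32 strip per call.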

// For each 8x32 block __m128i in[32]:
// inputs with index 0, 4
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_1(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 3
  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);

  // stage 4
  step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
  step2[4] = step1[4];
  step2[5] = step1[4];
  step2[6] = step1[7];
  step2[7] = step1[7];

  // stage 5
  step1[0] = step2[0];
  step1[1] = step2[0];
  step1[2] = step2[0];
  step1[3] = step2[0];
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}

// For each 8x32 block __m128i in[32]:
// inputs with index 2, 6
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_2(const __m128i *const in /*in[32]*/,
                                            __m128i *const out /*out[16]*/) {
  __m128i step1[16], step2[16];

  // stage 2
  partial_butterfly_ssse3(in[2], cospi_30_64, cospi_2_64, &step2[8],
                          &step2[15]);
  partial_butterfly_ssse3(in[6], -cospi_26_64, cospi_6_64, &step2[11],
                          &step2[12]);

  // stage 3
  step1[8] = step2[8];
  step1[9] = step2[8];
  step1[14] = step2[15];
  step1[15] = step2[15];
  step1[10] = step2[11];
  step1[11] = step2[11];
  step1[12] = step2[12];
  step1[13] = step2[12];

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_34_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_34_8x32_quarter_1(in, temp);
  idct32_34_8x32_quarter_2(in, temp);
  // stage 7
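  // add_sub_butterfly() forms out[i] = temp[i] + temp[15 - i] and
  // out[15 - i] = temp[i] - temp[15 - i], yielding output pixels 0-15.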
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32]:
// inputs with odd index 1, 3, 5, 7
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_34_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32];

  // stage 1
  partial_butterfly_ssse3(in[1], cospi_31_64, cospi_1_64, &step1[16],
                          &step1[31]);
  partial_butterfly_ssse3(in[7], -cospi_25_64, cospi_7_64, &step1[19],
                          &step1[28]);
  partial_butterfly_ssse3(in[5], cospi_27_64, cospi_5_64, &step1[20],
                          &step1[27]);
  partial_butterfly_ssse3(in[3], -cospi_29_64, cospi_3_64, &step1[23],
                          &step1[24]);

  // stage 3
  butterfly(step1[31], step1[16], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step1[28], step1[19], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  butterfly(step1[27], step1[20], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step1[24], step1[23], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

void idct32_34_8x32_ssse3(const __m128i *const in /*in[32]*/,
                          __m128i *const out /*out[32]*/) {
  __m128i temp[32];

  idct32_34_8x32_quarter_1_2(in, temp);
  idct32_34_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}

// Only upper-left 8x8 has non-zero coeff
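// (at most 34 of them), so a single 8x8 load and transpose feeds the row pass.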
void vpx_idct32x32_34_add_ssse3(const tran_low_t *input, uint8_t *dest,
                                int stride) {
  __m128i io[32], col[32];
  int i;

  // Load input data. Only need to load the top left 8x8 block.
  load_transpose_16bit_8x8(input, 32, io);
  idct32_34_8x32_ssse3(io, col);

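  // Column pass: each iteration transforms one strip of 8 columns and adds its
  // 32 output rows to the destination.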
  for (i = 0; i < 32; i += 8) {
    int j;
    transpose_16bit_8x8(col + i, io);
    idct32_34_8x32_ssse3(io, io);

    for (j = 0; j < 32; ++j) {
      write_buffer_8x1(dest + j * stride, io[j]);
    }

    dest += 8;
  }
}

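// The 135-coefficient path keeps inputs 0-15 of each transposed row, so each
// quarter below starts from twice as many non-zero inputs as its counterpart
// in the 34-coefficient path above.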
// For each 8x32 block __m128i in[32]:
// inputs with index 0, 4, 8, 12
// output pixels: 0-7 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_1(const __m128i *const in /*in[32]*/,
                                             __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 3
  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
  partial_butterfly_ssse3(in[12], -cospi_20_64, cospi_12_64, &step1[5],
                          &step1[6]);

  // stage 4
  step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
  partial_butterfly_ssse3(in[8], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
  step2[4] = _mm_add_epi16(step1[4], step1[5]);
  step2[5] = _mm_sub_epi16(step1[4], step1[5]);
  step2[6] = _mm_sub_epi16(step1[7], step1[6]);
  step2[7] = _mm_add_epi16(step1[7], step1[6]);

  // stage 5
  step1[0] = _mm_add_epi16(step2[0], step2[3]);
  step1[1] = _mm_add_epi16(step2[0], step2[2]);
  step1[2] = _mm_sub_epi16(step2[0], step2[2]);
  step1[3] = _mm_sub_epi16(step2[0], step2[3]);
  step1[4] = step2[4];
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[7] = step2[7];

  // stage 6
  out[0] = _mm_add_epi16(step1[0], step1[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step1[4]);
  out[4] = _mm_sub_epi16(step1[3], step1[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step1[7]);
}

// For each 8x32 block __m128i in[32]:
// inputs with index 2, 6, 10, 14
// output pixels: 8-15 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_2(const __m128i *const in /*in[32]*/,
                                             __m128i *const out /*out[16]*/) {
  __m128i step1[16], step2[16];

  // stage 2
  partial_butterfly_ssse3(in[2], cospi_30_64, cospi_2_64, &step2[8],
                          &step2[15]);
  partial_butterfly_ssse3(in[14], -cospi_18_64, cospi_14_64, &step2[9],
                          &step2[14]);
  partial_butterfly_ssse3(in[10], cospi_22_64, cospi_10_64, &step2[10],
                          &step2[13]);
  partial_butterfly_ssse3(in[6], -cospi_26_64, cospi_6_64, &step2[11],
                          &step2[12]);

  // stage 3
  step1[8] = _mm_add_epi16(step2[8], step2[9]);
  step1[9] = _mm_sub_epi16(step2[8], step2[9]);
  step1[10] = _mm_sub_epi16(step2[11], step2[10]);
  step1[11] = _mm_add_epi16(step2[11], step2[10]);
  step1[12] = _mm_add_epi16(step2[12], step2[13]);
  step1[13] = _mm_sub_epi16(step2[12], step2[13]);
  step1[14] = _mm_sub_epi16(step2[15], step2[14]);
  step1[15] = _mm_add_epi16(step2[15], step2[14]);

  idct32_8x32_quarter_2_stage_4_to_6(step1, out);
}

static INLINE void idct32_135_8x32_quarter_1_2(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i temp[16];
  idct32_135_8x32_quarter_1(in, temp);
  idct32_135_8x32_quarter_2(in, temp);
  // stage 7
  add_sub_butterfly(temp, out, 16);
}

// For each 8x32 block __m128i in[32]:
// inputs with odd index 1, 3, 5, 7, 9, 11, 13, 15
// output pixels: 16-23, 24-31 in __m128i out[32]
static INLINE void idct32_135_8x32_quarter_3_4(
    const __m128i *const in /*in[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step1[32], step2[32];

  // stage 1
  partial_butterfly_ssse3(in[1], cospi_31_64, cospi_1_64, &step1[16],
                          &step1[31]);
  partial_butterfly_ssse3(in[15], -cospi_17_64, cospi_15_64, &step1[17],
                          &step1[30]);
  partial_butterfly_ssse3(in[9], cospi_23_64, cospi_9_64, &step1[18],
                          &step1[29]);
  partial_butterfly_ssse3(in[7], -cospi_25_64, cospi_7_64, &step1[19],
                          &step1[28]);

  partial_butterfly_ssse3(in[5], cospi_27_64, cospi_5_64, &step1[20],
                          &step1[27]);
  partial_butterfly_ssse3(in[11], -cospi_21_64, cospi_11_64, &step1[21],
                          &step1[26]);

  partial_butterfly_ssse3(in[13], cospi_19_64, cospi_13_64, &step1[22],
                          &step1[25]);
  partial_butterfly_ssse3(in[3], -cospi_29_64, cospi_3_64, &step1[23],
                          &step1[24]);

  // stage 2
  step2[16] = _mm_add_epi16(step1[16], step1[17]);
  step2[17] = _mm_sub_epi16(step1[16], step1[17]);
  step2[18] = _mm_sub_epi16(step1[19], step1[18]);
  step2[19] = _mm_add_epi16(step1[19], step1[18]);
  step2[20] = _mm_add_epi16(step1[20], step1[21]);
  step2[21] = _mm_sub_epi16(step1[20], step1[21]);
  step2[22] = _mm_sub_epi16(step1[23], step1[22]);
  step2[23] = _mm_add_epi16(step1[23], step1[22]);

  step2[24] = _mm_add_epi16(step1[24], step1[25]);
  step2[25] = _mm_sub_epi16(step1[24], step1[25]);
  step2[26] = _mm_sub_epi16(step1[27], step1[26]);
  step2[27] = _mm_add_epi16(step1[27], step1[26]);
  step2[28] = _mm_add_epi16(step1[28], step1[29]);
  step2[29] = _mm_sub_epi16(step1[28], step1[29]);
  step2[30] = _mm_sub_epi16(step1[31], step1[30]);
  step2[31] = _mm_add_epi16(step1[31], step1[30]);

  // stage 3
  step1[16] = step2[16];
  step1[31] = step2[31];
  butterfly(step2[30], step2[17], cospi_28_64, cospi_4_64, &step1[17],
            &step1[30]);
  butterfly(step2[29], step2[18], -cospi_4_64, cospi_28_64, &step1[18],
            &step1[29]);
  step1[19] = step2[19];
  step1[20] = step2[20];
  butterfly(step2[26], step2[21], cospi_12_64, cospi_20_64, &step1[21],
            &step1[26]);
  butterfly(step2[25], step2[22], -cospi_20_64, cospi_12_64, &step1[22],
            &step1[25]);
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[27] = step2[27];
  step1[28] = step2[28];

  idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
}

void idct32_135_8x32_ssse3(const __m128i *const in /*in[32]*/,
                           __m128i *const out /*out[32]*/) {
  __m128i temp[32];
  idct32_135_8x32_quarter_1_2(in, temp);
  idct32_135_8x32_quarter_3_4(in, temp);
  // final stage
  add_sub_butterfly(temp, out, 32);
}

void vpx_idct32x32_135_add_ssse3(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
  __m128i col[2][32], io[32];
  int i;

  // rows
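  // Only the upper-left 16x16 coefficients are loaded: each pass reads one
  // 8x16 half, transforms those eight rows, and keeps the 32-wide result in
  // col[i].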
  for (i = 0; i < 2; i++) {
    load_transpose_16bit_8x8(&input[0], 32, &io[0]);
    load_transpose_16bit_8x8(&input[8], 32, &io[8]);
    idct32_135_8x32_ssse3(io, col[i]);
    input += 32 << 3;
  }

  // columns
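  // Each strip of 8 columns gathers 16 inputs (8 from each row-pass half),
  // applies the 32-point column transform, and adds the 8x32 result to dest.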
  for (i = 0; i < 32; i += 8) {
    transpose_16bit_8x8(col[0] + i, io);
    transpose_16bit_8x8(col[1] + i, io + 8);
    idct32_135_8x32_ssse3(io, io);
    store_buffer_8x32(io, dest, stride);
    dest += 8;
  }
}