/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <stdlib.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_ports/mem.h"

unsigned int vpx_avg_8x8_c(const uint8_t *s, int p) {
  int i, j;
  int sum = 0;
  for (i = 0; i < 8; ++i, s += p)
    for (j = 0; j < 8; sum += s[j], ++j) {}

  return (sum + 32) >> 6;
}

unsigned int vpx_avg_4x4_c(const uint8_t *s, int p) {
  int i, j;
  int sum = 0;
  for (i = 0; i < 4; ++i, s += p)
    for (j = 0; j < 4; sum += s[j], ++j) {}

  return (sum + 8) >> 4;
}
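
// Worked example (illustrative, not from this file): the +32 / +8 terms
// round to nearest before the shift divides by the pixel count.  For the
// 4x4 variant with every pixel equal to 100, sum = 1600 and
// (1600 + 8) >> 4 = 100.  A hypothetical call on a contiguous block:
//
//   uint8_t block[4 * 4];
//   memset(block, 100, sizeof(block));          /* needs <string.h> */
//   unsigned int avg = vpx_avg_4x4_c(block, 4); /* stride == width -> 100 */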

// src_diff: first pass, 9 bit, dynamic range [-255, 255]
//           second pass, 12 bit, dynamic range [-2040, 2040]
static void hadamard_col8(const int16_t *src_diff, int src_stride,
                          int16_t *coeff) {
  int16_t b0 = src_diff[0 * src_stride] + src_diff[1 * src_stride];
  int16_t b1 = src_diff[0 * src_stride] - src_diff[1 * src_stride];
  int16_t b2 = src_diff[2 * src_stride] + src_diff[3 * src_stride];
  int16_t b3 = src_diff[2 * src_stride] - src_diff[3 * src_stride];
  int16_t b4 = src_diff[4 * src_stride] + src_diff[5 * src_stride];
  int16_t b5 = src_diff[4 * src_stride] - src_diff[5 * src_stride];
  int16_t b6 = src_diff[6 * src_stride] + src_diff[7 * src_stride];
  int16_t b7 = src_diff[6 * src_stride] - src_diff[7 * src_stride];

  int16_t c0 = b0 + b2;
  int16_t c1 = b1 + b3;
  int16_t c2 = b0 - b2;
  int16_t c3 = b1 - b3;
  int16_t c4 = b4 + b6;
  int16_t c5 = b5 + b7;
  int16_t c6 = b4 - b6;
  int16_t c7 = b5 - b7;

  coeff[0] = c0 + c4;
  coeff[7] = c1 + c5;
  coeff[3] = c2 + c6;
  coeff[4] = c3 + c7;
  coeff[2] = c0 - c4;
  coeff[6] = c1 - c5;
  coeff[1] = c2 - c6;
  coeff[5] = c3 - c7;
}
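
// Dynamic-range sketch (illustrative arithmetic, not library code): each
// butterfly stage of the 8-point Hadamard adds one bit to the magnitude.
// Starting from 9-bit input in [-255, 255]:
//   b0..b7: 10 bit, [-510, 510]
//   c0..c7: 11 bit, [-1020, 1020]
//   coeff:  12 bit, [-2040, 2040]
// which matches the second-pass range noted above.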

// The order of the output coefficients of the Hadamard transform is not
// important: for optimization purposes the final transpose may be skipped.
void vpx_hadamard_8x8_c(const int16_t *src_diff, int src_stride,
                        int16_t *coeff) {
  int idx;
  int16_t buffer[64];
  int16_t *tmp_buf = &buffer[0];
  for (idx = 0; idx < 8; ++idx) {
    hadamard_col8(src_diff, src_stride, tmp_buf);  // src_diff: 9 bit
                                                   // dynamic range [-255, 255]
    tmp_buf += 8;
    ++src_diff;
  }

  tmp_buf = &buffer[0];
  for (idx = 0; idx < 8; ++idx) {
    hadamard_col8(tmp_buf, 8, coeff);  // tmp_buf: 12 bit
                                       // dynamic range [-2040, 2040]
    coeff += 8;  // coeff: 15 bit
                 // dynamic range [-16320, 16320]
    ++tmp_buf;
  }
}
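
// Caller-side sketch (hypothetical buffers, not code from this file):
// computing the SATD of an 8x8 residual block by chaining the transform
// with vpx_satd_c() below.
//
//   int16_t residual[8 * 8]; /* 9-bit differences, stride 8 */
//   int16_t coeff[8 * 8];
//   vpx_hadamard_8x8_c(residual, 8, coeff);
//   int satd = vpx_satd_c(coeff, 64);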

// In place 16x16 2D Hadamard transform
void vpx_hadamard_16x16_c(const int16_t *src_diff, int src_stride,
                          int16_t *coeff) {
  int idx;
  for (idx = 0; idx < 4; ++idx) {
    // src_diff: 9 bit, dynamic range [-255, 255]
    const int16_t *src_ptr = src_diff + (idx >> 1) * 8 * src_stride
                                + (idx & 0x01) * 8;
    vpx_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
  }

  // coeff: 15 bit, dynamic range [-16320, 16320]
  for (idx = 0; idx < 64; ++idx) {
    int16_t a0 = coeff[0];
    int16_t a1 = coeff[64];
    int16_t a2 = coeff[128];
    int16_t a3 = coeff[192];

    int16_t b0 = (a0 + a1) >> 1;  // (a0 + a1): 16 bit, [-32640, 32640]
    int16_t b1 = (a0 - a1) >> 1;  // b0-b3: 15 bit, dynamic range
    int16_t b2 = (a2 + a3) >> 1;  // [-16320, 16320]
    int16_t b3 = (a2 - a3) >> 1;

    coeff[0]   = b0 + b2;  // 16 bit, [-32640, 32640]
    coeff[64]  = b1 + b3;
    coeff[128] = b0 - b2;
    coeff[192] = b1 - b3;

    ++coeff;
  }
}
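
// Range check (illustrative arithmetic): each 8x8 output coefficient is
// 15 bit, so (a0 + a1) can reach +/-32640 (16 bit).  The >> 1 brings
// b0..b3 back to 15 bit, and the final +/- stage peaks at +/-32640 again,
// still within int16_t.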

// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
int vpx_satd_c(const int16_t *coeff, int length) {
  int i;
  int satd = 0;
  for (i = 0; i < length; ++i)
    satd += abs(coeff[i]);

  // satd: 26 bits, dynamic range [-32640 * 1024, 32640 * 1024]
  return satd;
}
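
// Worked bound (illustrative): with length = 1024 and |coeff[i]| <= 32640,
// |satd| <= 32640 * 1024 = 33423360 < 2^25, so the accumulator fits easily
// in a 32-bit int (the "26 bits" above includes the sign bit).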

// Integer projection onto row vectors.
// height: value range {16, 32, 64}.
void vpx_int_pro_row_c(int16_t hbuf[16], const uint8_t *ref,
                       const int ref_stride, const int height) {
  int idx;
  const int norm_factor = height >> 1;
  for (idx = 0; idx < 16; ++idx) {
    int i;
    hbuf[idx] = 0;
    // hbuf[idx]: 14 bit, dynamic range [0, 16320].
    for (i = 0; i < height; ++i)
      hbuf[idx] += ref[i * ref_stride];
    // hbuf[idx]: 9 bit, dynamic range [0, 510].
    hbuf[idx] /= norm_factor;
    ++ref;
  }
}
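
// Numeric sketch (illustrative): with height = 32 and a column of 255s,
// hbuf[idx] accumulates 32 * 255 = 8160; dividing by norm_factor = 16
// yields 510, the top of the [0, 510] range noted above.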

// width: value range {16, 32, 64}.
int16_t vpx_int_pro_col_c(const uint8_t *ref, const int width) {
  int idx;
  int16_t sum = 0;
  // sum: 14 bit, dynamic range [0, 16320]
  for (idx = 0; idx < width; ++idx)
    sum += ref[idx];
  return sum;
}

// ref: [0 - 510]
// src: [0 - 510]
// bwl: {2, 3, 4}
int vpx_vector_var_c(const int16_t *ref, const int16_t *src,
                     const int bwl) {
  int i;
  int width = 4 << bwl;
  int sse = 0, mean = 0, var;

  for (i = 0; i < width; ++i) {
    int diff = ref[i] - src[i];  // diff: dynamic range [-510, 510], 10 bits.
    mean += diff;                // mean: dynamic range 16 bits.
    sse += diff * diff;          // sse:  dynamic range 26 bits.
  }

  // (mean * mean): dynamic range 31 bits.
  var = sse - ((mean * mean) >> (bwl + 2));
  return var;
}
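
// Derivation sketch (illustrative): "mean" above is really the sum of the
// diffs.  With n = width = 4 << bwl = 2^(bwl + 2), the identity
//   sum((diff - avg)^2) = sse - (sum(diff))^2 / n
// gives the un-normalized variance, and the division by n is the
// (mean * mean) >> (bwl + 2) shift.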

void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp,
                      int *min, int *max) {
  int i, j;
  *min = 255;
  *max = 0;
  for (i = 0; i < 8; ++i, s += p, d += dp) {
    for (j = 0; j < 8; ++j) {
      int diff = abs(s[j] - d[j]);
      *min = diff < *min ? diff : *min;
      *max = diff > *max ? diff : *max;
    }
  }
}
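
// Caller-side sketch (hypothetical use, not from this file): the min/max of
// the 8x8 absolute source/predictor difference can flag near-flat blocks.
//
//   int mn, mx;
//   vpx_minmax_8x8_c(src, src_stride, pred, pred_stride, &mn, &mx);
//   if (mx - mn <= 16) { /* treat the block as flat */ }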

#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vpx_highbd_avg_8x8_c(const uint8_t *s8, int p) {
  int i, j;
  int sum = 0;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  for (i = 0; i < 8; ++i, s += p)
    for (j = 0; j < 8; sum += s[j], ++j) {}

  return (sum + 32) >> 6;
}

unsigned int vpx_highbd_avg_4x4_c(const uint8_t *s8, int p) {
  int i, j;
  int sum = 0;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  for (i = 0; i < 4; ++i, s += p)
    for (j = 0; j < 4; sum += s[j], ++j) {}

  return (sum + 8) >> 4;
}

void vpx_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
                             int dp, int *min, int *max) {
  int i, j;
  const uint16_t *s = CONVERT_TO_SHORTPTR(s8);
  const uint16_t *d = CONVERT_TO_SHORTPTR(d8);
  *min = 255;
  *max = 0;
  for (i = 0; i < 8; ++i, s += p, d += dp) {
    for (j = 0; j < 8; ++j) {
      int diff = abs(s[j] - d[j]);
      *min = diff < *min ? diff : *min;
      *max = diff > *max ? diff : *max;
    }
  }
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
