Commit e0cc52db authored by clang-format, committed by James Zern

vp9/encoder: apply clang-format

Change-Id: I45d9fb4013f50766b24363a86365e8063e8954c2
parent 3a04c9c9
@@ -18,14 +18,13 @@
#include "vpx_dsp/txfm_common.h"
void vp9_fdct8x8_quant_neon(const int16_t *input, int stride,
int16_t* coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t* zbin_ptr,
const int16_t* round_ptr, const int16_t* quant_ptr,
const int16_t* quant_shift_ptr,
int16_t* qcoeff_ptr, int16_t* dqcoeff_ptr,
const int16_t* dequant_ptr, uint16_t* eob_ptr,
const int16_t* scan_ptr,
const int16_t* iscan_ptr) {
int16_t *coeff_ptr, intptr_t n_coeffs,
int skip_block, const int16_t *zbin_ptr,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
uint16_t *eob_ptr, const int16_t *scan_ptr,
const int16_t *iscan_ptr) {
int16_t temp_buffer[64];
(void)coeff_ptr;
......
@@ -26,8 +26,8 @@ void vp9_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
const int16_t *round_ptr, const int16_t *quant_ptr,
const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
int16_t *dqcoeff_ptr, const int16_t *dequant_ptr,
uint16_t *eob_ptr,
const int16_t *scan, const int16_t *iscan) {
uint16_t *eob_ptr, const int16_t *scan,
const int16_t *iscan) {
// TODO(jingning) Decide whether these arguments are needed once the
// quantization process is completed.
(void)zbin_ptr;
@@ -54,12 +54,12 @@ void vp9_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[0]);
const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp),
vget_low_s16(v_quant));
const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp),
vget_high_s16(v_quant));
const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16),
vshrn_n_s32(v_tmp_hi, 16));
const int32x4_t v_tmp_lo =
vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
const int32x4_t v_tmp_hi =
vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
const int16x8_t v_tmp2 =
vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
@@ -79,12 +79,12 @@ void vp9_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
const int16x8_t v_coeff = vld1q_s16(&coeff_ptr[i]);
const int16x8_t v_coeff_sign = vshrq_n_s16(v_coeff, 15);
const int16x8_t v_tmp = vabaq_s16(v_round, v_coeff, v_zero);
const int32x4_t v_tmp_lo = vmull_s16(vget_low_s16(v_tmp),
vget_low_s16(v_quant));
const int32x4_t v_tmp_hi = vmull_s16(vget_high_s16(v_tmp),
vget_high_s16(v_quant));
const int16x8_t v_tmp2 = vcombine_s16(vshrn_n_s32(v_tmp_lo, 16),
vshrn_n_s32(v_tmp_hi, 16));
const int32x4_t v_tmp_lo =
vmull_s16(vget_low_s16(v_tmp), vget_low_s16(v_quant));
const int32x4_t v_tmp_hi =
vmull_s16(vget_high_s16(v_tmp), vget_high_s16(v_quant));
const int16x8_t v_tmp2 =
vcombine_s16(vshrn_n_s32(v_tmp_lo, 16), vshrn_n_s32(v_tmp_hi, 16));
const uint16x8_t v_nz_mask = vceqq_s16(v_tmp2, v_zero);
const int16x8_t v_iscan_plus1 = vaddq_s16(v_iscan, v_one);
const int16x8_t v_nz_iscan = vbslq_s16(v_nz_mask, v_zero, v_iscan_plus1);
@@ -96,9 +96,8 @@ void vp9_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
vst1q_s16(&dqcoeff_ptr[i], v_dqcoeff);
}
{
const int16x4_t v_eobmax_3210 =
vmax_s16(vget_low_s16(v_eobmax_76543210),
vget_high_s16(v_eobmax_76543210));
const int16x4_t v_eobmax_3210 = vmax_s16(
vget_low_s16(v_eobmax_76543210), vget_high_s16(v_eobmax_76543210));
const int64x1_t v_eobmax_xx32 =
vshr_n_s64(vreinterpret_s64_s16(v_eobmax_3210), 32);
const int16x4_t v_eobmax_tmp =
......
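For orientation, here is a scalar sketch of what each NEON lane in the loops above computes (the helper name is illustrative, not part of the patch): vabaq_s16 yields |coeff| + round, the vmull_s16/vshrn_n_s32 pair is a Q16 fixed-point multiply by quant, and the sign is folded back in from v_coeff_sign.

static int16_t quantize_fp_lane_sketch(int16_t coeff, int16_t round,
                                       int16_t quant) {
  const int16_t sign = coeff >> 15;                 /* v_coeff_sign: 0 or -1 */
  const int16_t abs_coeff = (coeff ^ sign) - sign;  /* |coeff| */
  const int32_t tmp = abs_coeff + round;            /* vabaq_s16(v_round, ..) */
  const int16_t q = (int16_t)((tmp * quant) >> 16); /* vmull + vshrn_n_s32 */
  return (q ^ sign) - sign;                         /* reapply the sign */
}

The eob bookkeeping follows the same per-lane pattern: a zero quantized value keeps 0, a nonzero value keeps iscan + 1 (via vbslq_s16), and the final block reduces the lane-wise maximum down to a single eob.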
@@ -86,9 +86,7 @@ void vp9_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
VP9_FADST4(in0, in1, in2, in3, in0, in1, in2, in3);
break;
default:
assert(0);
break;
default: assert(0); break;
}
TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
......
@@ -23,44 +23,42 @@ void vp9_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
switch (tx_type) {
case DCT_DCT:
VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
break;
case ADST_DCT:
VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
break;
case DCT_ADST:
VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
break;
case ADST_ADST:
VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
break;
default:
assert(0);
VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2,
in3, in4, in5, in6, in7);
VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
in5, in6, in7);
break;
default: assert(0); break;
}
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
in0, in1, in2, in3, in4, in5, in6, in7);
TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
in4, in5, in6, in7);
SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
}
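The reformatting leaves behavior unchanged; the switch above still implements the usual two-pass hybrid transform. A comment-form summary of the flow (not from the patch):

/* vp9_fht8x8_msa, schematically:
 *   1. first 1-D 8-point transform (VP9_FDCT8 or VP9_ADST8, per tx_type)
 *   2. TRANSPOSE8x8_SH_SH so pass two runs over the other dimension
 *   3. second 1-D transform, again selected by tx_type
 *   4. final transpose, rounding shift (SRLI_AVE_S_4V_H), ST_SH8 store */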
@@ -11,12 +11,9 @@
#include "./vp9_rtcd.h"
#include "vpx_dsp/mips/macros_msa.h"
static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
uint32_t stride,
uint8_t *frm2_ptr,
int32_t filt_sth,
int32_t filt_wgt,
uint32_t *acc,
static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr, uint32_t stride,
uint8_t *frm2_ptr, int32_t filt_sth,
int32_t filt_wgt, uint32_t *acc,
uint16_t *cnt) {
uint32_t row;
uint64_t f0, f1, f2, f3;
@@ -54,10 +51,10 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
UNPCK_SH_SW(diff0, diff0_r, diff0_l);
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l,
diff1_l, mod0_w, mod1_w, mod2_w, mod3_w);
MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -65,8 +62,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
mod0_w, mod1_w, mod2_w, mod3_w);
SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -85,8 +82,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
mod0_w, mod1_w, mod2_w, mod3_w);
ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
@@ -101,10 +98,10 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
HSUB_UB2_SH(frm_r, frm_l, diff0, diff1);
UNPCK_SH_SW(diff0, diff0_r, diff0_l);
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l,
diff1_l, mod0_w, mod1_w, mod2_w, mod3_w);
MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -112,8 +109,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
mod0_w, mod1_w, mod2_w, mod3_w);
SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -131,8 +128,8 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
mod0_w, mod1_w, mod2_w, mod3_w);
ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
@@ -141,13 +138,10 @@ static void temporal_filter_apply_8size_msa(uint8_t *frm1_ptr,
}
}
static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
uint32_t stride,
static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, uint32_t stride,
uint8_t *frm2_ptr,
int32_t filt_sth,
int32_t filt_wgt,
uint32_t *acc,
uint16_t *cnt) {
int32_t filt_sth, int32_t filt_wgt,
uint32_t *acc, uint16_t *cnt) {
uint32_t row;
v16i8 frm1, frm2, frm3, frm4;
v16u8 frm_r, frm_l;
@@ -183,8 +177,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
mod0_w, mod1_w, mod2_w, mod3_w);
MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -192,8 +186,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
mod0_w, mod1_w, mod2_w, mod3_w);
SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -212,8 +206,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
mod0_w, mod1_w, mod2_w, mod3_w);
ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
@@ -230,8 +224,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
UNPCK_SH_SW(diff1, diff1_r, diff1_l);
MUL4(diff0_r, diff0_r, diff0_l, diff0_l, diff1_r, diff1_r, diff1_l, diff1_l,
mod0_w, mod1_w, mod2_w, mod3_w);
MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3,
mod0_w, mod1_w, mod2_w, mod3_w);
MUL4(mod0_w, cnst3, mod1_w, cnst3, mod2_w, cnst3, mod3_w, cnst3, mod0_w,
mod1_w, mod2_w, mod3_w);
SRAR_W4_SW(mod0_w, mod1_w, mod2_w, mod3_w, strength);
diff0_r = (mod0_w < cnst16);
@@ -239,8 +233,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
diff1_r = (mod2_w < cnst16);
diff1_l = (mod3_w < cnst16);
SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w,
mod0_w, mod1_w, mod2_w, mod3_w);
SUB4(cnst16, mod0_w, cnst16, mod1_w, cnst16, mod2_w, cnst16, mod3_w, mod0_w,
mod1_w, mod2_w, mod3_w);
mod0_w = diff0_r & mod0_w;
mod1_w = diff0_l & mod1_w;
@@ -259,8 +253,8 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr,
UNPCK_SH_SW(frm2_l, frm2_lr, frm2_ll);
MUL4(mod0_w, frm2_rr, mod1_w, frm2_rl, mod2_w, frm2_lr, mod3_w, frm2_ll,
mod0_w, mod1_w, mod2_w, mod3_w);
ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3,
mod0_w, mod1_w, mod2_w, mod3_w);
ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,
mod2_w, mod3_w);
ST_SW2(mod0_w, mod1_w, acc, 4);
acc += 8;
ST_SW2(mod2_w, mod3_w, acc, 4);
@@ -277,11 +271,11 @@ void vp9_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
int32_t filt_wgt, uint32_t *accu,
uint16_t *cnt) {
if (8 == (blk_w * blk_h)) {
temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr,
strength, filt_wgt, accu, cnt);
temporal_filter_apply_8size_msa(frame1_ptr, stride, frame2_ptr, strength,
filt_wgt, accu, cnt);
} else if (16 == (blk_w * blk_h)) {
temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr,
strength, filt_wgt, accu, cnt);
temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, strength,
filt_wgt, accu, cnt);
} else {
vp9_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
strength, filt_wgt, accu, cnt);
......
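The MSA blocks above vectorize the per-pixel temporal-filter update four or eight pixels at a time. A scalar sketch of the same arithmetic, matching the vp9_temporal_filter_apply_c fallback (function name is illustrative; ROUND_POWER_OF_TWO is the usual libvpx rounding-shift macro, strength > 0 assumed):

static void temporal_filter_pixel_sketch(uint8_t f1, uint8_t f2, int strength,
                                         int filt_wgt, uint32_t *acc,
                                         uint16_t *cnt) {
  const int diff = f2 - f1;                                /* HSUB_UB2_SH */
  int mod = ROUND_POWER_OF_TWO(diff * diff * 3, strength); /* MUL4 + SRAR_W4_SW */
  mod = (mod < 16) ? (16 - mod) : 0;  /* cnst16 compare, SUB4, mask */
  mod *= filt_wgt;                    /* weight step (outside the hunks shown) */
  *acc += (uint32_t)mod * f2;         /* MUL4 + ADD4, stored via ST_SW2 */
  *cnt += (uint16_t)mod;
}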
@@ -22,8 +22,8 @@
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_segmentation.h"
static const double rate_ratio[MAX_SEGMENTS] =
{1.0, 0.75, 0.6, 0.5, 0.4, 0.3, 0.25};
static const double rate_ratio[MAX_SEGMENTS] = { 1.0, 0.75, 0.6, 0.5,
0.4, 0.3, 0.25 };
// Sets segment id 0 for the equatorial region, 1 for temperate region
// and 2 for the polar regions
......
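The reformatted rate_ratio table presumably drives per-segment Q deltas the same way the complexity-AQ code further down drives its own table; a minimal sketch of that call pattern under that assumption (loop body condensed):

int segment;
for (segment = 0; segment < MAX_SEGMENTS; ++segment) {
  const int qindex_delta = vp9_compute_qdelta_by_rate(
      &cpi->rc, cm->frame_type, cm->base_qindex, rate_ratio[segment],
      cm->bit_depth);
  vp9_set_segdata(&cm->seg, segment, SEG_LVL_ALT_Q, qindex_delta);
}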
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP9_ENCODER_VP9_AQ_360_H_
#define VP9_ENCODER_VP9_AQ_360_H_
......
@@ -19,21 +19,24 @@
#include "vp9/common/vp9_seg_common.h"
#include "vp9/encoder/vp9_segmentation.h"
#define AQ_C_SEGMENTS 5
#define DEFAULT_AQ2_SEG 3 // Neutral Q segment
#define AQ_C_SEGMENTS 5
#define DEFAULT_AQ2_SEG 3 // Neutral Q segment
#define AQ_C_STRENGTHS 3
static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
{ {1.75, 1.25, 1.05, 1.00, 0.90},
{2.00, 1.50, 1.15, 1.00, 0.85},
{2.50, 1.75, 1.25, 1.00, 0.80} };
static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
{ {0.15, 0.30, 0.55, 2.00, 100.0},
{0.20, 0.40, 0.65, 2.00, 100.0},
{0.25, 0.50, 0.75, 2.00, 100.0} };
static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] =
{ {-4.0, -3.0, -2.0, 100.00, 100.0},
{-3.5, -2.5, -1.5, 100.00, 100.0},
{-3.0, -2.0, -1.0, 100.00, 100.0} };
static const double aq_c_q_adj_factor[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
{ 1.75, 1.25, 1.05, 1.00, 0.90 },
{ 2.00, 1.50, 1.15, 1.00, 0.85 },
{ 2.50, 1.75, 1.25, 1.00, 0.80 }
};
static const double aq_c_transitions[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
{ 0.15, 0.30, 0.55, 2.00, 100.0 },
{ 0.20, 0.40, 0.65, 2.00, 100.0 },
{ 0.25, 0.50, 0.75, 2.00, 100.0 }
};
static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
{ -4.0, -3.0, -2.0, 100.00, 100.0 },
{ -3.5, -2.5, -1.5, 100.00, 100.0 },
{ -3.0, -2.0, -1.0, 100.00, 100.0 }
};
static int get_aq_c_strength(int q_index, vpx_bit_depth_t bit_depth) {
// Approximate base quantizer (truncated to int)
@@ -78,14 +81,11 @@ void vp9_setup_in_frame_q_adj(VP9_COMP *cpi) {
for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) {
int qindex_delta;
if (segment == DEFAULT_AQ2_SEG)
continue;
qindex_delta =
vp9_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
aq_c_q_adj_factor[aq_strength][segment],
cm->bit_depth);
if (segment == DEFAULT_AQ2_SEG) continue;
qindex_delta = vp9_compute_qdelta_by_rate(
&cpi->rc, cm->frame_type, cm->base_qindex,
aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth);
// For AQ complexity mode, we don't allow Q0 in a segment if the base
// Q is not 0. Q0 (lossless) implies 4x4 only and in AQ mode 2 a segment
@@ -125,26 +125,25 @@ void vp9_caq_select_segment(VP9_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
} else {
// Rate depends on fraction of a SB64 in frame (xmis * ymis / bw * bh).
// It is converted to bits * 256 units.
const int target_rate = (cpi->rc.sb64_target_rate * xmis * ymis * 256) /
(bw * bh);
const int target_rate =
(cpi->rc.sb64_target_rate * xmis * ymis * 256) / (bw * bh);
double logvar;
double low_var_thresh;
const int aq_strength = get_aq_c_strength(cm->base_qindex, cm->bit_depth);
vpx_clear_system_state();
low_var_thresh = (cpi->oxcf.pass == 2)
? VPXMAX(cpi->twopass.mb_av_energy, MIN_DEFAULT_LV_THRESH)
: DEFAULT_LV_THRESH;
low_var_thresh = (cpi->oxcf.pass == 2) ? VPXMAX(cpi->twopass.mb_av_energy,
MIN_DEFAULT_LV_THRESH)
: DEFAULT_LV_THRESH;
vp9_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
logvar = vp9_log_block_var(cpi, mb, bs);
segment = AQ_C_SEGMENTS - 1; // Just in case no break out below.
segment = AQ_C_SEGMENTS - 1; // Just in case no break out below.
for (i = 0; i < AQ_C_SEGMENTS; ++i) {
// Test rate against a threshold value and variance against a threshold.
// Increasing segment number (higher variance and complexity) = higher Q.
if ((projected_rate <
target_rate * aq_c_transitions[aq_strength][i]) &&
if ((projected_rate < target_rate * aq_c_transitions[aq_strength][i]) &&
(logvar < (low_var_thresh + aq_c_var_thresholds[aq_strength][i]))) {
segment = i;
break;
......
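A worked instance of the target_rate scaling commented in this hunk, using assumed numbers: for BLOCK_64X64, bw and bh are 8 in mi units, so a superblock with only half its rows inside the frame (xmis = 8, ymis = 4) gets

  target_rate = (sb64_target_rate * 8 * 4 * 256) / (8 * 8)
              = sb64_target_rate * 128,

i.e. half of the full-superblock target in bits*256 units, which is exactly the fractional-coverage behavior the comment describes.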
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP9_ENCODER_VP9_AQ_COMPLEXITY_H_
#define VP9_ENCODER_VP9_AQ_COMPLEXITY_H_
@@ -23,8 +22,8 @@ struct macroblock;
// Select a segment for the current Block.
void vp9_caq_select_segment(struct VP9_COMP *cpi, struct macroblock *,
BLOCK_SIZE bs,
int mi_row, int mi_col, int projected_rate);
BLOCK_SIZE bs, int mi_row, int mi_col,
int projected_rate);
// This function sets up a set of segments with delta Q values around
// the baseline frame quantizer.
......
@@ -8,7 +8,6 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP9_ENCODER_VP9_AQ_CYCLICREFRESH_H_
#define VP9_ENCODER_VP9_AQ_CYCLICREFRESH_H_
@@ -23,9 +22,9 @@ extern "C" {
// The segment ids used in cyclic refresh: from base (no boost) to increasing
// boost (higher delta-qp).
#define CR_SEGMENT_ID_BASE 0
#define CR_SEGMENT_ID_BOOST1 1
#define CR_SEGMENT_ID_BOOST2 2
#define CR_SEGMENT_ID_BASE 0
#define CR_SEGMENT_ID_BOOST1 1
#define CR_SEGMENT_ID_BOOST2 2
// Maximum rate target ratio for setting segment delta-qp.
#define CR_MAX_RATE_TARGET_RATIO 4.0
@@ -91,8 +90,8 @@ int vp9_cyclic_refresh_rc_bits_per_mb(const struct VP9_COMP *cpi, int i,
// check if we should reset the segment_id, and update the cyclic_refresh map
// and segmentation map.
void vp9_cyclic_refresh_update_segment(struct VP9_COMP *const cpi,
MODE_INFO *const mi,
int mi_row, int mi_col, BLOCK_SIZE bsize,
MODE_INFO *const mi, int mi_row,
int mi_col, BLOCK_SIZE bsize,
int64_t rate, int64_t dist, int skip,
struct macroblock_plane *const p);
......
@@ -23,19 +23,19 @@
#define ENERGY_MIN (-4)
#define ENERGY_MAX (1)
#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
#define ENERGY_IN_BOUNDS(energy)\
#define ENERGY_SPAN (ENERGY_MAX - ENERGY_MIN + 1)
#define ENERGY_IN_BOUNDS(energy) \
assert((energy) >= ENERGY_MIN && (energy) <= ENERGY_MAX)
static const double rate_ratio[MAX_SEGMENTS] =
{2.5, 2.0, 1.5, 1.0, 0.75, 1.0, 1.0, 1.0};
static const int segment_id[ENERGY_SPAN] = {0, 1, 1, 2, 3, 4};
static const double rate_ratio[MAX_SEGMENTS] = { 2.5, 2.0, 1.5, 1.0,
0.75, 1.0, 1.0, 1.0 };
static const int segment_id[ENERGY_SPAN] = { 0, 1, 1, 2, 3, 4 };
#define SEGMENT_ID(i) segment_id[(i) - ENERGY_MIN]
#define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]
DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = {0};
DECLARE_ALIGNED(16, static const uint8_t, vp9_64_zeros[64]) = { 0 };
#if CONFIG_VP9_HIGHBITDEPTH
DECLARE_ALIGNED(16, static const uint16_t, vp9_highbd_64_zeros[64]) = {0};
DECLARE_ALIGNED(16, static const uint16_t, vp9_highbd_64_zeros[64]) = { 0 };
#endif
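Reading the reformatted tables together (an illustrative trace, not code from the patch): SEGMENT_ID(i) indexes segment_id at i - ENERGY_MIN, and ENERGY_MIN is -4, so:

/* SEGMENT_ID(-4) -> segment_id[0] == 0, rate_ratio[0] == 2.5  (most boost)
   SEGMENT_ID(0)  -> segment_id[4] == 3, rate_ratio[3] == 1.0  (neutral)
   SEGMENT_ID(1)  -> segment_id[5] == 4, rate_ratio[4] == 0.75 (least rate) */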
unsigned int vp9_vaq_segment_id(int energy) {
@@ -85,9 +85,9 @@ void vp9_vaq_frame_setup(VP9_COMP *cpi) {
/* TODO(agrange, paulwilkins): The block_variance calls the unoptimized versions
* of variance() and highbd_8_variance(). It should not.
*/
static void aq_variance(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
int w, int h, unsigned int *sse, int *sum) {
static void aq_variance(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, int w, int h, unsigned int *sse,
int *sum) {
int i, j;
*sum = 0;
@@ -106,9 +106,9 @@ static void aq_variance(const uint8_t *a, int a_stride,
}
#if CONFIG_VP9_HIGHBITDEPTH
static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride,
int w, int h, uint64_t *sse, uint64_t *sum) {
static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, int w, int h,
uint64_t *sse, uint64_t *sum) {
int i, j;
uint16_t *a = CONVERT_TO_SHORTPTR(a8);
@@ -127,9 +127,9 @@ static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
}
}
static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride,
int w, int h, unsigned int *sse, int *sum) {
static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, int w, int h,
unsigned int *sse, int *sum) {
uint64_t sse_long = 0;
uint64_t sum_long = 0;
aq_highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
@@ -142,10 +142,10 @@ static unsigned int block_variance(VP9_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bs) {
MACROBLOCKD *xd = &x->e_mbd;
unsigned int var, sse;
int right_overflow = (xd->mb_to_right_edge < 0) ?
((-xd->mb_to_right_edge) >> 3) : 0;
int bottom_overflow = (xd->mb_to_bottom_edge < 0) ?
((-xd->mb_to_bottom_edge) >> 3) : 0;
int right_overflow =
(xd->mb_to_right_edge < 0) ? ((-xd->mb_to_right_edge) >> 3) : 0;
int bottom_overflow =
(xd->mb_to_bottom_edge < 0) ? ((-xd->mb_to_bottom_edge) >> 3) : 0;
if (right_overflow || bottom_overflow) {
const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
@@ -159,30 +159,27 @@ static unsigned int block_variance(VP9_COMP *cpi, MACROBLOCK *x,
sse >>= 2 * (xd->bd - 8);
avg >>= (xd->bd - 8);
} else {
aq_variance(x->plane[0].src.buf, x->plane[0].src.stride,
vp9_64_zeros, 0, bw, bh, &sse, &avg);
aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0,
bw, bh, &sse, &avg);
}
#else
aq_variance(x->plane[0].src.buf, x->plane[0].src.stride,
vp9_64_zeros, 0, bw, bh, &sse, &avg);
aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp9_64_zeros, 0,
bw, bh, &sse, &avg);
#endif // CONFIG_VP9_HIGHBITDEPTH
var = sse - (((int64_t)avg * avg) / (bw * bh));
return (unsigned int)(((uint64_t)256 * var) / (bw * bh));
} else {
#if CONFIG_VP9_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
x->plane[0].src.stride,
CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros),
0, &sse);
var =
cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
CONVERT_TO_BYTEPTR(vp9_highbd_64_zeros), 0, &sse);
} else {
var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf,
x->plane[0].src.stride,
var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
vp9_64_zeros, 0, &sse);
}
#else