/* NOTE(review): "Newer" / "Older" labels here are diff-viewer residue from the
 * tool this file was extracted with — they are not source code. Remove them
 * (and the stray numeric gutter lines below) before attempting to compile. */
break;
}
for (i = 0; i < 4; ++i)
for (j = 0; j < 4; ++j) {
if (i == 0 && j == 0) continue;
vpx_memcpy((m + j + mis * i)->mbmi.nzcs, m->mbmi.nzcs,
384 * sizeof(m->mbmi.nzcs[0]));
}
}
#endif
static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
                              int output_enabled,
                              int mb_row, int mb_col) {
  // Encodes one 16x16 macroblock: configures the zbin boost, runs intra or
  // inter prediction + transform/quantize, then either tokenizes the residual
  // or records a skip, and finally accumulates transform-size statistics when
  // output_enabled is set.
  //
  // NOTE(review): this function was corrupted in extraction (stray diff-gutter
  // line numbers, dropped lines, an unmatched #endif, a missing `xd`
  // declaration, and a missing closing brace). It has been reconstructed from
  // the surviving fragments and the parallel encode_superblock() copy —
  // TODO: verify against the upstream vp9_encodeframe.c revision.
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;  // was missing; mi/mbmi below need it
  MODE_INFO *mi = xd->mode_info_context;
  MB_MODE_INFO *const mbmi = &mi->mbmi;
  const int mis = cm->mode_info_stride;

  // This path handles plain macroblocks only; superblocks go through
  // encode_superblock().
  assert(!xd->mode_info_context->mbmi.sb_type);

#ifdef ENC_DEBUG
  // Debug tracing is hard-wired to one macroblock (frame 11, row 8, col 0).
  enc_debug = (cpi->common.current_video_frame == 11 && cm->show_frame &&
               mb_row == 8 && mb_col == 0 && output_enabled);
  if (enc_debug)
    printf("Encode MB %d %d output %d\n", mb_row, mb_col, output_enabled);
#endif
  if (cm->frame_type == KEY_FRAME) {
    if (cpi->oxcf.tuning == VP8_TUNE_SSIM && output_enabled) {
      // Adjust the zbin based on this MB rate.
      adjust_act_zbin(cpi, x);
      vp9_update_zbin_extra(cpi, x);
    }
  } else {
    vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);

    if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
      // Adjust the zbin based on this MB rate.
      adjust_act_zbin(cpi, x);
    }

    // Experimental code. Special case for gf and arf zeromv modes.
    // Increase zbin size to suppress noise
    cpi->zbin_mode_boost = 0;
    if (cpi->zbin_mode_boost_enabled) {
      if (mbmi->ref_frame != INTRA_FRAME) {
        if (mbmi->mode == ZEROMV) {
          if (mbmi->ref_frame != LAST_FRAME)
            cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
          else
            cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
        } else if (mbmi->mode == SPLITMV) {
          // Split-mv keeps the zero boost initialized above.
          // NOTE(review): branch body was lost in the corruption; the
          // sibling encode_superblock copy also leaves it empty — confirm.
        } else {
          cpi->zbin_mode_boost = MV_ZBIN_BOOST;
        }
      } else {
        cpi->zbin_mode_boost = INTRA_ZBIN_BOOST;
      }
    }

    vp9_update_zbin_extra(cpi, x);
  }

  if (mbmi->ref_frame == INTRA_FRAME) {
#ifdef ENC_DEBUG
    if (enc_debug) {
      printf("Mode %d skip %d tx_size %d\n", mbmi->mode, x->skip,
             mbmi->txfm_size);
    }
#endif
    // NOTE(review): an I4X4_PRED branch appears to have been dropped by the
    // corruption (a second vp9_encode_intra16x16mbuv call survived with no
    // enclosing condition) — TODO restore it from upstream if applicable.
    if (mbmi->mode == I8X8_PRED) {
      vp9_encode_intra8x8mby(x);
      vp9_encode_intra8x8mbuv(x);
    } else {
      vp9_encode_intra16x16mbuv(cm, x);
      vp9_encode_intra16x16mby(cm, x);
    }

    if (output_enabled)
      sum_intra_stats(cpi, x);
  } else {
    int ref_fb_idx;
#ifdef ENC_DEBUG
    if (enc_debug)
      printf("Mode %d skip %d tx_size %d ref %d ref2 %d mv %d %d interp %d\n",
             mbmi->mode, x->skip, mbmi->txfm_size,
             mbmi->ref_frame, mbmi->second_ref_frame,
             mbmi->mv[0].as_mv.row, mbmi->mv[0].as_mv.col,
             mbmi->interp_filter);
#endif
    assert(cm->frame_type != KEY_FRAME);

    // Pick the frame buffer for the first reference; the original's three
    // unconditional assignments were a corrupted if/else-if/else chain.
    if (mbmi->ref_frame == LAST_FRAME)
      ref_fb_idx = cpi->common.ref_frame_map[cpi->lst_fb_idx];
    else if (mbmi->ref_frame == GOLDEN_FRAME)
      ref_fb_idx = cpi->common.ref_frame_map[cpi->gld_fb_idx];
    else
      ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];

    setup_pred_block(&xd->pre,
                     &cpi->common.yv12_fb[ref_fb_idx],
                     mb_row, mb_col,
                     &xd->scale_factor[0], &xd->scale_factor_uv[0]);

    if (mbmi->second_ref_frame > 0) {
      int second_ref_fb_idx;

      // Same reconstruction for the compound-prediction second reference.
      if (mbmi->second_ref_frame == LAST_FRAME)
        second_ref_fb_idx = cpi->common.ref_frame_map[cpi->lst_fb_idx];
      else if (mbmi->second_ref_frame == GOLDEN_FRAME)
        second_ref_fb_idx = cpi->common.ref_frame_map[cpi->gld_fb_idx];
      else
        second_ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];

      setup_pred_block(&xd->second_pre,
                       &cpi->common.yv12_fb[second_ref_fb_idx],
                       mb_row, mb_col,
                       &xd->scale_factor[1], &xd->scale_factor_uv[1]);
    }

    if (!x->skip) {
      vp9_encode_inter16x16(cm, x, mb_row, mb_col);
    } else {
      // Skipped MB: build the prediction only; no residual is coded.
      vp9_build_inter_predictors_sb(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
#if CONFIG_COMP_INTERINTRA_PRED
      if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
        vp9_build_interintra_16x16_predictors_mb(xd,
                                                 xd->dst.y_buffer,
                                                 xd->dst.u_buffer,
                                                 xd->dst.v_buffer,
                                                 xd->dst.y_stride,
                                                 xd->dst.uv_stride);
      }
#endif
    }
  }

  if (!x->skip) {
#ifdef ENC_DEBUG
    if (enc_debug) {
      int i, j;
      // Dump coefficients, predictor, residuals and reconstructed pixels for
      // the traced macroblock.
      // NOTE(review): part of this dump (original lines ~2155-2185) was lost
      // in the corruption; only the surviving sections are kept.
      printf("\n");
      printf("qcoeff\n");
      for (i = 0; i < 384; i++) {
        printf("%3d ", xd->qcoeff[i]);
        if (i % 16 == 15) printf("\n");
      }
      printf("\n");
      printf("predictor\n");
      for (i = 0; i < 384; i++) {
        printf("%3d ", xd->predictor[i]);
        if (i % 16 == 15) printf("\n");
      }
      printf("\n");
      printf("src_diff\n");
      for (i = 0; i < 384; i++) {
        printf("%3d ", x->src_diff[i]);
        if (i % 16 == 15) printf("\n");
      }
      printf("\n");
      printf("diff\n");
      for (i = 0; i < 384; i++) {
        printf("%3d ", xd->block[0].diff[i]);
        if (i % 16 == 15) printf("\n");
      }
      printf("\n");
      printf("final y\n");
      for (i = 0; i < 16; i++) {
        for (j = 0; j < 16; j++)
          printf("%3d ", xd->dst.y_buffer[i * xd->dst.y_stride + j]);
        printf("\n");
      }
      printf("\n");
      printf("final u\n");
      for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
          printf("%3d ", xd->dst.u_buffer[i * xd->dst.uv_stride + j]);
        printf("\n");
      }
      printf("final v\n");
      for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++)
          printf("%3d ", xd->dst.v_buffer[i * xd->dst.uv_stride + j]);
        printf("\n");
      }
    }
#endif

#if CONFIG_CODE_NONZEROCOUNT
    gather_nzcs_mb16(cm, xd);
#endif
    vp9_tokenize_mb(cpi, xd, t, !output_enabled);
  } else {
    // FIXME(rbultje): not tile-aware (mi - 1)
    int mb_skip_context =
        (mi - 1)->mbmi.mb_skip_coeff + (mi - mis)->mbmi.mb_skip_coeff;

    mbmi->mb_skip_coeff = 1;
    if (output_enabled)
      cpi->skip_true_count[mb_skip_context]++;
    vp9_reset_sb_tokens_context(xd, BLOCK_SIZE_MB16X16);
  }

  if (output_enabled) {
    int segment_id = mbmi->segment_id;
    if (cpi->common.txfm_mode == TX_MODE_SELECT &&
        !(mbmi->mb_skip_coeff ||
          vp9_segfeature_active(&x->e_mbd, segment_id, SEG_LVL_SKIP))) {
      assert(mbmi->txfm_size <= TX_16X16);
      // RD selected the transform size; count it for entropy-cost updates.
      if (mbmi->mode != I4X4_PRED && mbmi->mode != I8X8_PRED &&
          mbmi->mode != SPLITMV) {
        cpi->txfm_count_16x16p[mbmi->txfm_size]++;
      } else if (mbmi->mode == I8X8_PRED ||
                 (mbmi->mode == SPLITMV &&
                  mbmi->partitioning != PARTITIONING_4X4)) {
        cpi->txfm_count_8x8p[mbmi->txfm_size]++;
      }
    } else if (mbmi->mode != I4X4_PRED && mbmi->mode != I8X8_PRED &&
               mbmi->mode != SPLITMV && cpi->common.txfm_mode >= ALLOW_16X16) {
      // Fixed transform mode: force the largest size the mode allows.
      mbmi->txfm_size = TX_16X16;
    } else if (mbmi->mode != I4X4_PRED &&
               !(mbmi->mode == SPLITMV &&
                 mbmi->partitioning == PARTITIONING_4X4) &&
               cpi->common.txfm_mode >= ALLOW_8X8) {
      mbmi->txfm_size = TX_8X8;
    } else {
      mbmi->txfm_size = TX_4X4;
    }
  }
}
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled, int mb_row, int mb_col,
BLOCK_SIZE_TYPE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const uint8_t *src = x->src.y_buffer;
uint8_t *dst = xd->dst.y_buffer;
const uint8_t *usrc = x->src.u_buffer;
uint8_t *udst = xd->dst.u_buffer;
const uint8_t *vsrc = x->src.v_buffer;
uint8_t *vdst = xd->dst.v_buffer;
int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
int n;
MODE_INFO *mi = x->e_mbd.mode_info_context;
unsigned int segment_id = mi->mbmi.segment_id;
const int mis = cm->mode_info_stride;
const int bwl = mb_width_log2(bsize);
const int bw = 1 << bwl, bh = 1 << mb_height_log2(bsize);
  /* NOTE(review): a run of bare diff-gutter line numbers (2261-2285) appeared
   * here — residue from the extraction tool, not C code. The original lines
   * at those positions were lost; recover them from the upstream file. */
if (cm->frame_type == KEY_FRAME) {
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
adjust_act_zbin(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
} else {
vp9_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter, cm);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
}
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled) {
if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
if (xd->mode_info_context->mbmi.mode == ZEROMV) {
if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
} else if (xd->mode_info_context->mbmi.mode == SPLITMV) {
} else {
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
} else {
cpi->zbin_mode_boost = INTRA_ZBIN_BOOST;
}
}
vp9_update_zbin_extra(cpi, x);
}
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
vp9_build_intra_predictors_sby_s(&x->e_mbd, bsize);
vp9_build_intra_predictors_sbuv_s(&x->e_mbd, bsize);
if (output_enabled)
sum_intra_stats(cpi, x);
int ref_fb_idx;
assert(cm->frame_type != KEY_FRAME);
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.ref_frame_map[cpi->lst_fb_idx];
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.ref_frame_map[cpi->gld_fb_idx];
ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
setup_pred_block(&xd->pre,
&cpi->common.yv12_fb[ref_fb_idx],
mb_row, mb_col,
&xd->scale_factor[0], &xd->scale_factor_uv[0]);
if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
int second_ref_fb_idx;
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->lst_fb_idx];
else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->gld_fb_idx];
second_ref_fb_idx = cpi->common.ref_frame_map[cpi->alt_fb_idx];
setup_pred_block(&xd->second_pre,
&cpi->common.yv12_fb[second_ref_fb_idx],
mb_row, mb_col,
&xd->scale_factor[1], &xd->scale_factor_uv[1]);
vp9_build_inter_predictors_sb(xd, mb_row, mb_col, bsize);
if (!x->skip) {
vp9_subtract_sby_s_c(x->src_diff, src, src_y_stride, dst, dst_y_stride,
bsize);
vp9_subtract_sbuv_s_c(x->src_diff, usrc, vsrc, src_uv_stride,
udst, vdst, dst_uv_stride, bsize);
switch (xd->mode_info_context->mbmi.txfm_size) {
case TX_32X32:
vp9_transform_sby_32x32(x, bsize);
vp9_quantize_sby_32x32(x, bsize);
if (bsize == BLOCK_SIZE_SB64X64) {
vp9_transform_sbuv_32x32(x, bsize);
vp9_quantize_sbuv_32x32(x, bsize);
} else {
vp9_transform_sbuv_16x16(x, bsize);
vp9_quantize_sbuv_16x16(x, bsize);
}
if (x->optimize) {
vp9_optimize_sby_32x32(cm, x, bsize);
if (bsize == BLOCK_SIZE_SB64X64)
vp9_optimize_sbuv_32x32(cm, x, bsize);
else
vp9_optimize_sbuv_16x16(cm, x, bsize);
vp9_inverse_transform_sby_32x32(xd, bsize);
if (bsize == BLOCK_SIZE_SB64X64)
vp9_inverse_transform_sbuv_32x32(xd, bsize);
else
vp9_inverse_transform_sbuv_16x16(xd, bsize);
break;
case TX_16X16:
vp9_transform_sby_16x16(x, bsize);
vp9_quantize_sby_16x16(x, bsize);
if (bsize >= BLOCK_SIZE_SB32X32) {
vp9_transform_sbuv_16x16(x, bsize);
vp9_quantize_sbuv_16x16(x, bsize);
} else {
vp9_transform_sbuv_8x8(x, bsize);
vp9_quantize_sbuv_8x8(x, bsize);
}
if (x->optimize) {
vp9_optimize_sby_16x16(cm, x, bsize);
if (bsize >= BLOCK_SIZE_SB32X32)
vp9_optimize_sbuv_16x16(cm, x, bsize);
else
vp9_optimize_sbuv_8x8(cm, x, bsize);
vp9_inverse_transform_sby_16x16(xd, bsize);
if (bsize >= BLOCK_SIZE_SB32X32)
vp9_inverse_transform_sbuv_16x16(xd, bsize);
else
vp9_inverse_transform_sbuv_8x8(xd, bsize);
break;
case TX_8X8:
vp9_transform_sby_8x8(x, bsize);
vp9_transform_sbuv_8x8(x, bsize);
vp9_quantize_sby_8x8(x, bsize);
vp9_quantize_sbuv_8x8(x, bsize);
if (x->optimize) {
vp9_optimize_sby_8x8(cm, x, bsize);
vp9_optimize_sbuv_8x8(cm, x, bsize);
vp9_inverse_transform_sby_8x8(xd, bsize);
vp9_inverse_transform_sbuv_8x8(xd, bsize);
break;
case TX_4X4:
vp9_transform_sby_4x4(x, bsize);
vp9_transform_sbuv_4x4(x, bsize);
vp9_quantize_sby_4x4(x, bsize);
vp9_quantize_sbuv_4x4(x, bsize);
if (x->optimize) {
vp9_optimize_sby_4x4(cm, x, bsize);
vp9_optimize_sbuv_4x4(cm, x, bsize);
vp9_inverse_transform_sby_4x4(xd, bsize);
vp9_inverse_transform_sbuv_4x4(xd, bsize);
break;
default: assert(0);
vp9_recon_sb_c(xd, bsize);
#if CONFIG_CODE_NONZEROCOUNT
if (bsize == BLOCK_SIZE_SB32X32) {
gather_nzcs_sb32(cm, &x->e_mbd);
} else {
gather_nzcs_sb64(cm, &x->e_mbd);
}
vp9_tokenize_sb(cpi, &x->e_mbd, t, !output_enabled, bsize);
// FIXME(rbultje): not tile-aware (mi - 1)
int mb_skip_context =
(mi - 1)->mbmi.mb_skip_coeff + (mi - mis)->mbmi.mb_skip_coeff;
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
if (output_enabled)
cpi->skip_true_count[mb_skip_context]++;
vp9_reset_sb_tokens_context(xd, bsize);
// copy skip flag on all mb_mode_info contexts in this SB
// if this was a skip at this txfm size
for (n = 1; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
if (mb_col + x_idx < cm->mb_cols && mb_row + y_idx < cm->mb_rows)
mi[x_idx + y_idx * mis].mbmi.mb_skip_coeff = mi->mbmi.mb_skip_coeff;
}
if (output_enabled) {
if (cm->txfm_mode == TX_MODE_SELECT &&
!(mi->mbmi.mb_skip_coeff ||
vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP))) {
if (bsize >= BLOCK_SIZE_SB32X32) {
cpi->txfm_count_32x32p[mi->mbmi.txfm_size]++;
} else {
cpi->txfm_count_16x16p[mi->mbmi.txfm_size]++;
}
TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ? TX_32X32 : cm->txfm_mode;
if (sz == TX_32X32 && bsize < BLOCK_SIZE_SB32X32)
sz = TX_16X16;
for (y = 0; y < bh; y++) {
for (x = 0; x < bw; x++) {
if (mb_col + x < cm->mb_cols && mb_row + y < cm->mb_rows) {
mi[mis * y + x].mbmi.txfm_size = sz;
}
}
}