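// encode_intra_super_block(): encode one 32x32 intra superblock as four
// 16x16 macroblocks. The superblock-wide intra predictors are built once;
// each macroblock is then subtracted, transformed, reconstructed and
// tokenized, and the transform-size counts used for TX_MODE_SELECT are
// updated at the end.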
static void encode_intra_super_block(VP9_COMP *cpi, MACROBLOCK *x,
                                     TOKENEXTRA **t, int mb_col, int mb_row) {
  const int output_enabled = 1;
  int n;
  MACROBLOCKD *xd = &x->e_mbd;
  VP9_COMMON *cm = &cpi->common;
  const uint8_t *src = x->src.y_buffer;
  uint8_t *dst = xd->dst.y_buffer;
  const uint8_t *usrc = x->src.u_buffer;
  uint8_t *udst = xd->dst.u_buffer;
  const uint8_t *vsrc = x->src.v_buffer;
  uint8_t *vdst = xd->dst.v_buffer;
  int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
  int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
  const VP9_ENCODER_RTCD *rtcd = IF_RTCD(&cpi->rtcd);
  TOKENEXTRA *tp[4];
  int skip[4];
  MODE_INFO *mi = xd->mode_info_context;
  ENTROPY_CONTEXT_PLANES ta[4], tl[4];
  if ((cpi->oxcf.tuning == VP8_TUNE_SSIM) && output_enabled) {
    adjust_act_zbin(cpi, x);
    vp9_update_zbin_extra(cpi, x);
  }

  vp9_build_intra_predictors_sby_s(&x->e_mbd);
  vp9_build_intra_predictors_sbuv_s(&x->e_mbd);

  // Encode the four 16x16 macroblocks that make up this superblock.
  for (n = 0; n < 4; n++) {
    int x_idx = n & 1, y_idx = n >> 1;

    xd->above_context = cm->above_context + mb_col + (n & 1);
    xd->left_context = cm->left_context + (n >> 1);
    vp9_subtract_mby_s_c(x->src_diff,
                         src + x_idx * 16 + y_idx * 16 * src_y_stride,
                         src_y_stride,
                         dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
                         dst_y_stride);
    vp9_subtract_mbuv_s_c(x->src_diff,
                          usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
                          vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
                          src_uv_stride,
                          udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
                          vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
                          dst_uv_stride);
    vp9_fidct_mb(x, rtcd);
    vp9_recon_mby_s_c(xd, dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
    vp9_recon_mbuv_s_c(xd,
                       udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
                       vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);
    if (output_enabled) {
      memcpy(&ta[n], xd->above_context, sizeof(ta[n]));
      memcpy(&tl[n], xd->left_context, sizeof(tl[n]));
      tp[n] = *t;
      xd->mode_info_context = mi + x_idx + y_idx * cm->mode_info_stride;
      vp9_tokenize_mb(cpi, xd, t, 0);
      skip[n] = xd->mode_info_context->mbmi.mb_skip_coeff;
    }
  }
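  // After all four macroblocks are coded, update the intra statistics and
  // the per-superblock transform-size bookkeeping.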
  if (output_enabled) {
    int segment_id = mi->mbmi.segment_id;

    xd->mode_info_context = mi;
    sum_intra_stats(cpi, x);
    update_sb_skip_coeff_state(cpi, x, ta, tl, tp, t, skip);

    if (cm->txfm_mode == TX_MODE_SELECT &&
        !((cm->mb_no_coeff_skip && skip[0] && skip[1] && skip[2] && skip[3]) ||
          (vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
           vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
      cpi->txfm_count[mi->mbmi.txfm_size]++;
    } else {
      TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ? TX_16X16 : cm->txfm_mode;

      mi->mbmi.txfm_size = sz;
      if (mb_col < cm->mb_cols - 1)
        mi[1].mbmi.txfm_size = sz;
      if (mb_row < cm->mb_rows - 1) {
        mi[cm->mode_info_stride].mbmi.txfm_size = sz;
        if (mb_col < cm->mb_cols - 1)
          mi[cm->mode_info_stride + 1].mbmi.txfm_size = sz;
      }
    }
  }
}
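// encode_intra_macro_block(): encode a single 16x16 intra macroblock.
// The luma/chroma prediction and residual coding depend on the prediction
// mode (B_PRED, I8X8_PRED or a whole-MB mode); when output is enabled the
// block is tokenized and the transform-size statistics are updated,
// otherwise a dry-run tokenization is performed.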
static void encode_intra_macro_block(VP9_COMP *cpi, MACROBLOCK *x,
                                     TOKENEXTRA **t, int output_enabled) {
  MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;

  if ((cpi->oxcf.tuning == VP8_TUNE_SSIM) && output_enabled) {
    adjust_act_zbin(cpi, x);
    vp9_update_zbin_extra(cpi, x);
  }

  if (mbmi->mode == I8X8_PRED) {
    vp9_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
    vp9_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
  } else if (mbmi->mode == B_PRED) {
    vp9_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
  } else {
    vp9_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
  }

  if (mbmi->mode != I8X8_PRED) {
    vp9_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
  }

  if (output_enabled) {
    int segment_id = mbmi->segment_id;

    vp9_tokenize_mb(cpi, &x->e_mbd, t, 0);

    if (cpi->common.txfm_mode == TX_MODE_SELECT &&
        !((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
          (vp9_segfeature_active(&x->e_mbd, segment_id, SEG_LVL_EOB) &&
           vp9_get_segdata(&x->e_mbd, segment_id, SEG_LVL_EOB) == 0))) {
      if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED) {
        cpi->txfm_count[mbmi->txfm_size]++;
      } else if (mbmi->mode == I8X8_PRED) {
        cpi->txfm_count_8x8p[mbmi->txfm_size]++;
      }
    } else if (cpi->common.txfm_mode >= ALLOW_16X16 && mbmi->mode <= TM_PRED) {
      mbmi->txfm_size = TX_16X16;
    } else if (cpi->common.txfm_mode >= ALLOW_8X8 && mbmi->mode != B_PRED) {
      mbmi->txfm_size = TX_8X8;
    } else {
      mbmi->txfm_size = TX_4X4;
    }
  } else {
    vp9_tokenize_mb(cpi, &x->e_mbd, t, 1);
  }
}
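// encode_inter_macroblock(): encode a single 16x16 macroblock on an inter
// frame. It applies the zbin adjustments, sets the reference-frame
// prediction flag, runs intra or inter prediction and residual coding as
// appropriate, tokenizes (or stuffs) the block, and updates the skip and
// transform-size statistics.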
static void encode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
                                    TOKENEXTRA **t, int recon_yoffset,
                                    int recon_uvoffset, int output_enabled,
                                    int mb_col, int mb_row) {
  VP9_COMMON *cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
  unsigned char *segment_id = &mbmi->segment_id;
  int seg_ref_active;
  unsigned char ref_pred_flag;

  x->skip = 0;
#if CONFIG_SUPERBLOCKS
  assert(!xd->mode_info_context->mbmi.encoded_as_sb);
#endif

  vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    // Adjust the zbin based on this MB rate.
    adjust_act_zbin(cpi, x);
  }
  {
    // Experimental code. Special case for gf and arf zeromv modes.
    // Increase zbin size to suppress noise
    cpi->zbin_mode_boost = 0;
    if (cpi->zbin_mode_boost_enabled) {
      if (mbmi->ref_frame != INTRA_FRAME) {
        if (mbmi->mode == ZEROMV) {
          if (mbmi->ref_frame != LAST_FRAME)
            cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
          else
            cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
        } else if (mbmi->mode == SPLITMV)
          cpi->zbin_mode_boost = 0;
        else
          cpi->zbin_mode_boost = MV_ZBIN_BOOST;
      }
    }

    vp9_update_zbin_extra(cpi, x);
  }

  seg_ref_active = vp9_segfeature_active(xd, *segment_id, SEG_LVL_REF_FRAME);

  // SET VARIOUS PREDICTION FLAGS

  // Did the chosen reference frame match its predicted value.
  ref_pred_flag = ((mbmi->ref_frame == vp9_get_pred_ref(cm, xd)));
  vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
  if (mbmi->ref_frame == INTRA_FRAME) {
    if (mbmi->mode == B_PRED) {
      vp9_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
      vp9_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
    } else if (mbmi->mode == I8X8_PRED) {
      vp9_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
      vp9_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
    } else {
      vp9_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
      vp9_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
    }

    if (output_enabled)
      sum_intra_stats(cpi, x);
  } else {
    int ref_fb_idx;

    if (mbmi->ref_frame == LAST_FRAME)
      ref_fb_idx = cpi->common.lst_fb_idx;
    else if (mbmi->ref_frame == GOLDEN_FRAME)
      ref_fb_idx = cpi->common.gld_fb_idx;
    else
      ref_fb_idx = cpi->common.alt_fb_idx;

    xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
    xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
    xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

    if (mbmi->second_ref_frame) {
      int second_ref_fb_idx;

      if (mbmi->second_ref_frame == LAST_FRAME)
        second_ref_fb_idx = cpi->common.lst_fb_idx;
      else if (mbmi->second_ref_frame == GOLDEN_FRAME)
        second_ref_fb_idx = cpi->common.gld_fb_idx;
      else
        second_ref_fb_idx = cpi->common.alt_fb_idx;

      xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
                                    recon_yoffset;
      xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
                                    recon_uvoffset;
      xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
                                    recon_uvoffset;
    }
    if (!x->skip) {
      vp9_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);

      // Clear mb_skip_coeff if mb_no_coeff_skip is not set
      if (!cpi->common.mb_no_coeff_skip)
        mbmi->mb_skip_coeff = 0;
    } else {
      vp9_build_1st_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                             xd->dst.u_buffer, xd->dst.v_buffer,
                                             xd->dst.y_stride,
                                             xd->dst.uv_stride);
    }
  }

  if (!x->skip) {
#ifdef ENC_DEBUG
    if (enc_debug) {
      int i;

      printf("Segment=%d [%d, %d]: %d %d:\n", mbmi->segment_id, mb_col_debug,
             mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
      for (i = 0; i < 400; i++) {
        printf("%3d ", xd->qcoeff[i]);
        if (i % 16 == 15) printf("\n");
      }
      printf("\n");
      printf("eobs = ");
      for (i = 0; i < 25; i++)
        printf("%d:%d ", i, xd->block[i].eob);
      printf("\n");
      fflush(stdout);
    }
#endif

    vp9_tokenize_mb(cpi, xd, t, !output_enabled);

#ifdef ENC_DEBUG
    if (enc_debug) {
      printf("Tokenized\n");
      fflush(stdout);
    }
#endif
  } else {
    int mb_skip_context =
      cpi->common.mb_no_coeff_skip ?
        (x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
        (x->e_mbd.mode_info_context - cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
        0;

    if (cpi->common.mb_no_coeff_skip) {
      if (output_enabled)
        cpi->skip_true_count[mb_skip_context]++;
      vp9_fix_contexts(xd);
    } else {
      vp9_stuff_mb(cpi, xd, t, !output_enabled);
      if (output_enabled)
        cpi->skip_false_count[mb_skip_context]++;
    }
  }
  if (output_enabled) {
    int segment_id = mbmi->segment_id;

    if (cpi->common.txfm_mode == TX_MODE_SELECT &&
        !((cpi->common.mb_no_coeff_skip && mbmi->mb_skip_coeff) ||
          (vp9_segfeature_active(&x->e_mbd, segment_id, SEG_LVL_EOB) &&
           vp9_get_segdata(&x->e_mbd, segment_id, SEG_LVL_EOB) == 0))) {
      if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
          mbmi->mode != SPLITMV) {
        cpi->txfm_count[mbmi->txfm_size]++;
      } else if (mbmi->mode == I8X8_PRED ||
                 (mbmi->mode == SPLITMV &&
                  mbmi->partitioning != PARTITIONING_4X4)) {
        cpi->txfm_count_8x8p[mbmi->txfm_size]++;
      }
    } else if (mbmi->mode != B_PRED && mbmi->mode != I8X8_PRED &&
               mbmi->mode != SPLITMV && cpi->common.txfm_mode >= ALLOW_16X16) {
      mbmi->txfm_size = TX_16X16;
    } else if (mbmi->mode != B_PRED &&
               !(mbmi->mode == SPLITMV &&
                 mbmi->partitioning == PARTITIONING_4X4) &&
               cpi->common.txfm_mode >= ALLOW_8X8) {
      mbmi->txfm_size = TX_8X8;
    } else {
      mbmi->txfm_size = TX_4X4;
    }
  }
}
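// encode_inter_superblock(): encode one 32x32 superblock on an inter frame.
// The prediction (intra or 32x32 inter) is built for the whole superblock,
// then each of the four 16x16 macroblocks is subtracted, transformed,
// reconstructed and tokenized, and finally the skip and transform-size
// statistics for the superblock are updated.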
static void encode_inter_superblock(VP9_COMP *cpi, MACROBLOCK *x,
                                    TOKENEXTRA **t, int recon_yoffset,
                                    int recon_uvoffset,
                                    int mb_col, int mb_row) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const uint8_t *src = x->src.y_buffer;
  uint8_t *dst = xd->dst.y_buffer;
  const uint8_t *usrc = x->src.u_buffer;
  uint8_t *udst = xd->dst.u_buffer;
  const uint8_t *vsrc = x->src.v_buffer;
  uint8_t *vdst = xd->dst.v_buffer;
  int src_y_stride = x->src.y_stride, dst_y_stride = xd->dst.y_stride;
  int src_uv_stride = x->src.uv_stride, dst_uv_stride = xd->dst.uv_stride;
  const VP9_ENCODER_RTCD *rtcd = IF_RTCD(&cpi->rtcd);
  int seg_ref_active;
  unsigned char ref_pred_flag;
  int n;
  TOKENEXTRA *tp[4];
  int skip[4];
  MODE_INFO *mi = x->e_mbd.mode_info_context;
  unsigned int segment_id = mi->mbmi.segment_id;
  ENTROPY_CONTEXT_PLANES ta[4], tl[4];

  x->skip = 0;

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    // Adjust the zbin based on this MB rate.
    adjust_act_zbin(cpi, x);
  }

  {
    // Experimental code. Special case for gf and arf zeromv modes.
    // Increase zbin size to suppress noise
    cpi->zbin_mode_boost = 0;
    if (cpi->zbin_mode_boost_enabled) {
      if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
        if (xd->mode_info_context->mbmi.mode == ZEROMV) {
          if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
            cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
          else
            cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
        } else if (xd->mode_info_context->mbmi.mode == SPLITMV)
          cpi->zbin_mode_boost = 0;
        else
          cpi->zbin_mode_boost = MV_ZBIN_BOOST;
      }
    }

    vp9_update_zbin_extra(cpi, x);
  }

  seg_ref_active = vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME);

  // SET VARIOUS PREDICTION FLAGS

  // Did the chosen reference frame match its predicted value.
  ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
                    vp9_get_pred_ref(cm, xd)));
  vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);

  if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
    vp9_build_intra_predictors_sby_s(&x->e_mbd);
    vp9_build_intra_predictors_sbuv_s(&x->e_mbd);
  } else {
    int ref_fb_idx;

    if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
      ref_fb_idx = cpi->common.lst_fb_idx;
    else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
      ref_fb_idx = cpi->common.gld_fb_idx;
    else
      ref_fb_idx = cpi->common.alt_fb_idx;

    xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
    xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
    xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;

    if (xd->mode_info_context->mbmi.second_ref_frame) {
      int second_ref_fb_idx;

      if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
        second_ref_fb_idx = cpi->common.lst_fb_idx;
      else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
        second_ref_fb_idx = cpi->common.gld_fb_idx;
      else
        second_ref_fb_idx = cpi->common.alt_fb_idx;

      xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
                                    recon_yoffset;
      xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
                                    recon_uvoffset;
      xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
                                    recon_uvoffset;
    }

    vp9_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
                                       xd->dst.u_buffer, xd->dst.v_buffer,
                                       xd->dst.y_stride, xd->dst.uv_stride);
  }
  // Encode the four 16x16 macroblocks that make up this superblock.
  for (n = 0; n < 4; n++) {
    int x_idx = n & 1, y_idx = n >> 1;

    xd->left_context = cm->left_context + y_idx;
    xd->above_context = cm->above_context + mb_col + x_idx;
    memcpy(&ta[n], xd->above_context, sizeof(ta[n]));
    memcpy(&tl[n], xd->left_context, sizeof(tl[n]));
    tp[n] = *t;
    xd->mode_info_context = mi + x_idx + y_idx * cm->mode_info_stride;

    vp9_subtract_mby_s_c(x->src_diff,
                         src + x_idx * 16 + y_idx * 16 * src_y_stride,
                         src_y_stride,
                         dst + x_idx * 16 + y_idx * 16 * dst_y_stride,
                         dst_y_stride);
    vp9_subtract_mbuv_s_c(x->src_diff,
                          usrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
                          vsrc + x_idx * 8 + y_idx * 8 * src_uv_stride,
                          src_uv_stride,
                          udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
                          vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
                          dst_uv_stride);
    vp9_fidct_mb(x, rtcd);  // transform, quantize and reconstruct this MB
    vp9_recon_mby_s_c(&x->e_mbd,
                      dst + x_idx * 16 + y_idx * 16 * dst_y_stride);
    vp9_recon_mbuv_s_c(&x->e_mbd,
                       udst + x_idx * 8 + y_idx * 8 * dst_uv_stride,
                       vdst + x_idx * 8 + y_idx * 8 * dst_uv_stride);

    if (!x->skip) {
      if (output_enabled) {
        vp9_tokenize_mb(cpi, &x->e_mbd, t, 0);
        skip[n] = xd->mode_info_context->mbmi.mb_skip_coeff;
      }
    } else {
      int mb_skip_context =
        cpi->common.mb_no_coeff_skip ?
          (x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
          (x->e_mbd.mode_info_context - cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
          0;

      xd->mode_info_context->mbmi.mb_skip_coeff = skip[n] = 1;
      // TODO(rbultje) this should be done per-sb instead of per-mb?
      vp9_fix_contexts(xd);
      vp9_stuff_mb(cpi, xd, t, 0);
      // TODO(rbultje) this should be done per-sb instead of per-mb?
      cpi->skip_false_count[mb_skip_context]++;
    }
  }
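  // Update the per-superblock skip state and the transform-size counts
  // used when the frame-level transform mode is TX_MODE_SELECT.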
  xd->mode_info_context = mi;
  update_sb_skip_coeff_state(cpi, x, ta, tl, tp, t, skip);

  if (cm->txfm_mode == TX_MODE_SELECT &&
      !((cm->mb_no_coeff_skip && skip[0] && skip[1] && skip[2] && skip[3]) ||
        (vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
         vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0))) {
    cpi->txfm_count[mi->mbmi.txfm_size]++;
  } else {
    TX_SIZE sz = (cm->txfm_mode == TX_MODE_SELECT) ? TX_16X16 : cm->txfm_mode;

    mi->mbmi.txfm_size = sz;
    if (mb_col < cm->mb_cols - 1)
      mi[1].mbmi.txfm_size = sz;
    if (mb_row < cm->mb_rows - 1) {
      mi[cm->mode_info_stride].mbmi.txfm_size = sz;
      if (mb_col < cm->mb_cols - 1)
        mi[cm->mode_info_stride + 1].mbmi.txfm_size = sz;
    }
  }
}