// GF active flags data structure
x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
// Activity map pointer
x->mb_activity_ptr = cpi->mb_activity_map;
x->act_zbin_adj = 0;
cpi->seg0_idx = 0;
vpx_memset(cpi->ref_pred_count, 0, sizeof(cpi->ref_pred_count));
xd->mode_info_context = cm->mi;
xd->mode_info_stride = cm->mode_info_stride;
xd->prev_mode_info_context = cm->prev_mi;
xd->frames_since_golden = cm->frames_since_golden;
xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp8_init_mbmode_probs(cm);
// Copy data over into macro block data structures.
x->src = * cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
// set up frame for intra coded blocks
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
vp8_zero(cpi->count_mb_ref_frame_usage)
vp8_zero(cpi->bmode_count)
vp8_zero(cpi->ymode_count)
vp8_zero(cpi->i8x8_mode_count)
vp8_zero(cpi->y_uv_mode_count)
vp8_zero(cpi->sub_mv_ref_count)
vp8_zero(cpi->mbsplit_count)
vp8_zero(cpi->common.fc.mv_ref_ct)
vp8_zero(cpi->common.fc.mv_ref_ct_a)
// vp8_zero(cpi->uv_mode_count)
#if CONFIG_HIGH_PRECISION_MV
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
xd->fullpixel_mask = 0xffffffff;
if (cm->full_pixel)
xd->fullpixel_mask = 0xfffffff8;
static void encode_frame_internal(VP8_COMP *cpi) {
int mb_row;
MACROBLOCK *const x = & cpi->mb;
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
// Compute a modified set of reference frame probabilities to use when
// prediction fails. These are based on the current general estimates for
// this frame which may be updated with each iteration of the recode loop.
compute_mod_refprobs(cm);
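// Illustrative note (an assumption about intent, not the body of
// compute_mod_refprobs()): the "modified" probabilities can be read as the
// baseline reference-frame probabilities renormalised with the predicted
// reference excluded. For example, with 8-bit weights
// intra:last:gf:arf = 25:128:60:43 (sum 256), conditioning on "not LAST"
// rescales 25:60:43 by 256/128 so the remaining entries again sum to 256.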
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n");
fclose(statsfile);
}
totalrate = 0;
// Functions setup for all frame types so we can use MC in AltRef
if (cm->mcomp_filter_type == SIXTAP) {
xd->subpixel_predict = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap16x16);
xd->subpixel_predict_avg = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap_avg16x16);
}
#if CONFIG_ENHANCED_INTERP
else if (cm->mcomp_filter_type == EIGHTTAP) {
xd->subpixel_predict = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap16x16);
xd->subpixel_predict_avg = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap_avg16x16);
} else if (cm->mcomp_filter_type == EIGHTTAP_SHARP) {
xd->subpixel_predict = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap4x4_sharp);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap8x4_sharp);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap8x8_sharp);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap16x16_sharp);
xd->subpixel_predict_avg = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap_avg4x4_sharp);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap_avg8x8_sharp);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, eighttap_avg16x16_sharp);
}
else {
xd->subpixel_predict = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear16x16);
xd->subpixel_predict_avg = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear_avg16x16);
}
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
cpi->prediction_error = 0;
cpi->intra_error = 0;
cpi->skip_true_count[0] = cpi->skip_true_count[1] = cpi->skip_true_count[2] = 0;
cpi->skip_false_count[0] = cpi->skip_false_count[1] = cpi->skip_false_count[2] = 0;
if (cm->current_video_frame == 0) {
// Initially assume that we'll signal the prediction filter
// state at the frame level and that it is off.
cpi->common.pred_filter_mode = 0;
cpi->common.prob_pred_filter_off = 128;
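// Note: this is an 8-bit probability, so 128/256 == 0.5, i.e. the filter
// off/on decision starts as an even guess until real counts are gathered.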
}
cpi->pred_filter_on_count = 0;
cpi->pred_filter_off_count = 0;
// Experimental code
cpi->frame_distortion = 0;
cpi->last_mb_distortion = 0;
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
#if CONFIG_HIGH_PRECISION_MV
vp8_zero(cpi->coef_counts);
vp8_zero(cpi->coef_counts_8x8);
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vp8cx_initialize_me_consts(cpi, cm->base_qindex);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Initialize encode frame context.
// Build a frame level activity map
build_activity_map(cpi);
}
// Re-initialize the encode frame context.
init_encode_frame_mb_context(cpi);
cpi->rd_single_diff = cpi->rd_comp_diff = cpi->rd_hybrid_diff = 0;
vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
{
// For each row of SBs in the frame
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 2) {
int offset = (cm->mb_cols + 1) & ~0x1;
// adjust to the next row of SBs
x->src.y_buffer += 32 * x->src.y_stride - 16 * offset;
x->src.u_buffer += 16 * x->src.uv_stride - 8 * offset;
x->src.v_buffer += 16 * x->src.uv_stride - 8 * offset;
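// Worked example (illustrative): offset rounds mb_cols up to the even number
// of MB columns covered by a row of 2x2-MB superblocks, e.g. 45 -> 46.
// Assuming the per-MB encoding loop (elided here) advanced the luma pointer
// by 16 columns per macroblock, adding 32 * y_stride - 16 * 46 moves it down
// one 32-line SB row and back to the left edge of the frame.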
}
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
}
// 256 rate units to the bit,
// projected_frame_size in units of BYTES
cpi->projected_frame_size = totalrate >> 8;
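// Worked example (illustrative): totalrate == 512000 rate units gives
// 512000 >> 8 == 2000 for projected_frame_size, i.e. the division by 256
// described in the comment above.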
// Keep record of the total distortion this time around for future use
cpi->last_frame_distortion = cpi->frame_distortion;
static int check_dual_ref_flags(VP8_COMP *cpi) {
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int ref_flags = cpi->ref_frame_flags;
if (segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
if ((ref_flags & (VP8_LAST_FLAG | VP8_GOLD_FLAG)) == (VP8_LAST_FLAG | VP8_GOLD_FLAG) &&
check_segref(xd, 1, LAST_FRAME))
return 1;
if ((ref_flags & (VP8_GOLD_FLAG | VP8_ALT_FLAG)) == (VP8_GOLD_FLAG | VP8_ALT_FLAG) &&
check_segref(xd, 1, GOLDEN_FRAME))
return 1;
if ((ref_flags & (VP8_ALT_FLAG | VP8_LAST_FLAG)) == (VP8_ALT_FLAG | VP8_LAST_FLAG) &&
check_segref(xd, 1, ALTREF_FRAME))
return 1;
return 0;
} else {
return (!!(ref_flags & VP8_GOLD_FLAG) +
!!(ref_flags & VP8_LAST_FLAG) +
!!(ref_flags & VP8_ALT_FLAG)) >= 2;
}
}
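// e.g. in the non-segmented branch above, ref_flags == (VP8_LAST_FLAG |
// VP8_GOLD_FLAG) counts 1 + 1 + 0 = 2 and dual-reference prediction is
// considered available, while VP8_ALT_FLAG alone counts 1 and is not.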
void vp8_encode_frame(VP8_COMP *cpi) {
if (cpi->sf.RD) {
int frame_type, pred_type;
int single_diff, comp_diff, hybrid_diff;
/*
* This code does a single RD pass over the whole frame assuming
* either compound, single or hybrid prediction as per whatever has
* worked best for that type of frame in the past.
* It also predicts whether another coding mode would have worked
* better that this coding mode. If that is the case, it remembers
* that for subsequent frames. If the difference is above a certain
* threshold, it will actually re-encode the current frame using
* that different coding mode.
*/
if (cpi->common.frame_type == KEY_FRAME)
frame_type = 0;
else if (cpi->is_src_frame_alt_ref && cpi->common.refresh_golden_frame)
frame_type = 3;
else if (cpi->common.refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
frame_type = 1;
else
frame_type = 2;
if (frame_type == 3)
pred_type = SINGLE_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][1] >
cpi->rd_prediction_type_threshes[frame_type][0] &&
cpi->rd_prediction_type_threshes[frame_type][1] >
cpi->rd_prediction_type_threshes[frame_type][2] &&
check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
pred_type = COMP_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][0] >
cpi->rd_prediction_type_threshes[frame_type][2])
pred_type = SINGLE_PREDICTION_ONLY;
else
pred_type = HYBRID_PREDICTION;
cpi->common.comp_pred_mode = pred_type;
encode_frame_internal(cpi);
single_diff = cpi->rd_single_diff / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][0] += single_diff;
cpi->rd_prediction_type_threshes[frame_type][0] >>= 1;
comp_diff = cpi->rd_comp_diff / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][1] += comp_diff;
cpi->rd_prediction_type_threshes[frame_type][1] >>= 1;
hybrid_diff = cpi->rd_hybrid_diff / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][2] += hybrid_diff;
cpi->rd_prediction_type_threshes[frame_type][2] >>= 1;
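// Note (illustrative): each update above is a running average with weight
// 1/2, thresh = (thresh + per_mb_diff) >> 1. Starting from a threshold of
// 100 and per-frame diffs of 20, 20, 20 it moves 100 -> 60 -> 40 -> 30, so
// the pred_type selection tracks recent frames of the same type most
// strongly.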
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
int single_count_zero = 0;
int comp_count_zero = 0;
int i;
for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
single_count_zero += cpi->single_pred_count[i];
comp_count_zero += cpi->comp_pred_count[i];
}
if (comp_count_zero == 0) {
cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
} else if (single_count_zero == 0) {
cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
}
void vp8_setup_block_ptrs(MACROBLOCK *x) {
int r, c;
int i;
for (r = 0; r < 4; r++) {
for (c = 0; c < 4; c++) {
x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
for (i = 0; i < 25; i++) {
x->block[i].coeff = x->coeff + i * 16;
}
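// Layout note for vp8_setup_block_ptrs() (illustrative): src_diff holds the
// 16x16 luma difference in entries 0..255 (row pitch 16), the 8x8 U and V
// differences at offsets 256 and 320 (row pitch 8), with block 24 (the
// second-order DC block) following; coeff is grouped 16 values per block,
// hence the i * 16 step and the 25 * 16 == 400 coefficients seen elsewhere.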
void vp8_build_block_offsets(MACROBLOCK *x) {
int block = 0;
int br, bc;
vp8_build_block_doffsets(&x->e_mbd);
// y blocks
x->thismb_ptr = &x->thismb[0];
for (br = 0; br < 4; br++) {
for (bc = 0; bc < 4; bc++) {
BLOCK *this_block = &x->block[block];
// this_block->base_src = &x->src.y_buffer;
// this_block->src_stride = x->src.y_stride;
// this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->thismb_ptr;
this_block->src_stride = 16;
this_block->src = 4 * br * 16 + 4 * bc;
++block;
}
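// e.g. luma block (br = 2, bc = 1) starts at offset 2 * 4 * 16 + 1 * 4 == 132
// within the 16x16 thismb copy of the source macroblock.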
// u blocks
for (br = 0; br < 2; br++) {
for (bc = 0; bc < 2; bc++) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.u_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
// v blocks
for (br = 0; br < 2; br++) {
for (bc = 0; bc < 2; bc++) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.v_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) {
const MACROBLOCKD *xd = & x->e_mbd;
const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
++ (is_key ? uv_modes : inter_uv_modes)[uvm];
++ uv_modes_y[m][uvm];
if (m == B_PRED) {
unsigned int *const bct = is_key ? b_modes : inter_b_modes;
int b = 0;
do {
++ bct[xd->block[b].bmi.as_mode.first];
} while (++b < 16);
}
if (m == I8X8_PRED) {
i8x8_modes[xd->block[0].bmi.as_mode.first]++;
i8x8_modes[xd->block[2].bmi.as_mode.first]++;
i8x8_modes[xd->block[8].bmi.as_mode.first]++;
i8x8_modes[xd->block[10].bmi.as_mode.first]++;
}
++cpi->ymode_count[m];
if (m != I8X8_PRED)
++cpi->y_uv_mode_count[m][uvm];
else {
cpi->i8x8_mode_count[xd->block[0].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[2].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[8].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[10].bmi.as_mode.first]++;
}
if (m == B_PRED) {
int b = 0;
do {
++ cpi->bmode_count[xd->block[b].bmi.as_mode.first];
} while (++b < 16);
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) {
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
if (act > cpi->activity_avg)
x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
else
x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
}
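// Worked example for adjust_act_zbin() above (illustrative): with
// activity_avg == A and act == 4 * A, a = 8 * A and b = 17 * A, so
// act_zbin_adj = (17 * A + 4 * A) / (8 * A) - 1 = 2 - 1 = +1 (a wider zero
// bin for busy macroblocks); with act == A / 4, a = 4.25 * A and b = 2 * A,
// so (a + b / 2) / b truncates to 2 and act_zbin_adj = 1 - 2 = -1 (a
// narrower bin for flat, visually sensitive areas).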
void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
MACROBLOCK *x,
TOKENEXTRA **t,
int output_enabled) {
if ((cpi->oxcf.tuning == VP8_TUNE_SSIM) && output_enabled) {
adjust_act_zbin(cpi, x);
vp8_update_zbin_extra(cpi, x);
}
/* test code: set transform size based on mode selection */
if (cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != B_PRED) {
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
cpi->t8x8_count++;
} else {
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count++;
}
if (x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED) {
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
} else if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
else
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
if (x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED)
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
if (output_enabled) {
// Tokenize
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t);
}
}
#ifdef SPEEDSTATS
extern int cnt_pm;
#endif
extern void vp8_fix_contexts(MACROBLOCKD *x);
VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset,
int output_enabled
) {
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
unsigned char *segment_id = &xd->mode_info_context->mbmi.segment_id;
int seg_ref_active;
unsigned char ref_pred_flag;
x->skip = 0;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
}
{
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled) {
if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
if (xd->mode_info_context->mbmi.mode == ZEROMV) {
if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
} else if (xd->mode_info_context->mbmi.mode == SPLITMV)
cpi->zbin_mode_boost = 0;
else
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
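// i.e. ZEROMV blocks predicted from the golden/alt-ref frame use
// GF_ZEROMV_ZBIN_BOOST, ZEROMV from the last frame uses
// LF_ZEROMV_ZBIN_BOOST, SPLITMV gets no boost and other inter modes use
// MV_ZBIN_BOOST; the boost widens the quantiser zero bin so small
// noise-like coefficients on static content are dropped rather than coded.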
vp8_update_zbin_extra(cpi, x);
}
seg_ref_active = segfeature_active(xd, *segment_id, SEG_LVL_REF_FRAME);
// SET VARIOUS PREDICTION FLAGS
// Did the chosen reference frame match its predicted value.
ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
get_pred_ref(cm, xd)));
set_pred_flag(xd, PRED_REF, ref_pred_flag);
/* test code: set transform size based on mode selection */
if (cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != B_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
cpi->t8x8_count++;
} else {
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count++;
}
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
if (xd->mode_info_context->mbmi.mode == B_PRED) {
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
} else if (xd->mode_info_context->mbmi.mode == I8X8_PRED) {
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
} else {
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
if (output_enabled)
sum_intra_stats(cpi, x);
} else {
int ref_fb_idx;
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
else
ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
if (xd->mode_info_context->mbmi.second_ref_frame) {
int second_ref_fb_idx;
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.gld_fb_idx;
else
second_ref_fb_idx = cpi->common.alt_fb_idx;
xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
recon_yoffset;
xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
recon_uvoffset;
xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
recon_uvoffset;
}
if (!x->skip) {
vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
// Clear mb_skip_coeff if mb_no_coeff_skip is not set
if (!cpi->common.mb_no_coeff_skip)
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
} else {
vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
if (enc_debug) {
int i;
printf("Segment=%d [%d, %d]: %d %d:\n", x->e_mbd.mode_info_context->mbmi.segment_id, mb_col_debug, mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
for (i = 0; i < 400; i++) {
printf("%3d ", xd->qcoeff[i]);
if (i % 16 == 15) printf("\n");
}
printf("\n");
printf("eobs = ");
for (i = 0; i < 25; i++)
printf("%d:%d ", i, xd->block[i].eob);
printf("\n");
fflush(stdout);
}
if (enc_debug) {
printf("Tokenized\n");
fflush(stdout);
}
#endif
} else {
int mb_skip_context =
cpi->common.mb_no_coeff_skip ?
(x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
(x->e_mbd.mode_info_context - cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
0;
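// The skip context is therefore 0, 1 or 2 according to how many of the left
// and above macroblocks were themselves coded as skipped; the
// skip_true_count / skip_false_count tallies below are binned on that value.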
if (cpi->common.mb_no_coeff_skip) {
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
cpi->skip_true_count[mb_skip_context]++;
vp8_fix_contexts(xd);
} else {
vp8_stuff_mb(cpi, xd, t);
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
cpi->skip_false_count[mb_skip_context]++;