x->src = * cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
// set up frame for intra coded blocks
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
vp8_build_block_offsets(x);
vp8_setup_block_dptrs(&x->e_mbd);
vp8_setup_block_ptrs(x);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
xd->left_context = &cm->left_context;
vp8_zero(cpi->count_mb_ref_frame_usage)
vp8_zero(cpi->ymode_count)
vp8_zero(cpi->uv_mode_count)
x->mvc = cm->fc.mvc;
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
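// Motion vector components are stored in 1/8-pel units; masking with
// 0xfffffff8 clears the three fractional bits to force full-pixel motion,
// while 0xffffffff leaves sub-pel precision intact.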
xd->fullpixel_mask = 0xffffffff;
if(cm->full_pixel)
xd->fullpixel_mask = 0xfffffff8;
}
static void encode_frame_internal(VP8_COMP *cpi)
{
int mb_row;
MACROBLOCK *const x = & cpi->mb;
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
TOKENEXTRA *tp = cpi->tok;
int totalrate;
// Compute a modified set of reference frame probabilities to use when
// prediction fails. These are based on the current general estimates for
// this frame which may be updated with each iteration of the recode loop.
compute_mod_refprobs( cm );
#if 0
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n" );
fclose(statsfile);
}
#endif
// Functions setup for all frame types so we can use MC in AltRef
if (cm->mcomp_filter_type == SIXTAP)
{
xd->subpixel_predict = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap16x16);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap_avg16x16);
}
else
{
xd->subpixel_predict = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear16x16);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear_avg16x16);
}
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
cpi->prediction_error = 0;
cpi->intra_error = 0;
cpi->skip_true_count = 0;
cpi->skip_false_count = 0;
#if 0
// Experimental code
cpi->frame_distortion = 0;
cpi->last_mb_distortion = 0;
#endif
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
vp8_zero(cpi->MVcount);
vp8_zero(cpi->coef_counts);
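// The per-frame quantizer, rate-distortion costs and motion-search
// constants are all derived from the base Q index (plus the Y1 DC delta
// for the RD constants).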
vp8cx_frame_init_quantizer(cpi);
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vp8cx_initialize_me_consts(cpi, cm->base_qindex);
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
// Initialize encode frame context.
init_encode_frame_mb_context(cpi);
// Build a frame level activity map
build_activity_map(cpi);
}
// Re-initialize the encode frame context.
init_encode_frame_mb_context(cpi);
cpi->rd_single_diff = cpi->rd_dual_diff = cpi->rd_hybrid_diff = 0;
vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
vpx_memset(cpi->dual_pred_count, 0, sizeof(cpi->dual_pred_count));
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
{
#if CONFIG_SUPERBLOCKS
// for each superblock row in the image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row+=2)
{
int offset = (cm->mb_cols + 1) & ~0x1;
encode_sb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);
// adjust to the next row of SBs (a SB row covers two MB rows)
x->src.y_buffer += 32 * x->src.y_stride - 16 * offset;
x->src.u_buffer += 16 * x->src.uv_stride - 8 * offset;
x->src.v_buffer += 16 * x->src.uv_stride - 8 * offset;
}
#else
// for each macroblock row in the image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
}
#endif
cpi->tok_count = tp - cpi->tok;
}
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
}
// 256 rate units to the bit
cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of bits
// Make a note of the percentage MBs coded Intra.
if (cm->frame_type == KEY_FRAME)
{
cpi->this_frame_percent_intra = 100;
}
else
{
int tot_modes;
tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
+ cpi->count_mb_ref_frame_usage[LAST_FRAME]
+ cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
+ cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
if (tot_modes)
cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
}
#if 0
{
int cnt = 0;
int flag[2] = {0, 0};
for (cnt = 0; cnt < MVPcount; cnt++)
{
if (cm->fc.pre_mvc[0][cnt] != cm->fc.mvc[0][cnt])
{
flag[0] = 1;
vpx_memcpy(cm->fc.pre_mvc[0], cm->fc.mvc[0], MVPcount);
break;
}
}
for (cnt = 0; cnt < MVPcount; cnt++)
{
if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt])
{
flag[1] = 1;
vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount);
break;
}
}
if (flag[0] || flag[1])
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
}
#endif
#if 0
// Keep record of the total distortion this time around for future use
cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
void vp8_encode_frame(VP8_COMP *cpi)
{
if (cpi->sf.RD)
{
int frame_type, pred_type;
int redo = 0;
int single_diff, dual_diff, hybrid_diff;
/*
* This code does a single RD pass over the whole frame assuming
* either dual, single or hybrid prediction as per whatever has
* worked best for that type of frame in the past.
* It also predicts whether another coding mode would have worked
* better than this coding mode. If that is the case, it remembers
* that for subsequent frames. If the difference is above a certain
* threshold, it will actually re-encode the current frame using
* that different coding mode.
*/
if (cpi->common.frame_type == KEY_FRAME)
frame_type = 0;
else if (cpi->is_src_frame_alt_ref && cpi->common.refresh_golden_frame)
frame_type = 3;
else if (cpi->common.refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
frame_type = 1;
else
frame_type = 2;
if (cpi->rd_prediction_type_threshes[frame_type][1] >
cpi->rd_prediction_type_threshes[frame_type][0] &&
cpi->rd_prediction_type_threshes[frame_type][1] >
cpi->rd_prediction_type_threshes[frame_type][2])
pred_type = DUAL_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][0] >
cpi->rd_prediction_type_threshes[frame_type][1] &&
cpi->rd_prediction_type_threshes[frame_type][0] >
cpi->rd_prediction_type_threshes[frame_type][2])
pred_type = SINGLE_PREDICTION_ONLY;
else
pred_type = HYBRID_PREDICTION;
cpi->common.dual_pred_mode = pred_type;
encode_frame_internal(cpi);
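// Update the running estimate of RD gain per MB for each prediction type.
// Each threshold is a decaying average: thresh = (thresh + diff) >> 1.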
single_diff = cpi->rd_single_diff / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][0] += single_diff;
cpi->rd_prediction_type_threshes[frame_type][0] >>= 1;
dual_diff = cpi->rd_dual_diff / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][1] += dual_diff;
cpi->rd_prediction_type_threshes[frame_type][1] >>= 1;
hybrid_diff = cpi->rd_hybrid_diff / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][2] += hybrid_diff;
cpi->rd_prediction_type_threshes[frame_type][2] >>= 1;
if (cpi->common.dual_pred_mode == HYBRID_PREDICTION)
{
int single_count_zero = 0;
int dual_count_zero = 0;
int i;
for ( i = 0; i < DUAL_PRED_CONTEXTS; i++ )
{
single_count_zero += cpi->single_pred_count[i];
dual_count_zero += cpi->dual_pred_count[i];
}
if (dual_count_zero == 0)
{
cpi->common.dual_pred_mode = SINGLE_PREDICTION_ONLY;
}
else if (single_count_zero == 0)
{
cpi->common.dual_pred_mode = DUAL_PREDICTION_ONLY;
}
}
}
else
{
encode_frame_internal(cpi);
}
}
void vp8_setup_block_ptrs(MACROBLOCK *x)
{
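// The 25 blocks share one 400-element src_diff buffer: 16 luma 4x4 blocks
// at offset 0 (stride 16), 4 U blocks at 256 and 4 V blocks at 320
// (stride 8), and the 2nd order (Y2) block at 384.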
int r, c;
int i;
for (r = 0; r < 4; r++)
{
for (c = 0; c < 4; c++)
{
x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
}
}
for (r = 0; r < 2; r++)
{
for (c = 0; c < 2; c++)
{
x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
}
}
for (r = 0; r < 2; r++)
{
for (c = 0; c < 2; c++)
{
x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
}
}
x->block[24].src_diff = x->src_diff + 384;
for (i = 0; i < 25; i++)
{
x->block[i].coeff = x->coeff + i * 16;
}
}
void vp8_build_block_offsets(MACROBLOCK *x)
{
int block = 0;
int br, bc;
vp8_build_block_doffsets(&x->e_mbd);
// y blocks
x->thismb_ptr = &x->thismb[0];
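// Luma blocks take their source from the 16x16 macroblock cache
// (x->thismb, stride 16) rather than from the frame buffer; the commented
// lines below show the old frame-buffer addressing.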
for (br = 0; br < 4; br++)
{
for (bc = 0; bc < 4; bc++)
{
BLOCK *this_block = &x->block[block];
//this_block->base_src = &x->src.y_buffer;
//this_block->src_stride = x->src.y_stride;
//this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->thismb_ptr;
this_block->src_stride = 16;
this_block->src = 4 * br * 16 + 4 * bc;
++block;
}
}
// u blocks
for (br = 0; br < 2; br++)
{
for (bc = 0; bc < 2; bc++)
{
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.u_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
}
// v blocks
for (br = 0; br < 2; br++)
{
for (bc = 0; bc < 2; bc++)
{
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.v_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
}
}
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
{
const MACROBLOCKD *xd = & x->e_mbd;
const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
#ifdef MODE_STATS
const int is_key = cpi->common.frame_type == KEY_FRAME;
++ (is_key ? uv_modes : inter_uv_modes)[uvm];
if (m == B_PRED)
{
unsigned int *const bct = is_key ? b_modes : inter_b_modes;
int b = 0;
do
{
++ bct[xd->block[b].bmi.as_mode];
}
while (++b < 16);
}

if(m==I8X8_PRED)
{
i8x8_modes[xd->block[0].bmi.as_mode]++;
i8x8_modes[xd->block[2].bmi.as_mode]++;
i8x8_modes[xd->block[8].bmi.as_mode]++;
i8x8_modes[xd->block[10].bmi.as_mode]++;
}
#endif
++cpi->ymode_count[m];
++cpi->uv_mode_count[uvm];
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
{
#if USE_ACT_INDEX
x->act_zbin_adj = *(x->mb_activity_ptr);
#else
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + 4*cpi->activity_avg;
b = 4*act + cpi->activity_avg;
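// b/a > 1 when this MB is more active than the frame average, giving a
// positive zbin adjustment (coarser quantization of busy areas); quieter
// MBs get a negative adjustment.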
if ( act > cpi->activity_avg )
x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
else
x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
#endif
}
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
{
int rate;
// Non rd path deprecated in test code base
//if (cpi->sf.RD && cpi->compressor_speed != 2)
vp8_rd_pick_intra_mode(cpi, x, &rate);
//else
// vp8_pick_intra_mode(cpi, x, &rate);
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
adjust_act_zbin( cpi, x );
vp8_update_zbin_extra(cpi, x);
}
#if CONFIG_T8X8
/* test code: set transform size based on mode selection */
if(cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != B_PRED)
{
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
cpi->t8x8_count++;
}
else
{
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count ++;
}
#endif
if(x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED)
{
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
}
else if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
else
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
if(x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED)
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t);
return rate;
}
#ifdef SPEEDSTATS
extern int cnt_pm;
#endif
extern void vp8_fix_contexts(MACROBLOCKD *x);
int vp8cx_encode_inter_macroblock
(
VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset
)
{
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
int intra_error = 0;
int rate;
int distortion;
unsigned char *segment_id = &xd->mode_info_context->mbmi.segment_id;
int seg_ref_active;
unsigned char ref_pred_flag;
x->skip = 0;
if (xd->segmentation_enabled)
x->encode_breakout = cpi->segment_encode_breakout[*segment_id];
else
x->encode_breakout = cpi->oxcf.encode_breakout;
//if (cpi->sf.RD)
// For now this codebase is limited to a single rd encode path
{
int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
int single, dual, hybrid;
/* Are we using the fast quantizer for the mode selection? */
if(cpi->sf.use_fastquant_for_pick)
{
cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
fastquantb);
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
fastquantb_pair);
/* the fast quantizer does not use zbin_extra, so
* do not recalculate */
cpi->zbin_mode_boost_enabled = 0;
}
vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
&distortion, &intra_error, &single, &dual, &hybrid);
cpi->rd_single_diff += single;
cpi->rd_dual_diff += dual;
cpi->rd_hybrid_diff += hybrid;
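// Record, per prediction context, whether the RD search settled on a
// single or a dual (compound) reference for this MB; vp8_encode_frame()
// uses these counts to choose the prediction mode for subsequent frames.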
if (x->e_mbd.mode_info_context->mbmi.ref_frame &&
x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
{
unsigned char pred_context;
pred_context = get_pred_context( cm, xd, PRED_DUAL );
if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME)
cpi->single_pred_count[pred_context]++;
else
cpi->dual_pred_count[pred_context]++;
}
#if CONFIG_T8X8
/* test code: set transform size based on mode selection */
if( cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != B_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
{
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
cpi->t8x8_count ++;
}
else
{
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count++;
}
#endif
/* switch back to the regular quantizer for the encode */
if (cpi->sf.improved_quant)
{
cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
quantb);
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
quantb_pair);
}
/* restore cpi->zbin_mode_boost_enabled */
cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
}
//else
// The non rd encode path has been deleted from this code base
// to simplify development
// vp8_pick_inter_mode
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
// Adjust the zbin based on this MB rate.
adjust_act_zbin( cpi, x );
}

// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled)
{
if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME )
{
if (xd->mode_info_context->mbmi.mode == ZEROMV)
{
if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
}
else if (xd->mode_info_context->mbmi.mode == SPLITMV)
cpi->zbin_mode_boost = 0;
else
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
}
/* The fast quantizer doesn't use zbin_extra, only do so with
* the regular quantizer. */
if (cpi->sf.improved_quant)
vp8_update_zbin_extra(cpi, x);
seg_ref_active = segfeature_active( xd, *segment_id, SEG_LVL_REF_FRAME );
// SET VARIOUS PREDICTION FLAGS
// Did the chosen reference frame match its predicted value.
ref_pred_flag = ( (xd->mode_info_context->mbmi.ref_frame ==
get_pred_ref( cm, xd )) ? 1 : 0 );
set_pred_flag( xd, PRED_REF, ref_pred_flag );
// If we have just a single reference frame coded for a segment then
// exclude from the reference frame counts used to work out
// probabilities. NOTE: At the moment we don't support custom trees
// for the reference frame coding for each segment but this is a
// possible future action.
if ( !seg_ref_active ||
( ( check_segref( xd, *segment_id, INTRA_FRAME ) +
check_segref( xd, *segment_id, LAST_FRAME ) +
check_segref( xd, *segment_id, GOLDEN_FRAME ) +
check_segref( xd, *segment_id, ALTREF_FRAME ) ) > 1 ) )
// TODO: this may not be a good idea, as it makes the sample size small and
// means the predictor functions cannot use data about the most likely
// value, only the most likely unpredicted value.
//#if CONFIG_COMPRED
// // Only update count for incorrectly predicted cases
// if ( !ref_pred_flag )
//#endif
{
cpi->count_mb_ref_frame_usage
[xd->mode_info_context->mbmi.ref_frame]++;
}
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
if (xd->mode_info_context->mbmi.mode == B_PRED)
{
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
}
else if(xd->mode_info_context->mbmi.mode == I8X8_PRED)
{
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
}
else
{
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
sum_intra_stats(cpi, x);
}
else
{
int ref_fb_idx;
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
else
ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
if (xd->mode_info_context->mbmi.second_ref_frame) {
int second_ref_fb_idx;
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.gld_fb_idx;
else
second_ref_fb_idx = cpi->common.alt_fb_idx;
xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
recon_yoffset;
xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
recon_uvoffset;
xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
recon_uvoffset;
}
if (!x->skip)
{
vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
// Clear mb_skip_coeff if mb_no_coeff_skip is not set
if (!cpi->common.mb_no_coeff_skip)
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
}
else
vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
}

if (!x->skip)
{
#ifdef ENC_DEBUG
if (enc_debug)
{
int i;
printf("Segment=%d [%d, %d]: %d %d:\n", x->e_mbd.mode_info_context->mbmi.segment_id, mb_col_debug, mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
for (i =0; i<400; i++) {
printf("%3d ", xd->qcoeff[i]);
if (i%16 == 15) printf("\n");
}
printf("\n");
printf("eobs = ");
for (i=0;i<25;i++)
printf("%d:%d ", i, xd->block[i].eob);
printf("\n");
fflush(stdout);
}
#endif
vp8_tokenize_mb(cpi, xd, t);
#ifdef ENC_DEBUG
if (enc_debug) {
printf("Tokenized\n");
fflush(stdout);
}
#endif
}
else
{
if (cpi->common.mb_no_coeff_skip)
{
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
cpi->skip_true_count ++;
vp8_fix_contexts(xd);
}
else
{
vp8_stuff_mb(cpi, xd, t);
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
cpi->skip_false_count ++;
}
}
return rate;
}