// Initialize encode frame context.
init_encode_frame_mb_context(cpi);
// Build a frame level activity map
build_activity_map(cpi);
// Re-initialize the encode frame context.
init_encode_frame_mb_context(cpi);
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
#if CONFIG_MULTITHREAD
    if (cpi->b_multi_threaded)
    {
        int i;

        vp8cx_init_mbrthread_data(cpi, x, cpi->mb_row_ei, 1, cpi->encoding_thread_count);

        for (i = 0; i < cm->mb_rows; i++)
            cpi->mt_current_mb_col[i] = -1;

        for (i = 0; i < cpi->encoding_thread_count; i++)
        {
            sem_post(&cpi->h_event_start_encoding[i]);
        }
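        // The main thread encodes every (encoding_thread_count + 1)th MB row;
        // the worker threads fill in the rows in between.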
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += (cpi->encoding_thread_count + 1))
{
vp8_zero(cm->left_context)
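            // Tokens for this row start at a fixed offset: at most 24 blocks
            // of 16 coefficients each can be tokenized per macroblock.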
tp = cpi->tok + mb_row * (cm->mb_cols * 16 * 24);
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
// adjust to the next row of mbs
x->src.y_buffer += 16 * x->src.y_stride * (cpi->encoding_thread_count + 1) - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride * (cpi->encoding_thread_count + 1) - 8 * cm->mb_cols;
xd->mode_info_context += xd->mode_info_stride * cpi->encoding_thread_count;
x->partition_info += xd->mode_info_stride * cpi->encoding_thread_count;
x->gf_active_ptr += cm->mb_cols * cpi->encoding_thread_count;
        }

        sem_wait(&cpi->h_event_end_encoding); /* wait for other threads to finish */
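        // Add up the tokens produced for each MB row by the row encoders.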
cpi->tok_count = 0;
for (mb_row = 0; mb_row < cm->mb_rows; mb_row ++)
{
cpi->tok_count += cpi->tplist[mb_row].stop - cpi->tplist[mb_row].start;
}
        if (xd->segmentation_enabled)
        {
            int i, j;

            for (i = 0; i < cpi->encoding_thread_count; i++)
            {
                for (j = 0; j < 4; j++)
                    segment_counts[j] += cpi->mb_row_ei[i].segment_counts[j];
            }
        }
for (i = 0; i < cpi->encoding_thread_count; i++)
{
totalrate += cpi->mb_row_ei[i].totalrate;
}
    }
    else
#endif
    {
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
vp8_zero(cm->left_context)
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, segment_counts, &totalrate);
// adjust to the next row of mbs
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
}
cpi->tok_count = tp - cpi->tok;
}
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
}
    // Work out the segment probabilities if segmentation is enabled
if (xd->segmentation_enabled)
{
        int tot_count;
        int count1, count2, count3, count4;
#if CONFIG_SEGMENTATION
        int prob[3];                 /* needed by the cost comparison below */
        int original_cost, new_cost;
#endif
// Set to defaults
vpx_memset(xd->mb_segment_tree_probs, 255 , sizeof(xd->mb_segment_tree_probs));
#if CONFIG_SEGMENTATION
tot_count = segment_counts[12] + segment_counts[13] + segment_counts[14] + segment_counts[15];
count1 = segment_counts[12] + segment_counts[13];
count2 = segment_counts[14] + segment_counts[15];
if (tot_count)
prob[0] = (count1 * 255) / tot_count;
if (count1 > 0)
prob[1] = (segment_counts[12] * 255) /count1;
if (count2 > 0)
prob[2] = (segment_counts[14] * 255) /count2;
if (cm->frame_type != KEY_FRAME)
{
tot_count = segment_counts[4] + segment_counts[7];
if (tot_count)
xd->mb_segment_tree_probs[3] = (segment_counts[4] * 255)/tot_count;
tot_count = segment_counts[5] + segment_counts[8];
if (tot_count)
xd->mb_segment_tree_probs[4] = (segment_counts[5] * 255)/tot_count;
tot_count = segment_counts[6] + segment_counts[9];
if (tot_count)
xd->mb_segment_tree_probs[5] = (segment_counts[6] * 255)/tot_count;
}
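        // Now work out the tree probabilities implied by this frame's raw
        // segment counts, for comparison against the predicted set above.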
tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
count3 = segment_counts[0] + segment_counts[1];
count4 = segment_counts[2] + segment_counts[3];
if (tot_count)
xd->mb_segment_tree_probs[0] = (count3 * 255) / tot_count;
if (count3 > 0)
xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) /count3;
if (count4 > 0)
xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) /count4;
for (i = 0; i < MB_FEATURE_TREE_PROBS+3; i++)
{
if (xd->mb_segment_tree_probs[i] == 0)
xd->mb_segment_tree_probs[i] = 1;
}
original_cost = count1 * vp8_cost_zero(prob[0]) + count2 * vp8_cost_one(prob[0]);
if (count1 > 0)
original_cost += segment_counts[12] * vp8_cost_zero(prob[1]) + segment_counts[13] * vp8_cost_one(prob[1]);
if (count2 > 0)
original_cost += segment_counts[14] * vp8_cost_zero(prob[2]) + segment_counts[15] * vp8_cost_one(prob[2]) ;
new_cost = 0;
if (cm->frame_type != KEY_FRAME)
{
new_cost = segment_counts[4] * vp8_cost_zero(xd->mb_segment_tree_probs[3]) + segment_counts[7] * vp8_cost_one(xd->mb_segment_tree_probs[3]);
new_cost += segment_counts[5] * vp8_cost_zero(xd->mb_segment_tree_probs[4]) + segment_counts[8] * vp8_cost_one(xd->mb_segment_tree_probs[4]);
new_cost += segment_counts[6] * vp8_cost_zero(xd->mb_segment_tree_probs[5]) + segment_counts[9] * vp8_cost_one (xd->mb_segment_tree_probs[5]);
}
if (tot_count > 0)
new_cost += count3 * vp8_cost_zero(xd->mb_segment_tree_probs[0]) + count4 * vp8_cost_one(xd->mb_segment_tree_probs[0]);
if (count3 > 0)
new_cost += segment_counts[0] * vp8_cost_zero(xd->mb_segment_tree_probs[1]) + segment_counts[1] * vp8_cost_one(xd->mb_segment_tree_probs[1]);
if (count4 > 0)
new_cost += segment_counts[2] * vp8_cost_zero(xd->mb_segment_tree_probs[2]) + segment_counts[3] * vp8_cost_one(xd->mb_segment_tree_probs[2]) ;
if (new_cost < original_cost)
xd->temporal_update = 1;
else
{
xd->temporal_update = 0;
xd->mb_segment_tree_probs[0] = prob[0];
xd->mb_segment_tree_probs[1] = prob[1];
xd->mb_segment_tree_probs[2] = prob[2];
}
tot_count = segment_counts[0] + segment_counts[1] + segment_counts[2] + segment_counts[3];
count1 = segment_counts[0] + segment_counts[1];
count2 = segment_counts[2] + segment_counts[3];
        if (tot_count)
            xd->mb_segment_tree_probs[0] = (count1 * 255) / tot_count;
if (count1 > 0)
xd->mb_segment_tree_probs[1] = (segment_counts[0] * 255) /count1;
if (count2 > 0)
xd->mb_segment_tree_probs[2] = (segment_counts[2] * 255) /count2;
#endif
// Zero probabilities not allowed
#if CONFIG_SEGMENTATION
for (i = 0; i < MB_FEATURE_TREE_PROBS+3; i++)
#else
for (i = 0; i < MB_FEATURE_TREE_PROBS; i++)
#endif
{
if (xd->mb_segment_tree_probs[i] == 0)
xd->mb_segment_tree_probs[i] = 1;
}
}
// 256 rate units to the bit
cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of BYTES
// Make a note of the percentage MBs coded Intra.
if (cm->frame_type == KEY_FRAME)
{
cpi->this_frame_percent_intra = 100;
}
else
{
int tot_modes;
tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
+ cpi->count_mb_ref_frame_usage[LAST_FRAME]
+ cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
+ cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
if (tot_modes)
cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
}
#if 0
{
int cnt = 0;
int flag[2] = {0, 0};
for (cnt = 0; cnt < MVPcount; cnt++)
{
if (cm->fc.pre_mvc[0][cnt] != cm->fc.mvc[0][cnt])
{
flag[0] = 1;
vpx_memcpy(cm->fc.pre_mvc[0], cm->fc.mvc[0], MVPcount);
break;
}
}
for (cnt = 0; cnt < MVPcount; cnt++)
{
if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt])
{
flag[1] = 1;
vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount);
break;
}
}
if (flag[0] || flag[1])
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
}
#endif
    // Adjust the projected reference frame usage probability numbers to reflect
    // what we have just seen. This may be useful when we make multiple iterations
    // of the recode loop rather than continuing to use values from the previous frame.
if ((cm->frame_type != KEY_FRAME) && !cm->refresh_alt_ref_frame && !cm->refresh_golden_frame)
{
const int *const rfct = cpi->count_mb_ref_frame_usage;
const int rf_intra = rfct[INTRA_FRAME];
const int rf_inter = rfct[LAST_FRAME] + rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME];
if ((rf_intra + rf_inter) > 0)
{
cpi->prob_intra_coded = (rf_intra * 255) / (rf_intra + rf_inter);
if (cpi->prob_intra_coded < 1)
cpi->prob_intra_coded = 1;
if ((cm->frames_since_golden > 0) || cpi->source_alt_ref_active)
{
cpi->prob_last_coded = rf_inter ? (rfct[LAST_FRAME] * 255) / rf_inter : 128;
if (cpi->prob_last_coded < 1)
cpi->prob_last_coded = 1;
cpi->prob_gf_coded = (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME])
? (rfct[GOLDEN_FRAME] * 255) / (rfct[GOLDEN_FRAME] + rfct[ALTREF_FRAME]) : 128;
if (cpi->prob_gf_coded < 1)
cpi->prob_gf_coded = 1;
}
}
}
#if 0
// Keep record of the total distortion this time around for future use
cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
void vp8_setup_block_ptrs(MACROBLOCK *x)
{
int r, c;
int i;
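    // y blocks: 16 4x4 blocks covering the 16x16 luma area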
for (r = 0; r < 4; r++)
{
for (c = 0; c < 4; c++)
{
x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
}
}
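    // u blocks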
for (r = 0; r < 2; r++)
{
for (c = 0; c < 2; c++)
{
x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
}
}
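    // v blocks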
for (r = 0; r < 2; r++)
{
for (c = 0; c < 2; c++)
{
x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
}
}
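    // second order (Y2) block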
x->block[24].src_diff = x->src_diff + 384;
for (i = 0; i < 25; i++)
{
x->block[i].coeff = x->coeff + i * 16;
}
}
void vp8_build_block_offsets(MACROBLOCK *x)
{
int block = 0;
int br, bc;
vp8_build_block_doffsets(&x->e_mbd);
// y blocks
x->thismb_ptr = &x->thismb[0];
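    // Y blocks are sourced from the 16x16 copy in x->thismb (stride 16)
    // rather than directly from the frame buffer.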
for (br = 0; br < 4; br++)
{
for (bc = 0; bc < 4; bc++)
{
BLOCK *this_block = &x->block[block];
//this_block->base_src = &x->src.y_buffer;
//this_block->src_stride = x->src.y_stride;
//this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->thismb_ptr;
this_block->src_stride = 16;
this_block->src = 4 * br * 16 + 4 * bc;
++block;
}
}
// u blocks
for (br = 0; br < 2; br++)
{
for (bc = 0; bc < 2; bc++)
{
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.u_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
}
// v blocks
for (br = 0; br < 2; br++)
{
for (bc = 0; bc < 2; bc++)
{
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.v_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
}
}
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
{
const MACROBLOCKD *xd = & x->e_mbd;
const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
#ifdef MODE_STATS
const int is_key = cpi->common.frame_type == KEY_FRAME;
++ (is_key ? uv_modes : inter_uv_modes)[uvm];
if (m == B_PRED)
{
unsigned int *const bct = is_key ? b_modes : inter_b_modes;
int b = 0;
do
{
++ bct[xd->block[b].bmi.as_mode];
}
while (++b < 16);
}
#endif
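    // Tally the selected luma and chroma prediction modes.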
++cpi->ymode_count[m];
++cpi->uv_mode_count[uvm];
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
{
#if USE_ACT_INDEX
    x->act_zbin_adj = *(x->mb_activity_ptr);
#else
    int64_t a;
    int64_t b;
    int64_t act = *(x->mb_activity_ptr);

    // Apply the masking to the RD multiplier.
    a = act + 4*cpi->activity_avg;
    b = 4*act + cpi->activity_avg;

    if ( act > cpi->activity_avg )
        x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
    else
        x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
#endif
}
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
{
    int rate;

#if CONFIG_T8X8
    if (x->e_mbd.segmentation_enabled)
        x->e_mbd.update_mb_segmentation_map = 1;
#endif

    if (cpi->sf.RD && cpi->compressor_speed != 2)
        vp8_rd_pick_intra_mode(cpi, x, &rate);
    else
        vp8_pick_intra_mode(cpi, x, &rate);

    if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
    {
        adjust_act_zbin( cpi, x );
        vp8_update_zbin_extra(cpi, x);
    }
if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
else
{
#if CONFIG_T8X8
if (x->e_mbd.segmentation_enabled)
x->e_mbd.mode_info_context->mbmi.segment_id |= (vp8_8x8_selection_intra(x) << 1);
#endif
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
sum_intra_stats(cpi, x);
vp8_tokenize_mb(cpi, &x->e_mbd, t);
#if CONFIG_T8X8
if( x->e_mbd.mode_info_context->mbmi.segment_id >=2)
cpi->t8x8_count++;
else
cpi->t4x4_count++;
#endif
return rate;
}
#ifdef SPEEDSTATS
extern int cnt_pm;
#endif
extern void vp8_fix_contexts(MACROBLOCKD *x);
int vp8cx_encode_inter_macroblock
(
VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset
)
{
MACROBLOCKD *const xd = &x->e_mbd;
int intra_error = 0;
int rate;
int distortion;
x->skip = 0;
if (xd->segmentation_enabled)
x->encode_breakout = cpi->segment_encode_breakout[xd->mode_info_context->mbmi.segment_id];
else
x->encode_breakout = cpi->oxcf.encode_breakout;
    if (cpi->sf.RD)
    {
        int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;

        /* Are we using the fast quantizer for the mode selection? */
        if(cpi->sf.use_fastquant_for_pick)
        {
            cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
                                                 fastquantb);
            cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
                                                      fastquantb_pair);

            /* the fast quantizer does not use zbin_extra, so
             * do not recalculate */
            cpi->zbin_mode_boost_enabled = 0;
        }

        vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
                               &distortion, &intra_error);

        /* switch back to the regular quantizer for the encode */
        if (cpi->sf.improved_quant)
        {
            cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
                                                 quantb);
            cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
                                                      quantb_pair);
        }

        /* restore cpi->zbin_mode_boost_enabled */
        cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
    }
    else
        vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
                            &distortion, &intra_error);
    if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
    {
        // Adjust the zbin based on this MB rate.
        adjust_act_zbin( cpi, x );
    }

#if 0
    // Experimental RD code
    cpi->frame_distortion += distortion;
    cpi->last_mb_distortion = distortion;
#endif
{
// If cyclic update enabled
if (cpi->cyclic_refresh_mode_enabled)
{
// Clear segment_id back to 0 if not coded (last frame 0,0)
if ((xd->mode_info_context->mbmi.segment_id == 1) &&
((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) || (xd->mode_info_context->mbmi.mode != ZEROMV)))
            {
                xd->mode_info_context->mbmi.segment_id = 0;

                /* segment_id changed, so update */
                vp8cx_mb_init_quantizer(cpi, x);
            }
        }
        // Experimental code. Special case for gf and arf zeromv modes.
        // Increase zbin size to suppress noise.
        cpi->zbin_mode_boost = 0;
if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME )
{
if (xd->mode_info_context->mbmi.mode == ZEROMV)
{
if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
}
else if (xd->mode_info_context->mbmi.mode == SPLITMV)
cpi->zbin_mode_boost = 0;
else
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
        /* The fast quantizer doesn't use zbin_extra, only do so with
         * the regular quantizer. */
        if (cpi->sf.improved_quant)
            vp8_update_zbin_extra(cpi, x);
    }
cpi->count_mb_ref_frame_usage[xd->mode_info_context->mbmi.ref_frame] ++;
#if CONFIG_T8X8
if (xd->segmentation_enabled)
x->e_mbd.update_mb_segmentation_map = 1;
#endif
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        if (xd->mode_info_context->mbmi.mode == B_PRED)
        {
            vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
            vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
        }
else
{
#if CONFIG_T8X8
if (xd->segmentation_enabled)
xd->mode_info_context->mbmi.segment_id |= (vp8_8x8_selection_intra(x) << 1);
#endif
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
sum_intra_stats(cpi, x);
}
else
{
int ref_fb_idx;
#if CONFIG_T8X8
if (xd->segmentation_enabled)
xd->mode_info_context->mbmi.segment_id |= (vp8_8x8_selection_inter(x) << 1);
#endif
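        // Select the reconstruction buffer matching the chosen reference frame.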
        if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
            ref_fb_idx = cpi->common.lst_fb_idx;
        else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
            ref_fb_idx = cpi->common.gld_fb_idx;
        else
            ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
        if (!x->skip)
        {
            vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);

            // Clear mb_skip_coeff if mb_no_coeff_skip is not set
            if (!cpi->common.mb_no_coeff_skip)
                xd->mode_info_context->mbmi.mb_skip_coeff = 0;
        }
        else
            vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                               xd->dst.u_buffer, xd->dst.v_buffer,
                                               xd->dst.y_stride, xd->dst.uv_stride);
#if CONFIG_T8X8
if (x->e_mbd.mode_info_context->mbmi.segment_id >=2)
cpi->t8x8_count++;
else
cpi->t4x4_count++;
#endif
        if (!x->skip)
        {
#ifdef ENC_DEBUG
if (enc_debug)
{
int i;
printf("Segment=%d [%d, %d]: %d %d:\n", x->e_mbd.mode_info_context->mbmi.segment_id, mb_col_debug, mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
for (i =0; i<400; i++) {
printf("%3d ", xd->qcoeff[i]);
if (i%16 == 15) printf("\n");
}
printf("\n");
printf("eobs = ");
for (i=0;i<25;i++)
printf("%d:%d ", i, xd->block[i].eob);
printf("\n");
fflush(stdout);
}
#endif

            vp8_tokenize_mb(cpi, xd, t);

#ifdef ENC_DEBUG
if (enc_debug) {
printf("Tokenized\n");
fflush(stdout);
}
#endif
}
else
{
            if (cpi->common.mb_no_coeff_skip)
            {
                xd->mode_info_context->mbmi.mb_skip_coeff = 1;
                cpi->skip_true_count ++;
                vp8_fix_contexts(xd);
            }
            else
            {
                vp8_stuff_mb(cpi, xd, t);
                xd->mode_info_context->mbmi.mb_skip_coeff = 0;
                cpi->skip_false_count ++;
            }
}
    }

    return rate;
}