recon_yoffset += 16;
recon_uvoffset += 8;
// skip to next mb
xd->mode_info_context++;
xd->prev_mode_info_context++;
assert((xd->prev_mode_info_context - cpi->common.prev_mip)
==(xd->mode_info_context - cpi->common.mip));
}
//extend the recon for intra prediction
vp8_extend_mb_row(
&cm->yv12_fb[dst_fb_idx],
xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8,
xd->dst.v_buffer + 8);
// this is to account for the border
xd->prev_mode_info_context++;
#if DBG_PRNT_SEGMAP
// Debug output
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n" );
fclose(statsfile);
}
#endif
}
void init_encode_frame_mb_context(VP8_COMP *cpi)
{
MACROBLOCK *const x = & cpi->mb;
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
// GF active flags data structure
x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
// Activity map pointer
x->mb_activity_ptr = cpi->mb_activity_map;
x->vector_range = 32;
x->act_zbin_adj = 0;
x->partition_info = x->pi;
xd->mode_info_context = cm->mi;
xd->mode_info_stride = cm->mode_info_stride;
xd->prev_mode_info_context = cm->prev_mi;
xd->frame_type = cm->frame_type;
xd->frames_since_golden = cm->frames_since_golden;
xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp8_init_mbmode_probs(cm);
// Copy data over into macro block data structures.
x->src = * cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
// set up frame for intra coded blocks
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
vp8_build_block_offsets(x);
vp8_setup_block_dptrs(&x->e_mbd);
vp8_setup_block_ptrs(x);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
xd->left_context = &cm->left_context;
vp8_zero(cpi->count_mb_ref_frame_usage)
vp8_zero(cpi->ymode_count)
vp8_zero(cpi->uv_mode_count)
x->mvc = cm->fc.mvc;
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
//#if CONFIG_COMPRED
// TODO... this will all need changing for the new reference frame coding model.
// In addition, ref_frame_cost should not be in the MACROBLOCKD structure, as
// it is only referenced in the encoder.
//#endif
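// The reference frame costs below follow a binary tree: prob_intra_coded
// splits intra from inter, prob_last_coded splits LAST from GF/ARF and
// prob_gf_coded splits GF from ARF; each cost sums the branch costs on
// its path through the tree.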
xd->ref_frame_cost[INTRA_FRAME] = vp8_cost_zero(cm->prob_intra_coded);
// Special case treatment when GF and ARF are not sensible options for reference
if (cpi->ref_frame_flags == VP8_LAST_FLAG)
{
xd->ref_frame_cost[LAST_FRAME] = vp8_cost_one(cm->prob_intra_coded)
+ vp8_cost_zero(255);
xd->ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cm->prob_intra_coded)
+ vp8_cost_one(255)
+ vp8_cost_zero(128);
xd->ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cm->prob_intra_coded)
+ vp8_cost_one(255)
+ vp8_cost_one(128);
}
else
{
xd->ref_frame_cost[LAST_FRAME] = vp8_cost_one(cm->prob_intra_coded)
+ vp8_cost_zero(cm->prob_last_coded);
xd->ref_frame_cost[GOLDEN_FRAME] = vp8_cost_one(cm->prob_intra_coded)
+ vp8_cost_one(cm->prob_last_coded)
+ vp8_cost_zero(cm->prob_gf_coded);
xd->ref_frame_cost[ALTREF_FRAME] = vp8_cost_one(cm->prob_intra_coded)
+ vp8_cost_one(cm->prob_last_coded)
+ vp8_cost_one(cm->prob_gf_coded);
}
// When the frame is coded full-pixel only, the low (fractional-pel) bits
// of each MV component are masked off.
xd->fullpixel_mask = 0xffffffff;
if(cm->full_pixel)
xd->fullpixel_mask = 0xfffffff8;
}
static void encode_frame_internal(VP8_COMP *cpi)
{
int mb_row;
MACROBLOCK *const x = & cpi->mb;
VP8_COMMON *const cm = & cpi->common;
MACROBLOCKD *const xd = & x->e_mbd;
TOKENEXTRA *tp = cpi->tok;
int totalrate;
// Compute a modified set of reference frame probabilities to use when
// prediction fails. These are based on the current general estimates for
// this frame, which may be updated with each iteration of the recode loop.
compute_mod_refprobs( cm );
#if DBG_PRNT_SEGMAP
// Debug output
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n" );
fclose(statsfile);
}
#endif
totalrate = 0;
if (cpi->compressor_speed == 2)
{
if (cpi->oxcf.cpu_used < 0)
cpi->Speed = -(cpi->oxcf.cpu_used);
else
vp8_auto_select_speed(cpi);
}
// Functions setup for all frame types so we can use MC in AltRef
if (cm->mcomp_filter_type == SIXTAP)
{
xd->subpixel_predict = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap16x16);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, sixtap_avg16x16);
}
else
{
xd->subpixel_predict = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear16x16);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
&cpi->common.rtcd.subpix, bilinear_avg16x16);
}
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
cpi->prediction_error = 0;
cpi->intra_error = 0;
cpi->skip_true_count = 0;
cpi->skip_false_count = 0;
#if 0
// Experimental code
cpi->frame_distortion = 0;
cpi->last_mb_distortion = 0;
#endif
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
vp8_zero(cpi->MVcount);
vp8_zero(cpi->coef_counts);
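// Set up the frame quantizer and the RD / motion-search cost constants;
// the RD constants are based on the effective Y DC quantizer (base Q
// plus the Y1 DC delta).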
vp8cx_frame_init_quantizer(cpi);
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vp8cx_initialize_me_consts(cpi, cm->base_qindex);
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
// Initialize encode frame context.
init_encode_frame_mb_context(cpi);
// Build a frame level activity map
build_activity_map(cpi);
}
// Re-initialize the encode frame context.
init_encode_frame_mb_context(cpi);
cpi->rd_single_diff = cpi->rd_dual_diff = cpi->rd_hybrid_diff = 0;
vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
vpx_memset(cpi->dual_pred_count, 0, sizeof(cpi->dual_pred_count));
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
#if CONFIG_SUPERBLOCKS
// for each superblock row in the image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row+=2)
{
int offset = cm->mb_cols - 1 + (cm->mb_cols & 0x1);
encode_sb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);
// adjust to the next row of SBs
x->src.y_buffer += 32 * x->src.y_stride - 16 * offset;
x->src.u_buffer += 16 * x->src.uv_stride - 8 * offset;
x->src.v_buffer += 16 * x->src.uv_stride - 8 * offset;
}
#else
// for each macroblock row in the image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++)
{
encode_mb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);
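// Step the source pointers to the start of the next MB row: down one row
// of 16 luma (8 chroma) lines, back across the width already traversed.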
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
x->src.u_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
x->src.v_buffer += 8 * x->src.uv_stride - 8 * cm->mb_cols;
}
#endif
cpi->tok_count = tp - cpi->tok;
}
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
}
// 256 rate units to the bit
cpi->projected_frame_size = totalrate >> 8; // projected_frame_size in units of BYTES
// Make a note of the percentage of MBs coded intra.
if (cm->frame_type == KEY_FRAME)
{
cpi->this_frame_percent_intra = 100;
}
else
{
int tot_modes;
tot_modes = cpi->count_mb_ref_frame_usage[INTRA_FRAME]
+ cpi->count_mb_ref_frame_usage[LAST_FRAME]
+ cpi->count_mb_ref_frame_usage[GOLDEN_FRAME]
+ cpi->count_mb_ref_frame_usage[ALTREF_FRAME];
if (tot_modes)
cpi->this_frame_percent_intra = cpi->count_mb_ref_frame_usage[INTRA_FRAME] * 100 / tot_modes;
}
#if 0
{
int cnt = 0;
int flag[2] = {0, 0};
for (cnt = 0; cnt < MVPcount; cnt++)
{
if (cm->fc.pre_mvc[0][cnt] != cm->fc.mvc[0][cnt])
{
flag[0] = 1;
vpx_memcpy(cm->fc.pre_mvc[0], cm->fc.mvc[0], MVPcount);
break;
}
}
for (cnt = 0; cnt < MVPcount; cnt++)
{
if (cm->fc.pre_mvc[1][cnt] != cm->fc.mvc[1][cnt])
{
flag[1] = 1;
vpx_memcpy(cm->fc.pre_mvc[1], cm->fc.mvc[1], MVPcount);
break;
}
}
if (flag[0] || flag[1])
vp8_build_component_cost_table(cpi->mb.mvcost, (const MV_CONTEXT *) cm->fc.mvc, flag);
}
#endif
#if 0
// Keep record of the total distortion this time around for future use
cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
void vp8_encode_frame(VP8_COMP *cpi)
{
if (cpi->sf.RD)
{
int frame_type, pred_type;
int redo = 0;
int single_diff, dual_diff, hybrid_diff;
/*
* This code does a single RD pass over the whole frame assuming
* either dual, single or hybrid prediction as per whatever has
* worked best for that type of frame in the past.
* It also predicts whether another coding mode would have worked
* better than this coding mode. If that is the case, it remembers
* that for subsequent frames. If the difference is above a certain
* threshold, it will actually re-encode the current frame using
* that different coding mode.
*/
if (cpi->common.frame_type == KEY_FRAME)
frame_type = 0;
else if (cpi->is_src_frame_alt_ref && cpi->common.refresh_golden_frame)
frame_type = 3;
else if (cpi->common.refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
frame_type = 1;
else
frame_type = 2;
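// Pick whichever prediction type has the strictly highest accumulated RD
// threshold for this frame type; ties fall through to hybrid prediction.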
if (cpi->rd_prediction_type_threshes[frame_type][1] >
cpi->rd_prediction_type_threshes[frame_type][0] &&
cpi->rd_prediction_type_threshes[frame_type][1] >
cpi->rd_prediction_type_threshes[frame_type][2])
pred_type = DUAL_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][0] >
cpi->rd_prediction_type_threshes[frame_type][1] &&
cpi->rd_prediction_type_threshes[frame_type][0] >
cpi->rd_prediction_type_threshes[frame_type][2])
pred_type = SINGLE_PREDICTION_ONLY;
else
pred_type = HYBRID_PREDICTION;
cpi->common.dual_pred_mode = pred_type;
encode_frame_internal(cpi);
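// Fold this frame's average per-MB RD difference into the per-frame-type
// thresholds (add, then halve: a running average with 1/2 decay).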
single_diff = cpi->rd_single_diff / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][0] += single_diff;
cpi->rd_prediction_type_threshes[frame_type][0] >>= 1;
dual_diff = cpi->rd_dual_diff / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][1] += dual_diff;
cpi->rd_prediction_type_threshes[frame_type][1] >>= 1;
hybrid_diff = cpi->rd_hybrid_diff / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][2] += hybrid_diff;
cpi->rd_prediction_type_threshes[frame_type][2] >>= 1;
if (cpi->common.dual_pred_mode == HYBRID_PREDICTION)
{
int single_count_zero = 0;
int dual_count_zero = 0;
int i;
for ( i = 0; i < DUAL_PRED_CONTEXTS; i++ )
{
single_count_zero += cpi->single_pred_count[i];
dual_count_zero += cpi->dual_pred_count[i];
}
if (dual_count_zero == 0)
{
cpi->common.dual_pred_mode = SINGLE_PREDICTION_ONLY;
}
else if (single_count_zero == 0)
{
cpi->common.dual_pred_mode = DUAL_PREDICTION_ONLY;
}
}
}
else
{
encode_frame_internal(cpi);
}
}
void vp8_setup_block_ptrs(MACROBLOCK *x)
{
int r, c;
int i;
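// src_diff layout: the 16 luma 4x4 blocks fill the first 256 entries,
// the four U and four V blocks follow at offsets 256 and 320, and the
// second order (Y2) block starts at offset 384.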
for (r = 0; r < 4; r++)
{
for (c = 0; c < 4; c++)
{
x->block[r*4+c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
}
}
for (r = 0; r < 2; r++)
{
for (c = 0; c < 2; c++)
{
x->block[16 + r*2+c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
}
}
for (r = 0; r < 2; r++)
{
for (c = 0; c < 2; c++)
{
x->block[20 + r*2+c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
}
}
x->block[24].src_diff = x->src_diff + 384;
for (i = 0; i < 25; i++)
{
x->block[i].coeff = x->coeff + i * 16;
}
}
void vp8_build_block_offsets(MACROBLOCK *x)
{
int block = 0;
int br, bc;
vp8_build_block_doffsets(&x->e_mbd);
// y blocks
x->thismb_ptr = &x->thismb[0];
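// The y blocks read from x->thismb, a contiguous 16x16 (stride 16) copy
// of the current source macroblock, instead of the frame buffer itself.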
for (br = 0; br < 4; br++)
{
for (bc = 0; bc < 4; bc++)
{
BLOCK *this_block = &x->block[block];
//this_block->base_src = &x->src.y_buffer;
//this_block->src_stride = x->src.y_stride;
//this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->thismb_ptr;
this_block->src_stride = 16;
this_block->src = 4 * br * 16 + 4 * bc;
++block;
}
}
// u blocks
for (br = 0; br < 2; br++)
{
for (bc = 0; bc < 2; bc++)
{
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.u_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
}
// v blocks
for (br = 0; br < 2; br++)
{
for (bc = 0; bc < 2; bc++)
{
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.v_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
}
}
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x)
{
const MACROBLOCKD *xd = & x->e_mbd;
const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
#ifdef MODE_STATS
const int is_key = cpi->common.frame_type == KEY_FRAME;
++ (is_key ? uv_modes : inter_uv_modes)[uvm];
if (m == B_PRED)
{
unsigned int *const bct = is_key ? b_modes : inter_b_modes;
int b = 0;
do
{
++ bct[xd->block[b].bmi.as_mode];
}
while (++b < 16);
}
if(m==I8X8_PRED)
{
i8x8_modes[xd->block[0].bmi.as_mode]++;
i8x8_modes[xd->block[2].bmi.as_mode]++;
i8x8_modes[xd->block[8].bmi.as_mode]++;
i8x8_modes[xd->block[10].bmi.as_mode]++;
}
#endif
++cpi->ymode_count[m];
++cpi->uv_mode_count[uvm];
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin( VP8_COMP *cpi, MACROBLOCK *x )
{
#if USE_ACT_INDEX
x->act_zbin_adj = *(x->mb_activity_ptr);
#else
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + 4*cpi->activity_avg;
b = 4*act + cpi->activity_avg;
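// When act == activity_avg, a == b and the adjustment below is zero; as
// act grows the ratio b/a approaches 4, so act_zbin_adj saturates near
// +3 for very busy blocks (and near -3 for very flat ones).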
if ( act > cpi->activity_avg )
x->act_zbin_adj = (int)(((int64_t)b + (a>>1))/a) - 1;
else
x->act_zbin_adj = 1 - (int)(((int64_t)a + (b>>1))/b);
#endif
}
int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t)
{
int rate;
if (cpi->sf.RD && cpi->compressor_speed != 2)
vp8_rd_pick_intra_mode(cpi, x, &rate);
else
vp8_pick_intra_mode(cpi, x, &rate);
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
adjust_act_zbin( cpi, x );
vp8_update_zbin_extra(cpi, x);
}
#if CONFIG_T8X8
/* test code: set transform size based on mode selection */
if(cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != B_PRED)
{
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
cpi->t8x8_count++;
}
else
{
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count ++;
}
#endif
if(x->e_mbd.mode_info_context->mbmi.mode == I8X8_PRED)
{
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
}
else if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
else
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
if(x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED)
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
sum_intra_stats(cpi, x);
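// Convert the quantized coefficients for this MB into bitstream tokens.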
vp8_tokenize_mb(cpi, &x->e_mbd, t);
return rate;
}
#ifdef SPEEDSTATS
extern int cnt_pm;
#endif
extern void vp8_fix_contexts(MACROBLOCKD *x);
int vp8cx_encode_inter_macroblock
(
VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
int recon_yoffset, int recon_uvoffset
)
{
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
int intra_error = 0;
int rate;
int distortion;
unsigned char *segment_id = &xd->mode_info_context->mbmi.segment_id;
int seg_ref_active;
unsigned char ref_pred_flag;
x->skip = 0;
if (xd->segmentation_enabled)
x->encode_breakout = cpi->segment_encode_breakout[*segment_id];
else
x->encode_breakout = cpi->oxcf.encode_breakout;
if (cpi->sf.RD)
{
int zbin_mode_boost_enabled = cpi->zbin_mode_boost_enabled;
int single, dual, hybrid;
/* Are we using the fast quantizer for the mode selection? */
if(cpi->sf.use_fastquant_for_pick)
{
cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
fastquantb);
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
fastquantb_pair);
/* the fast quantizer does not use zbin_extra, so
* do not recalculate */
cpi->zbin_mode_boost_enabled = 0;
}
vp8_rd_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
&distortion, &intra_error, &single, &dual, &hybrid);
cpi->rd_single_diff += single;
cpi->rd_dual_diff += dual;
cpi->rd_hybrid_diff += hybrid;
if (x->e_mbd.mode_info_context->mbmi.ref_frame &&
x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
{
unsigned char pred_context;
pred_context = get_pred_context( cm, xd, PRED_DUAL );
if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME)
cpi->single_pred_count[pred_context]++;
else
cpi->dual_pred_count[pred_context]++;
}
#if CONFIG_T8X8
/* test code: set transform size based on mode selection */
if( cpi->common.txfm_mode == ALLOW_8X8
&& x->e_mbd.mode_info_context->mbmi.mode != I8X8_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != B_PRED
&& x->e_mbd.mode_info_context->mbmi.mode != SPLITMV)
{
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_8X8;
cpi->t8x8_count ++;
}
else
{
x->e_mbd.mode_info_context->mbmi.txfm_size = TX_4X4;
cpi->t4x4_count++;
}
#endif
/* switch back to the regular quantizer for the encode */
if (cpi->sf.improved_quant)
{
cpi->mb.quantize_b = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
quantb);
cpi->mb.quantize_b_pair = QUANTIZE_INVOKE(&cpi->rtcd.quantize,
quantb_pair);
}
/* restore cpi->zbin_mode_boost_enabled */
cpi->zbin_mode_boost_enabled = zbin_mode_boost_enabled;
}
else
vp8_pick_inter_mode(cpi, x, recon_yoffset, recon_uvoffset, &rate,
&distortion, &intra_error);
if(cpi->oxcf.tuning == VP8_TUNE_SSIM)
{
// Adjust the zbin based on this MB rate.
adjust_act_zbin( cpi, x );
}
#if 0
// Experimental RD code
cpi->frame_distortion += distortion;
cpi->last_mb_distortion = distortion;
#endif
{
// If cyclic update enabled
if (cpi->cyclic_refresh_mode_enabled)
{
// Clear segment_id back to 0 if not coded (last frame 0,0)
if ((*segment_id == 1) &&
((xd->mode_info_context->mbmi.ref_frame != LAST_FRAME) ||
(xd->mode_info_context->mbmi.mode != ZEROMV)))
{
*segment_id = 0;
/* segment_id changed, so update */
vp8cx_mb_init_quantizer(cpi, x);
}
//segfeature_test_function(cpi, xd);
#if DBG_PRNT_SEGMAP
// Debug output
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "%2d%2d%2d ",
*segment_id,
xd->mode_info_context->mbmi.ref_frame,
xd->mode_info_context->mbmi.mode );
fclose(statsfile);
}
#endif
}
}
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
if ( xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME )
{
if (xd->mode_info_context->mbmi.mode == ZEROMV)
{
if (xd->mode_info_context->mbmi.ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
}
else if (xd->mode_info_context->mbmi.mode == SPLITMV)
cpi->zbin_mode_boost = 0;
else
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
/* The fast quantizer doesn't use zbin_extra, only do so with
* the regular quantizer. */
if (cpi->sf.improved_quant)
vp8_update_zbin_extra(cpi, x);
seg_ref_active = segfeature_active( xd, *segment_id, SEG_LVL_REF_FRAME );
// SET VARIOUS PREDICTION FLAGS
// Did the chosen reference frame match its predicted value.
ref_pred_flag = ( (xd->mode_info_context->mbmi.ref_frame ==
get_pred_ref( cm, xd )) );
set_pred_flag( xd, PRED_REF, ref_pred_flag );
// If we have just a single reference frame coded for a segment then
// exclude from the reference frame counts used to work out
// probabilities. NOTE: At the moment we don't support custom trees
// for the reference frame coding for each segment but this is a
// possible future action.
if ( !seg_ref_active ||
( ( check_segref( xd, *segment_id, INTRA_FRAME ) +
check_segref( xd, *segment_id, LAST_FRAME ) +
check_segref( xd, *segment_id, GOLDEN_FRAME ) +
check_segref( xd, *segment_id, ALTREF_FRAME ) ) > 1 ) )
// TODO: this may not be a good idea, as it makes the sample size small and
// means the predictor functions cannot use data about the most likely value,
// only the most likely unpredicted value.
//#if CONFIG_COMPRED
// // Only update count for incorrectly predicted cases
// if ( !ref_pred_flag )
//#endif
{
cpi->count_mb_ref_frame_usage
[xd->mode_info_context->mbmi.ref_frame]++;
}
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
if (xd->mode_info_context->mbmi.mode == B_PRED)
{
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
}
else if(xd->mode_info_context->mbmi.mode == I8X8_PRED)
{
vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
}
else
{
vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
}
sum_intra_stats(cpi, x);
}
else
{
int ref_fb_idx;
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = cpi->common.gld_fb_idx;
else
ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
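// recon_yoffset / recon_uvoffset locate the current macroblock within the
// reference frame, so xd->pre now points at the co-located predictor.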
if (xd->mode_info_context->mbmi.second_ref_frame) {
int second_ref_fb_idx;
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = cpi->common.lst_fb_idx;
else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = cpi->common.gld_fb_idx;
else
second_ref_fb_idx = cpi->common.alt_fb_idx;
xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
recon_yoffset;
xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
recon_uvoffset;
xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
recon_uvoffset;
}
if (!x->skip)
{
vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
// Clear mb_skip_coeff if mb_no_coeff_skip is not set
if (!cpi->common.mb_no_coeff_skip)
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
}
else
vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
if (!x->skip)
{
#ifdef ENC_DEBUG
if (enc_debug)
{
int i;
printf("Segment=%d [%d, %d]: %d %d:\n", x->e_mbd.mode_info_context->mbmi.segment_id, mb_col_debug, mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
for (i =0; i<400; i++) {
printf("%3d ", xd->qcoeff[i]);
if (i%16 == 15) printf("\n");
}
printf("\n");
printf("eobs = ");
for (i=0;i<25;i++)
printf("%d:%d ", i, xd->block[i].eob);
printf("\n");
fflush(stdout);
}
#endif
vp8_tokenize_mb(cpi, xd, t);
#ifdef ENC_DEBUG
if (enc_debug) {
printf("Tokenized\n");
fflush(stdout);
}
#endif
}
else
{
if (cpi->common.mb_no_coeff_skip)
{
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
cpi->skip_true_count ++;
vp8_fix_contexts(xd);
}
else
{
vp8_stuff_mb(cpi, xd, t);
xd->mode_info_context->mbmi.mb_skip_coeff = 0;
cpi->skip_false_count ++;
}
}
}
return rate;
}