// Activity map pointer
x->mb_activity_ptr = cpi->mb_activity_map;
x->act_zbin_adj = 0;
cpi->seg0_idx = 0;
vpx_memset(cpi->ref_pred_count, 0, sizeof(cpi->ref_pred_count));
xd->mode_info_context = cm->mi;
xd->mode_info_stride = cm->mode_info_stride;
xd->prev_mode_info_context = cm->prev_mi;
xd->frames_since_golden = cm->frames_since_golden;
xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp8_init_mbmode_probs(cm);
// Copy data over into macro block data structures.
x->src = *cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
// set up frame for intra coded blocks
vp8_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
vp8_zero(cpi->count_mb_ref_frame_usage)
vp8_zero(cpi->bmode_count)
vp8_zero(cpi->ymode_count)
vp8_zero(cpi->i8x8_mode_count)
vp8_zero(cpi->y_uv_mode_count)
vp8_zero(cpi->sub_mv_ref_count)
vp8_zero(cpi->mbsplit_count)
vp8_zero(cpi->common.fc.mv_ref_ct)
vp8_zero(cpi->common.fc.mv_ref_ct_a)
// vp8_zero(cpi->uv_mode_count)
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
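// When the frame is coded in full-pixel mode, mask off the three
// low-order (sub-pel) bits of each motion vector component so that
// vectors are truncated to whole pixels.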
xd->fullpixel_mask = 0xffffffff;
if (cm->full_pixel)
xd->fullpixel_mask = 0xfffffff8;
}

static void encode_frame_internal(VP8_COMP *cpi) {
  int mb_row;
  int totalrate;
  MACROBLOCK *const x = &cpi->mb;
  VP8_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
// Compute a modified set of reference frame probabilities to use when
// prediction fails. These are based on the current general estimates for
// this frame which may be updated with each iteration of the recode loop.
compute_mod_refprobs(cm);
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n");
fclose(statsfile);
}
totalrate = 0;
// Functions setup for all frame types so we can use MC in AltRef
vp8_setup_interp_filters(xd, cm->mcomp_filter_type, cm);
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
cpi->prediction_error = 0;
cpi->intra_error = 0;
cpi->skip_true_count[0] = cpi->skip_true_count[1] = cpi->skip_true_count[2] = 0;
cpi->skip_false_count[0] = cpi->skip_false_count[1] = cpi->skip_false_count[2] = 0;
#if CONFIG_PRED_FILTER
if (cm->current_video_frame == 0) {
// Initially assume that we'll signal the prediction filter
// state at the frame level and that it is off.
cpi->common.pred_filter_mode = 0;
cpi->common.prob_pred_filter_off = 128;
}
cpi->pred_filter_on_count = 0;
cpi->pred_filter_off_count = 0;
#endif
#if CONFIG_SWITCHABLE_INTERP
vp8_zero(cpi->switchable_interp_count);
#endif
// Experimental code
cpi->frame_distortion = 0;
cpi->last_mb_distortion = 0;
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
vp8_zero(cpi->MVcount);
vp8_zero(cpi->MVcount_hp);
vp8_zero(cpi->coef_counts);
vp8_zero(cpi->coef_counts_8x8);
#if CONFIG_TX16X16
vp8_zero(cpi->coef_counts_16x16);
#endif
vp8_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vp8cx_initialize_me_consts(cpi, cm->base_qindex);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Build a frame level activity map
build_activity_map(cpi);
}
// Re-initialize encode frame context.
init_encode_frame_mb_context(cpi);
vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
{
// For each row of SBs in the frame
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 2) {
int offset = (cm->mb_cols + 1) & ~0x1;
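// A superblock spans 2x2 macroblocks, so the column count is
// rounded up to an even number before stepping the source
// pointers down by one SB row (32 luma / 16 chroma lines).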
// adjust to the next row of SBs
x->src.y_buffer += 32 * x->src.y_stride - 16 * offset;
x->src.u_buffer += 16 * x->src.uv_stride - 8 * offset;
x->src.v_buffer += 16 * x->src.uv_stride - 8 * offset;
}
    }

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
  }
// 256 rate units to the bit,
// projected_frame_size in units of BYTES
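// (e.g. totalrate = 51200 rate units -> 51200 >> 8 = 200 bytes)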
cpi->projected_frame_size = totalrate >> 8;
// Keep record of the total distortion this time around for future use
cpi->last_frame_distortion = cpi->frame_distortion;
}
static int check_dual_ref_flags(VP8_COMP *cpi) {
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int ref_flags = cpi->ref_frame_flags;
if (segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
if ((ref_flags & (VP8_LAST_FLAG | VP8_GOLD_FLAG)) == (VP8_LAST_FLAG | VP8_GOLD_FLAG) &&
check_segref(xd, 1, LAST_FRAME))
return 1;
if ((ref_flags & (VP8_GOLD_FLAG | VP8_ALT_FLAG)) == (VP8_GOLD_FLAG | VP8_ALT_FLAG) &&
check_segref(xd, 1, GOLDEN_FRAME))
return 1;
if ((ref_flags & (VP8_ALT_FLAG | VP8_LAST_FLAG)) == (VP8_ALT_FLAG | VP8_LAST_FLAG) &&
check_segref(xd, 1, ALTREF_FRAME))
return 1;
return 0;
} else {
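    // Each !! collapses a flag to 0 or 1, so the sum below counts the
    // distinct reference frames available; two or more allows dual refs.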
return (!!(ref_flags & VP8_GOLD_FLAG) +
!!(ref_flags & VP8_LAST_FLAG) +
!!(ref_flags & VP8_ALT_FLAG)) >= 2;
}
}
void vp8_encode_frame(VP8_COMP *cpi) {
if (cpi->sf.RD) {
int i, frame_type, pred_type;
/*
* This code does a single RD pass over the whole frame assuming
* either compound, single or hybrid prediction as per whatever has
* worked best for that type of frame in the past.
* It also predicts whether another coding mode would have worked
* better that this coding mode. If that is the case, it remembers
* that for subsequent frames. If the difference is above a certain
* threshold, it will actually re-encode the current frame using
* that different coding mode.
*/
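    // frame_type: 0 = key frame, 1 = golden or ARF update,
    // 2 = normal inter frame, 3 = ARF overlay frame (the source
    // frame is the ALTREF).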
if (cpi->common.frame_type == KEY_FRAME)
frame_type = 0;
else if (cpi->is_src_frame_alt_ref && cpi->common.refresh_golden_frame)
frame_type = 3;
else if (cpi->common.refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
frame_type = 1;
    else
      frame_type = 2;
if (frame_type == 3)
pred_type = SINGLE_PREDICTION_ONLY;
    else if (cpi->rd_prediction_type_threshes[frame_type][1] >
                 cpi->rd_prediction_type_threshes[frame_type][0] &&
             cpi->rd_prediction_type_threshes[frame_type][1] >
                 cpi->rd_prediction_type_threshes[frame_type][2] &&
check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
pred_type = COMP_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][0] >
cpi->rd_prediction_type_threshes[frame_type][2])
pred_type = SINGLE_PREDICTION_ONLY;
else
pred_type = HYBRID_PREDICTION;
cpi->common.comp_pred_mode = pred_type;
encode_frame_internal(cpi);
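    // Update the running per-frame-type thresholds as a simple moving
    // average: new_thresh = (old_thresh + this_frame_diff) / 2.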
for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
int diff = cpi->rd_comp_pred_diff[i] / cpi->common.MBs;
cpi->rd_prediction_type_threshes[frame_type][i] += diff;
cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
}
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
int single_count_zero = 0;
int comp_count_zero = 0;
for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
single_count_zero += cpi->single_pred_count[i];
comp_count_zero += cpi->comp_pred_count[i];
}
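      // If one prediction type was never chosen anywhere in this frame,
      // signal the other type exclusively so the per-MB prediction flag
      // need not be coded.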
if (comp_count_zero == 0) {
cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
} else if (single_count_zero == 0) {
cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
}
    }
  } else
    encode_frame_internal(cpi);
}
void vp8_setup_block_ptrs(MACROBLOCK *x) {
int r, c;
int i;
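  // The src_diff buffer holds the 16x16 luma residual first (16 4x4
  // blocks at stride 16), then the 8x8 U plane at offset 256 and the
  // 8x8 V plane at offset 320, both at stride 8.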
for (r = 0; r < 4; r++) {
for (c = 0; c < 4; c++) {
x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
    }
  }
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
    }
  }
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
    }
  }
for (i = 0; i < 25; i++) {
x->block[i].coeff = x->coeff + i * 16;
}
}
void vp8_build_block_offsets(MACROBLOCK *x) {
int block = 0;
int br, bc;
vp8_build_block_doffsets(&x->e_mbd);
// y blocks
x->thismb_ptr = &x->thismb[0];
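  // Luma blocks read from the cached 16x16 copy of the macroblock in
  // x->thismb (fixed stride of 16); the commented-out lines below show
  // the old path that, like the chroma blocks further down, read the
  // source frame buffer directly.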
for (br = 0; br < 4; br++) {
for (bc = 0; bc < 4; bc++) {
BLOCK *this_block = &x->block[block];
// this_block->base_src = &x->src.y_buffer;
// this_block->src_stride = x->src.y_stride;
// this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->thismb_ptr;
this_block->src_stride = 16;
this_block->src = 4 * br * 16 + 4 * bc;
++block;
}
  }
// u blocks
for (br = 0; br < 2; br++) {
for (bc = 0; bc < 2; bc++) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.u_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
  }
// v blocks
for (br = 0; br < 2; br++) {
for (bc = 0; bc < 2; bc++) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.v_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
    }
  }
}
static void sum_intra_stats(VP8_COMP *cpi, MACROBLOCK *x) {
const MACROBLOCKD *xd = & x->e_mbd;
const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
#ifdef MODE_STATS
  const int is_key = cpi->common.frame_type == KEY_FRAME;

  ++ (is_key ? uv_modes : inter_uv_modes)[uvm];
  ++ uv_modes_y[m][uvm];

  if (m == B_PRED) {
    unsigned int *const bct = is_key ? b_modes : inter_b_modes;
    int b = 0;

    do {
++ bct[xd->block[b].bmi.as_mode.first];
} while (++b < 16);
}
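  // Blocks 0, 2, 8 and 10 are the top-left 4x4 blocks of the four 8x8
  // sub-blocks, so they carry the four 8x8 prediction modes.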
if (m == I8X8_PRED) {
i8x8_modes[xd->block[0].bmi.as_mode.first]++;
i8x8_modes[xd->block[2].bmi.as_mode.first]++;
i8x8_modes[xd->block[8].bmi.as_mode.first]++;
i8x8_modes[xd->block[10].bmi.as_mode.first]++;
}
#endif
++cpi->ymode_count[m];
if (m != I8X8_PRED)
++cpi->y_uv_mode_count[m][uvm];
else {
cpi->i8x8_mode_count[xd->block[0].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[2].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[8].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[10].bmi.as_mode.first]++;
}
if (m == B_PRED) {
int b = 0;
do {
++ cpi->bmode_count[xd->block[b].bmi.as_mode.first];
} while (++b < 16);
}
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin(VP8_COMP *cpi, MACROBLOCK *x) {
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
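  // Worked example: act = 4 * activity_avg gives a = 8 * avg and
  // b = 17 * avg, so act_zbin_adj = (17 + 4) / 8 - 1 = 1 (integer
  // division), i.e. a coarser zbin for high-activity macroblocks.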
if (act > cpi->activity_avg)
x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
else
x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
}
void vp8cx_encode_intra_macro_block(VP8_COMP *cpi,
MACROBLOCK *x,
TOKENEXTRA **t,
int output_enabled) {
  MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
if ((cpi->oxcf.tuning == VP8_TUNE_SSIM) && output_enabled) {
adjust_act_zbin(cpi, x);
vp8_update_zbin_extra(cpi, x);
}
  /* test code: set transform size based on mode selection */
  if (mbmi->mode <= TM_PRED) {
    mbmi->txfm_size = TX_16X16;
  } else if (cpi->common.txfm_mode == ALLOW_8X8
             && mbmi->mode != I8X8_PRED
             && mbmi->mode != B_PRED) {
    mbmi->txfm_size = TX_8X8;
  } else
    mbmi->txfm_size = TX_4X4;
  if (mbmi->mode == I8X8_PRED) {
    vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
    vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
  } else if (mbmi->mode == B_PRED)
    vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
  else
    vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);

  if (mbmi->mode != I8X8_PRED)
    vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
  if (output_enabled) {
    // Tokenize
    sum_intra_stats(cpi, x);
    vp8_tokenize_mb(cpi, &x->e_mbd, t, 0);
  } else
    vp8_tokenize_mb(cpi, &x->e_mbd, t, 1);
}
#ifdef SPEEDSTATS
extern int cnt_pm;
#endif
void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
TOKENEXTRA **t, int recon_yoffset,
int recon_uvoffset, int output_enabled) {
VP8_COMMON *cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
unsigned char *segment_id = &mbmi->segment_id;
int seg_ref_active;
unsigned char ref_pred_flag;
x->skip = 0;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
}
{
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
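    // (A larger zbin widens the quantizer dead zone, so more small
    // coefficients are quantized to zero.)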
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled) {
if (mbmi->ref_frame != INTRA_FRAME) {
        if (mbmi->mode == ZEROMV) {
          if (mbmi->ref_frame != LAST_FRAME)
            cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
          else
            cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
        } else if (mbmi->mode == SPLITMV)
          cpi->zbin_mode_boost = 0;
        else
          cpi->zbin_mode_boost = MV_ZBIN_BOOST;
      }
    }
vp8_update_zbin_extra(cpi, x);
}
seg_ref_active = segfeature_active(xd, *segment_id, SEG_LVL_REF_FRAME);
// SET VARIOUS PREDICTION FLAGS
// Did the chosen reference frame match its predicted value.
ref_pred_flag = ((mbmi->ref_frame == get_pred_ref(cm, xd)));
set_pred_flag(xd, PRED_REF, ref_pred_flag);
/* test code: set transform size based on mode selection */
if (mbmi->mode <= TM_PRED || mbmi->mode == NEWMV || mbmi->mode == ZEROMV ||
mbmi->mode == NEARMV || mbmi->mode == NEARESTMV) {
    mbmi->txfm_size = TX_16X16;
  } else if (cpi->common.txfm_mode == ALLOW_8X8
             && mbmi->mode != I8X8_PRED
             && mbmi->mode != B_PRED
             && mbmi->mode != SPLITMV) {
    mbmi->txfm_size = TX_8X8;
  } else
    mbmi->txfm_size = TX_4X4;
if (mbmi->ref_frame == INTRA_FRAME) {
    if (mbmi->mode == B_PRED) {
      vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
      vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
    } else if (mbmi->mode == I8X8_PRED) {
      vp8_encode_intra8x8mby(IF_RTCD(&cpi->rtcd), x);
      vp8_encode_intra8x8mbuv(IF_RTCD(&cpi->rtcd), x);
    } else {
      vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
      vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
    }

    if (output_enabled)
      sum_intra_stats(cpi, x);
} else {
int ref_fb_idx;
    if (mbmi->ref_frame == LAST_FRAME)
      ref_fb_idx = cpi->common.lst_fb_idx;
    else if (mbmi->ref_frame == GOLDEN_FRAME)
      ref_fb_idx = cpi->common.gld_fb_idx;
    else
      ref_fb_idx = cpi->common.alt_fb_idx;
xd->pre.y_buffer = cpi->common.yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer = cpi->common.yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = cpi->common.yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
    if (mbmi->second_ref_frame) {
      int second_ref_fb_idx;

      if (mbmi->second_ref_frame == LAST_FRAME)
        second_ref_fb_idx = cpi->common.lst_fb_idx;
      else if (mbmi->second_ref_frame == GOLDEN_FRAME)
        second_ref_fb_idx = cpi->common.gld_fb_idx;
      else
        second_ref_fb_idx = cpi->common.alt_fb_idx;

      xd->second_pre.y_buffer = cpi->common.yv12_fb[second_ref_fb_idx].y_buffer +
                                    recon_yoffset;
      xd->second_pre.u_buffer = cpi->common.yv12_fb[second_ref_fb_idx].u_buffer +
                                    recon_uvoffset;
      xd->second_pre.v_buffer = cpi->common.yv12_fb[second_ref_fb_idx].v_buffer +
                                    recon_uvoffset;
    }
if (!x->skip) {
vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
      // Clear mb_skip_coeff if mb_no_coeff_skip is not set
      if (!cpi->common.mb_no_coeff_skip)
        mbmi->mb_skip_coeff = 0;
    } else {
      vp8_build_1st_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                             xd->dst.u_buffer, xd->dst.v_buffer,
                                             xd->dst.y_stride,
                                             xd->dst.uv_stride);
    }
  }

  if (!x->skip) {
#ifdef ENC_DEBUG
    if (enc_debug) {
      int i;
      printf("Segment=%d [%d, %d]: %d %d:\n", mbmi->segment_id, mb_col_debug,
             mb_row_debug, xd->mb_to_left_edge, xd->mb_to_top_edge);
for (i = 0; i < 400; i++) {
printf("%3d ", xd->qcoeff[i]);
if (i % 16 == 15) printf("\n");
}
printf("\n");
printf("eobs = ");
for (i = 0; i < 25; i++)
printf("%d:%d ", i, xd->block[i].eob);
printf("\n");
fflush(stdout);
}
#endif
vp8_tokenize_mb(cpi, xd, t, !output_enabled);
#ifdef ENC_DEBUG
    if (enc_debug) {
      printf("Tokenized\n");
      fflush(stdout);
    }
#endif
} else {
int mb_skip_context =
cpi->common.mb_no_coeff_skip ?
(x->e_mbd.mode_info_context - 1)->mbmi.mb_skip_coeff +
(x->e_mbd.mode_info_context - cpi->common.mode_info_stride)->mbmi.mb_skip_coeff :
0;
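    // The skip context is the sum of the left and above neighbours'
    // mb_skip_coeff flags, giving a context of 0, 1 or 2.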
if (cpi->common.mb_no_coeff_skip) {
if (output_enabled)
cpi->skip_true_count[mb_skip_context]++;
      vp8_fix_contexts(xd);
    } else {
vp8_stuff_mb(cpi, xd, t, !output_enabled);
if (output_enabled)
cpi->skip_false_count[mb_skip_context]++;