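/* Encode the four 16x16 macroblocks of one 32x32 superblock (the function
   prologue is elided in this excerpt; mbrow/mbcol are the SB's top-left MB
   coordinates). The delta tables below walk the MBs in raster order:
   (0,0) -> (0,1) -> (1,0) -> (1,1); the final step (-1, +1) leaves the
   cursor at the top-left MB of the next SB. */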
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = { +1, -1, +1, +1};
mb_row = mbrow;
mb_col = mbcol;
/* Encode MBs in raster order within the SB */
for (i = 0; i < 4; i++) {
int dy = row_delta[i];
int dx = col_delta[i];
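    /* The mode_info arrays are allocated one column wider than the frame
       (the "extended" stride); the gf_active map uses the plain MB width,
       hence the two different step sizes. */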
int offset_extended = dy * xd->mode_info_stride + dx;
int offset_unextended = dy * cm->mb_cols + dx;
if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
// MB lies outside frame, move on
mb_row += dy;
mb_col += dx;
x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
x->gf_active_ptr += offset_unextended;
x->partition_info += offset_extended;
xd->mode_info_context += offset_extended;
xd->prev_mode_info_context += offset_extended;
      assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
             (xd->mode_info_context - cpi->common.mip));
      continue;
    }
update_state(cpi, x, &x->mb_context[i]);
map_index = (mb_row * cpi->common.mb_cols) + mb_col;
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
// reset above block coeffs
xd->above_context = cm->above_context + mb_col;
      // Set up limit values for MV components to prevent them from
      // extending beyond the UMV borders (32x32 block size when the SB is
      // coded as a whole, 16x16 otherwise)
      x->mv_row_min = -((mb_row * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
      x->mv_col_min = -((mb_col * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);

      // Set up distance of MB to edge of the frame in 1/8th pel units
      xd->mb_to_top_edge = -((mb_row * 16) << 3);
      xd->mb_to_left_edge = -((mb_col * 16) << 3);
#if CONFIG_SUPERBLOCKS
      if (xd->mode_info_context->mbmi.encoded_as_sb) {
        x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
                         (VP9BORDERINPIXELS - 32 - VP9_INTERP_EXTEND));
        x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
                         (VP9BORDERINPIXELS - 32 - VP9_INTERP_EXTEND));
        xd->mb_to_bottom_edge = ((cm->mb_rows - 2 - mb_row) * 16) << 3;
        xd->mb_to_right_edge = ((cm->mb_cols - 2 - mb_col) * 16) << 3;
      } else {
#endif
        x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
                         (VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND));
        x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
                         (VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND));
        xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
        xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
#if CONFIG_SUPERBLOCKS
      }
#endif
xd->up_available = (mb_row != 0);
xd->left_available = (mb_col != 0);
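      // Offset of this MB within the reconstruction frame buffers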
recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
#if !CONFIG_SUPERBLOCKS
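      // Copy the current MB's source pixels to a 16x16 work buffer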
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
vp9_activity_masking(cpi, x);
// Is segmentation enabled
      if (xd->segmentation_enabled) {
        // MB-level adjustment to the quantizer
        vp9_mb_init_quantizer(cpi, x);
      }
#if CONFIG_SUPERBLOCKS
if (!xd->mode_info_context->mbmi.encoded_as_sb)
#endif
vp9_intra_prediction_down_copy(xd);
      if (cm->frame_type == KEY_FRAME) {
#if CONFIG_SUPERBLOCKS
        if (xd->mode_info_context->mbmi.encoded_as_sb)
          encode_superblock(cpi, x, tp, recon_yoffset, recon_uvoffset,
                            mb_col, mb_row);
        else
#endif
          encode_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset, 1,
                            mb_col, mb_row);
// Note the encoder may have changed the segment_id
} else {
unsigned char *segment_id;
int seg_ref_active;
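        // Update usage counts for single vs. compound prediction,
        // bucketed by the compound-prediction context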
if (xd->mode_info_context->mbmi.ref_frame) {
unsigned char pred_context;
pred_context = vp9_get_pred_context(cm, xd, PRED_COMP);
if (xd->mode_info_context->mbmi.second_ref_frame <= INTRA_FRAME)
cpi->single_pred_count[pred_context]++;
else
cpi->comp_pred_count[pred_context]++;
}
#if CONFIG_SUPERBLOCKS
        if (xd->mode_info_context->mbmi.encoded_as_sb)
          encode_superblock(cpi, x, tp, recon_yoffset, recon_uvoffset,
                            mb_col, mb_row);
        else
#endif
          encode_macroblock(cpi, x, tp, recon_yoffset, recon_uvoffset, 1,
                            mb_col, mb_row);
// Note the encoder may have changed the segment_id
#ifdef MODE_STATS
        if (mbmi->mode == SPLITMV) {
          int b;
          for (b = 0; b < x->partition_info->count; b++) {
            inter_b_modes[x->partition_info->bmi[b].mode]++;
          }
        }
#endif
// If we have just a single reference frame coded for a segment then
// exclude from the reference frame counts used to work out
        // probabilities. NOTE: At the moment we don't support custom trees
// for the reference frame coding for each segment but this is a
// possible future action.
        segment_id = &mbmi->segment_id;
        seg_ref_active = vp9_segfeature_active(xd, *segment_id,
                                               SEG_LVL_REF_FRAME);
        if (!seg_ref_active ||
            ((vp9_check_segref(xd, *segment_id, INTRA_FRAME) +
              vp9_check_segref(xd, *segment_id, LAST_FRAME) +
              vp9_check_segref(xd, *segment_id, GOLDEN_FRAME) +
              vp9_check_segref(xd, *segment_id, ALTREF_FRAME)) > 1)) {
          // Get the prediction context and status, then count success
          int pred_flag = vp9_get_pred_flag(xd, PRED_REF);
          int pred_context = vp9_get_pred_context(cm, xd, PRED_REF);

          cpi->ref_pred_count[pred_context][pred_flag]++;
        }
        // Count of last ref frame 0,0 usage
        if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
          cpi->inter_zz_count++;
      }
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
x->src.y_buffer += 32;
x->src.u_buffer += 16;
x->src.v_buffer += 16;
x->gf_active_ptr += 2;
x->partition_info += 2;
xd->mode_info_context += 2;
xd->prev_mode_info_context += 2;
(*tp)->Token = EOSB_TOKEN;
(*tp)++;
        if (mb_row < cm->mb_rows) cpi->tplist[mb_row].stop = *tp;
        break;
      }
#endif

      // Advance to the next MB in raster order within the SB
      mb_row += dy;
      mb_col += dx;
x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
x->gf_active_ptr += offset_unextended;
x->partition_info += offset_extended;
xd->mode_info_context += offset_extended;
xd->prev_mode_info_context += offset_extended;
assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
(xd->mode_info_context - cpi->common.mip));
(*tp)->Token = EOSB_TOKEN;
(*tp)++;
if (mb_row < cm->mb_rows) cpi->tplist[mb_row].stop = *tp;
  }
}
void encode_sb_row(VP9_COMP *cpi,
VP9_COMMON *cm,
int mb_row,
MACROBLOCK *x,
MACROBLOCKD *xd,
TOKENEXTRA **tp,
int *totalrate) {
int mb_col;
int mb_cols = cm->mb_cols;
// Initialize the left context for the new SB row
vpx_memset(cm->left_context, 0, sizeof(cm->left_context));
// Code each SB in the row
  for (mb_col = 0; mb_col < mb_cols; mb_col += 2) {
    int mb_rate = 0, mb_dist = 0;
#if CONFIG_SUPERBLOCKS
    int sb_rate = INT_MAX, sb_dist;
#endif
    MODE_INFO *mic = xd->mode_info_context;
    PARTITION_INFO *pi = x->partition_info;
    signed char *gfa = x->gf_active_ptr;
    uint8_t *yb = x->src.y_buffer;
    uint8_t *ub = x->src.u_buffer;
    uint8_t *vb = x->src.v_buffer;
#if CONFIG_SUPERBLOCKS
    // Pick modes assuming the SB is coded as 4 independent MBs
    xd->mode_info_context->mbmi.encoded_as_sb = 0;
#endif
pick_mb_modes(cpi, cm, mb_row, mb_col, x, xd, tp, &mb_rate, &mb_dist);
#if CONFIG_SUPERBLOCKS
    mb_rate += vp9_cost_bit(cm->sb_coded, 0);
#endif
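    // pick_mb_modes advanced the source and mode pointers across all four
    // MBs; rewind them to the SB's top-left MB before trying 32x32 coding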
x->src.y_buffer -= 32;
x->src.u_buffer -= 16;
x->src.v_buffer -= 16;
x->gf_active_ptr -= 2;
x->partition_info -= 2;
xd->mode_info_context -= 2;
xd->prev_mode_info_context -= 2;
assert(x->gf_active_ptr == gfa);
assert(x->partition_info == pi);
assert(xd->mode_info_context == mic);
assert(x->src.y_buffer == yb);
assert(x->src.u_buffer == ub);
assert(x->src.v_buffer == vb);
#if CONFIG_SUPERBLOCKS
    if (!((( mb_cols & 1) && mb_col == mb_cols - 1) ||
((cm->mb_rows & 1) && mb_row == cm->mb_rows - 1))) {
/* Pick a mode assuming that it applies to all 4 of the MBs in the SB */
xd->mode_info_context->mbmi.encoded_as_sb = 1;
pick_sb_modes(cpi, cm, mb_row, mb_col, x, xd, tp, &sb_rate, &sb_dist);
      sb_rate += vp9_cost_bit(cm->sb_coded, 1);
    }
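    /* RDCOST folds rate and distortion into one Lagrangian cost (roughly
       rate * rdmult / 256 + dist * rddiv), so the comparison below keeps
       whichever coding of the SB has the lower joint cost. */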
/* Decide whether to encode as a SB or 4xMBs */
if (sb_rate < INT_MAX &&
RDCOST(x->rdmult, x->rddiv, sb_rate, sb_dist) <
RDCOST(x->rdmult, x->rddiv, mb_rate, mb_dist)) {
xd->mode_info_context->mbmi.encoded_as_sb = 1;
xd->mode_info_context[1].mbmi.encoded_as_sb = 1;
xd->mode_info_context[cm->mode_info_stride].mbmi.encoded_as_sb = 1;
      xd->mode_info_context[1 + cm->mode_info_stride].mbmi.encoded_as_sb = 1;
      *totalrate += sb_rate;
    } else
#endif
    {
#if CONFIG_SUPERBLOCKS
xd->mode_info_context->mbmi.encoded_as_sb = 0;
if (cm->mb_cols - 1 > mb_col)
xd->mode_info_context[1].mbmi.encoded_as_sb = 0;
if (cm->mb_rows - 1 > mb_row) {
xd->mode_info_context[cm->mode_info_stride].mbmi.encoded_as_sb = 0;
if (cm->mb_cols - 1 > mb_col)
xd->mode_info_context[1 + cm->mode_info_stride].mbmi.encoded_as_sb = 0;
}
#endif
      *totalrate += mb_rate;
    }

    /* Encode SB using best computed mode(s) */
    encode_sb(cpi, cm, mb_row, mb_col, x, xd, tp);
assert(x->gf_active_ptr == gfa + 2);
assert(x->partition_info == pi + 2);
assert(xd->mode_info_context == mic + 2);
assert(x->src.y_buffer == yb + 32);
assert(x->src.u_buffer == ub + 16);
assert(x->src.v_buffer == vb + 16);
  }

  // this is to account for the border: the mode_info arrays carry one
  // extra column, while the gf_active map does not
  x->gf_active_ptr += mb_cols - (mb_cols & 0x1);
x->partition_info += xd->mode_info_stride + 1 - (mb_cols & 0x1);
xd->mode_info_context += xd->mode_info_stride + 1 - (mb_cols & 0x1);
xd->prev_mode_info_context += xd->mode_info_stride + 1 - (mb_cols & 0x1);
  assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
         (xd->mode_info_context - cpi->common.mip));
}
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
// GF active flags data structure
x->gf_active_ptr = (signed char *)cpi->gf_active_flags;
// Activity map pointer
x->mb_activity_ptr = cpi->mb_activity_map;
x->act_zbin_adj = 0;
cpi->seg0_idx = 0;
vpx_memset(cpi->ref_pred_count, 0, sizeof(cpi->ref_pred_count));
xd->mode_info_context = cm->mi;
xd->mode_info_stride = cm->mode_info_stride;
xd->prev_mode_info_context = cm->prev_mi;
xd->frames_since_golden = cm->frames_since_golden;
xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp9_init_mbmode_probs(cm);
// Copy data over into macro block data structures.
  x->src = *cpi->Source;
xd->pre = cm->yv12_fb[cm->lst_fb_idx];
xd->dst = cm->yv12_fb[cm->new_fb_idx];
vp9_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
vp9_build_block_offsets(x);
vp9_setup_block_dptrs(&x->e_mbd);
vp9_setup_block_ptrs(x);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
vp9_zero(cpi->count_mb_ref_frame_usage)
vp9_zero(cpi->bmode_count)
vp9_zero(cpi->ymode_count)
vp9_zero(cpi->i8x8_mode_count)
vp9_zero(cpi->y_uv_mode_count)
vp9_zero(cpi->sub_mv_ref_count)
vp9_zero(cpi->mbsplit_count)
vp9_zero(cpi->common.fc.mv_ref_ct)
#if CONFIG_COMP_INTERINTRA_PRED
vp9_zero(cpi->interintra_count);
vp9_zero(cpi->interintra_select_count);
#endif
vpx_memset(cm->above_context, 0,
sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
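  // MV components are stored in 1/8-pel units; in full-pixel-only mode the
  // mask below clears the three fractional bits so MVs snap to whole pixels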
xd->fullpixel_mask = 0xffffffff;
  if (cm->full_pixel)
    xd->fullpixel_mask = 0xfffffff8;
}
static void encode_frame_internal(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  TOKENEXTRA *tp = cpi->tok;
  int mb_row;
  int totalrate;
// printf("encode_frame_internal frame %d (%d)\n",
// cpi->common.current_video_frame, cpi->common.show_frame);
// Compute a modified set of reference frame probabilities to use when
// prediction fails. These are based on the current general estimates for
// this frame which may be updated with each iteration of the recode loop.
  // Debug output of the segment map (normally compiled out)
#if DBG_PRNT_SEGMAP
  {
    FILE *statsfile;
    statsfile = fopen("segmap2.stt", "a");
    fprintf(statsfile, "\n");
    fclose(statsfile);
  }
#endif
totalrate = 0;
// Functions setup for all frame types so we can use MC in AltRef
vp9_setup_interp_filters(xd, cm->mcomp_filter_type, cm);
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
cpi->prediction_error = 0;
cpi->intra_error = 0;
cpi->skip_true_count[0] = cpi->skip_true_count[1] = cpi->skip_true_count[2] = 0;
cpi->skip_false_count[0] = cpi->skip_false_count[1] = cpi->skip_false_count[2] = 0;
if (cm->current_video_frame == 0) {
// Initially assume that we'll signal the prediction filter
// state at the frame level and that it is off.
cpi->common.pred_filter_mode = 0;
cpi->common.prob_pred_filter_off = 128;
}
cpi->pred_filter_on_count = 0;
cpi->pred_filter_off_count = 0;
vp9_zero(cpi->switchable_interp_count);
vp9_zero(cpi->best_switchable_interp_count);
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
vp9_zero(cpi->coef_counts_4x4);
vp9_zero(cpi->hybrid_coef_counts_4x4);
vp9_zero(cpi->coef_counts_8x8);
vp9_zero(cpi->hybrid_coef_counts_8x8);
vp9_zero(cpi->coef_counts_16x16);
vp9_zero(cpi->hybrid_coef_counts_16x16);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
vp9_zero(cpi->coef_counts_32x32);
#endif
#if CONFIG_NEW_MVREF
vp9_zero(cpi->mb_mv_ref_count);
#endif
vp9_frame_init_quantizer(cpi);
vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vp9_initialize_me_consts(cpi, cm->base_qindex);
  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    // Initialize encode frame context.
    init_encode_frame_mb_context(cpi);

    // Build a frame level activity map
build_activity_map(cpi);
}
  // Re-initialize the encode frame context.
init_encode_frame_mb_context(cpi);
vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
vpx_memset(cpi->txfm_count_32x32p, 0, sizeof(cpi->txfm_count_32x32p));
vpx_memset(cpi->txfm_count_16x16p, 0, sizeof(cpi->txfm_count_16x16p));
vpx_memset(cpi->txfm_count_8x8p, 0, sizeof(cpi->txfm_count_8x8p));
vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
{
// For each row of SBs in the frame
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 2) {
      int offset = (cm->mb_cols + 1) & ~0x1;

      encode_sb_row(cpi, cm, mb_row, x, xd, &tp, &totalrate);

      // adjust to the next row of SBs
x->src.y_buffer += 32 * x->src.y_stride - 16 * offset;
x->src.u_buffer += 16 * x->src.uv_stride - 8 * offset;
x->src.v_buffer += 16 * x->src.uv_stride - 8 * offset;
}
      cpi->tok_count = (unsigned int)(tp - cpi->tok);
    }
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
}
// 256 rate units to the bit,
// projected_frame_size in units of BYTES
cpi->projected_frame_size = totalrate >> 8;
// Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
}
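// Returns 1 if the encoder has a real choice between at least two reference
// frames this frame, honouring any segment-level reference-frame locks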
static int check_dual_ref_flags(VP9_COMP *cpi) {
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int ref_flags = cpi->ref_frame_flags;
  if (vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
    if ((ref_flags & (VP9_LAST_FLAG | VP9_GOLD_FLAG)) == (VP9_LAST_FLAG | VP9_GOLD_FLAG) &&
        vp9_check_segref(xd, 1, LAST_FRAME))
      return 1;
    if ((ref_flags & (VP9_GOLD_FLAG | VP9_ALT_FLAG)) == (VP9_GOLD_FLAG | VP9_ALT_FLAG) &&
        vp9_check_segref(xd, 1, GOLDEN_FRAME))
      return 1;
    if ((ref_flags & (VP9_ALT_FLAG | VP9_LAST_FLAG)) == (VP9_ALT_FLAG | VP9_LAST_FLAG) &&
        vp9_check_segref(xd, 1, ALTREF_FRAME))
      return 1;
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) +
            !!(ref_flags & VP9_LAST_FLAG) +
            !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}
static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
VP9_COMMON *cm = &cpi->common;
  int mb_row, mb_col, mis = cm->mode_info_stride, segment_id, skip;
  MODE_INFO *mi, *mi_ptr = cm->mi;
#if CONFIG_SUPERBLOCKS
  MODE_INFO *sb_mi_ptr = cm->mi, *sb_mi;
MB_MODE_INFO *sb_mbmi;
#endif
MB_MODE_INFO *mbmi;
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &x->e_mbd;
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++, mi_ptr += mis) {
mi = mi_ptr;
#if CONFIG_SUPERBLOCKS
sb_mi = sb_mi_ptr;
#endif
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++, mi++) {
mbmi = &mi->mbmi;
#if CONFIG_SUPERBLOCKS
sb_mbmi = &sb_mi->mbmi;
#endif
if (mbmi->txfm_size > txfm_max) {
#if CONFIG_SUPERBLOCKS
if (sb_mbmi->encoded_as_sb) {
if (!((mb_col & 1) || (mb_row & 1))) {
segment_id = mbmi->segment_id;
skip = mbmi->mb_skip_coeff;
if (mb_col < cm->mb_cols - 1) {
segment_id = segment_id && mi[1].mbmi.segment_id;
skip = skip && mi[1].mbmi.mb_skip_coeff;
}
if (mb_row < cm->mb_rows - 1) {
segment_id = segment_id &&
mi[cm->mode_info_stride].mbmi.segment_id;
skip = skip && mi[cm->mode_info_stride].mbmi.mb_skip_coeff;
if (mb_col < cm->mb_cols - 1) {
segment_id = segment_id &&
mi[cm->mode_info_stride + 1].mbmi.segment_id;
skip = skip && mi[cm->mode_info_stride + 1].mbmi.mb_skip_coeff;
}
}
xd->mode_info_context = mi;
assert((vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0) ||
(cm->mb_no_coeff_skip && skip));
mbmi->txfm_size = txfm_max;
} else {
mbmi->txfm_size = sb_mbmi->txfm_size;
}
} else {
#endif
segment_id = mbmi->segment_id;
xd->mode_info_context = mi;
assert((vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) &&
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB) == 0) ||
(cm->mb_no_coeff_skip && mbmi->mb_skip_coeff));
          mbmi->txfm_size = txfm_max;
#if CONFIG_SUPERBLOCKS
        }
#endif
      }
#if CONFIG_SUPERBLOCKS
if (mb_col & 1)
sb_mi += 2;
#endif
    }
#if CONFIG_SUPERBLOCKS
if (mb_row & 1)
sb_mi_ptr += 2 * mis;
#endif
}
}
void vp9_encode_frame(VP9_COMP *cpi) {
  int i, frame_type, pred_type;
  TXFM_MODE txfm_type;
/*
* This code does a single RD pass over the whole frame assuming
* either compound, single or hybrid prediction as per whatever has
* worked best for that type of frame in the past.
* It also predicts whether another coding mode would have worked
 * better than this coding mode. If that is the case, it remembers
* that for subsequent frames.
* It does the same analysis for transform size selection also.
*/
if (cpi->common.frame_type == KEY_FRAME)
frame_type = 0;
else if (cpi->is_src_frame_alt_ref && cpi->common.refresh_golden_frame)
frame_type = 3;
  else if (cpi->common.refresh_golden_frame || cpi->common.refresh_alt_ref_frame)
    frame_type = 1;
  else
    frame_type = 2;
/* prediction (compound, single or hybrid) mode selection */
if (frame_type == 3)
pred_type = SINGLE_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][1] >
cpi->rd_prediction_type_threshes[frame_type][0] &&
           cpi->rd_prediction_type_threshes[frame_type][1] >
           cpi->rd_prediction_type_threshes[frame_type][2] &&
check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
pred_type = COMP_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][0] >
cpi->rd_prediction_type_threshes[frame_type][2])
pred_type = SINGLE_PREDICTION_ONLY;
else
pred_type = HYBRID_PREDICTION;
/* transform size (4x4, 8x8, 16x16 or select-per-mb) selection */
#if CONFIG_LOSSLESS
if (cpi->oxcf.lossless) {
txfm_type = ONLY_4X4;
} else
#endif
/* FIXME (rbultje)
* this is a hack (no really), basically to work around the complete
* nonsense coefficient cost prediction for keyframes. The probabilities
* are reset to defaults, and thus we basically have no idea how expensive
* a 4x4 vs. 8x8 will really be. The result is that any estimate at which
* of the two is better is utterly bogus.
* I'd like to eventually remove this hack, but in order to do that, we
* need to move the frame reset code from the frame encode init to the
* bitstream write code, or alternatively keep a backup of the previous
* keyframe's probabilities as an estimate of what the current keyframe's
* coefficient cost distributions may look like. */
if (frame_type == 0) {
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
txfm_type = ALLOW_32X32;
#else
    txfm_type = ALLOW_16X16;
#endif
} else
#if 0
/* FIXME (rbultje)
* this code is disabled for a similar reason as the code above; the
* problem is that each time we "revert" to 4x4 only (or even 8x8 only),
* the coefficient probabilities for 16x16 (and 8x8) start lagging behind,
* thus leading to them lagging further behind and not being chosen for
* subsequent frames either. This is essentially a local minimum problem
* that we can probably fix by estimating real costs more closely within
* a frame, perhaps by re-calculating costs on-the-fly as frame encoding
* progresses. */
if (cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] &&
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] &&
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
txfm_type = TX_MODE_SELECT;
} else if (cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]
&& cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16]
) {
txfm_type = ONLY_4X4;
} else if (cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
txfm_type = ALLOW_16X16;
} else
txfm_type = ALLOW_8X8;
#else
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] >=
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_32X32 : TX_MODE_SELECT;
#else
  txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
              cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
              ALLOW_16X16 : TX_MODE_SELECT;
#endif
#endif
cpi->common.txfm_mode = txfm_type;
if (txfm_type != TX_MODE_SELECT) {
cpi->common.prob_tx[0] = 128;
cpi->common.prob_tx[1] = 128;
}
cpi->common.comp_pred_mode = pred_type;
encode_frame_internal(cpi);
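  /* Fold this frame's per-MB prediction-type RD differences into the running
     thresholds: thresh = (thresh + diff) / 2, an exponential moving average
     with weight 1/2, kept separately per frame type. */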
for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
const int diff = (int)(cpi->rd_comp_pred_diff[i] / cpi->common.MBs);
cpi->rd_prediction_type_threshes[frame_type][i] += diff;
cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
}
for (i = 0; i < NB_TXFM_MODES; ++i) {
int64_t pd = cpi->rd_tx_select_diff[i];
int diff;
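    // For TX_MODE_SELECT, first discount an approximate rate cost of
    // signalling the per-MB transform size before comparing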
if (i == TX_MODE_SELECT)
pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv,
2048 * (TX_SIZE_MAX_SB - 1), 0);
    diff = (int)(pd / cpi->common.MBs);
    cpi->rd_tx_select_threshes[frame_type][i] += diff;
    cpi->rd_tx_select_threshes[frame_type][i] /= 2;
}
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
int single_count_zero = 0;
int comp_count_zero = 0;
for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
single_count_zero += cpi->single_pred_count[i];
comp_count_zero += cpi->comp_pred_count[i];
}
if (comp_count_zero == 0) {
cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
    } else if (single_count_zero == 0) {
      cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
    }
  }
if (cpi->common.txfm_mode == TX_MODE_SELECT) {
const int count4x4 = cpi->txfm_count_16x16p[TX_4X4] +
cpi->txfm_count_32x32p[TX_4X4] +
cpi->txfm_count_8x8p[TX_4X4];
const int count8x8_lp = cpi->txfm_count_32x32p[TX_8X8] +
cpi->txfm_count_16x16p[TX_8X8];
const int count8x8_8x8p = cpi->txfm_count_8x8p[TX_8X8];
const int count16x16_16x16p = cpi->txfm_count_16x16p[TX_16X16];
const int count16x16_lp = cpi->txfm_count_32x32p[TX_16X16];
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
const int count32x32 = cpi->txfm_count_32x32p[TX_32X32];
#else
const int count32x32 = 0;
#endif
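    // If a transform size was never chosen anywhere in this frame, narrow
    // txfm_mode so the per-MB size selection need not be signalled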
    if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
        count32x32 == 0) {
      cpi->common.txfm_mode = ALLOW_8X8;
      reset_skip_txfm_size(cpi, TX_8X8);
    } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
               count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
      cpi->common.txfm_mode = ONLY_4X4;
      reset_skip_txfm_size(cpi, TX_4X4);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
} else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
cpi->common.txfm_mode = ALLOW_32X32;
#endif
} else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
cpi->common.txfm_mode = ALLOW_16X16;
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
reset_skip_txfm_size(cpi, TX_16X16);
#endif
    }
  }
// Update interpolation filter strategy for next frame.
  if ((cpi->common.frame_type != KEY_FRAME) && (cpi->sf.search_best_filter))
    select_interp_filter_type(cpi);
}
void vp9_setup_block_ptrs(MACROBLOCK *x) {
  int r, c;
  int i;

  // 16 luma 4x4 blocks
  for (r = 0; r < 4; r++) {
    for (c = 0; c < 4; c++) {
      x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;
    }
  }
  // 4 U chroma blocks (offset 256 into src_diff)
  for (r = 0; r < 2; r++) {
    for (c = 0; c < 2; c++) {
      x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;
    }
  }
  // 4 V chroma blocks (offset 320)
  for (r = 0; r < 2; r++) {
    for (c = 0; c < 2; c++) {
      x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
    }
  }

  for (i = 0; i < 25; i++) {
    x->block[i].coeff = x->coeff + i * 16;
  }
}
void vp9_build_block_offsets(MACROBLOCK *x) {
  int block = 0;
  int br, bc;

  vp9_build_block_doffsets(&x->e_mbd);
#if !CONFIG_SUPERBLOCKS
// y blocks
x->thismb_ptr = &x->thismb[0];
for (br = 0; br < 4; br++) {
for (bc = 0; bc < 4; bc++) {
BLOCK *this_block = &x->block[block];
// this_block->base_src = &x->src.y_buffer;
// this_block->src_stride = x->src.y_stride;
// this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->thismb_ptr;
this_block->src_stride = 16;
this_block->src = 4 * br * 16 + 4 * bc;
      ++block;
    }
  }
#else
for (br = 0; br < 4; br++) {
for (bc = 0; bc < 4; bc++) {
BLOCK *this_block = &x->block[block];
// this_block->base_src = &x->src.y_buffer;
// this_block->src_stride = x->src.y_stride;
// this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->src.y_buffer;
this_block->src_stride = x->src.y_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
}
#endif
// u blocks
for (br = 0; br < 2; br++) {
for (bc = 0; bc < 2; bc++) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.u_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
    }
  }
// v blocks
for (br = 0; br < 2; br++) {
for (bc = 0; bc < 2; bc++) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.v_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
      ++block;
    }
  }
}
static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
  const MACROBLOCKD *xd = &x->e_mbd;
  const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
  const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;

#ifdef MODE_STATS
  const int is_key = cpi->common.frame_type == KEY_FRAME;
++ (is_key ? uv_modes : inter_uv_modes)[uvm];
++ uv_modes_y[m][uvm];
if (m == B_PRED) {
    unsigned int *const bct = is_key ? b_modes : inter_b_modes;
    int b = 0;

    do {
++ bct[xd->block[b].bmi.as_mode.first];
} while (++b < 16);
}
if (m == I8X8_PRED) {
i8x8_modes[xd->block[0].bmi.as_mode.first]++;
i8x8_modes[xd->block[2].bmi.as_mode.first]++;
i8x8_modes[xd->block[8].bmi.as_mode.first]++;
i8x8_modes[xd->block[10].bmi.as_mode.first]++;
  }
#endif
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
++cpi->sb_ymode_count[m];
} else
#endif
++cpi->ymode_count[m];
if (m != I8X8_PRED)
++cpi->y_uv_mode_count[m][uvm];
else {
cpi->i8x8_mode_count[xd->block[0].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[2].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[8].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[10].bmi.as_mode.first]++;
}
if (m == B_PRED) {
int b = 0;
do {
int m = xd->block[b].bmi.as_mode.first;
#if CONFIG_NEWBINTRAMODES
if (m == B_CONTEXT_PRED) m -= CONTEXT_PRED_REPLACEMENTS;
#endif
      ++cpi->bmode_count[m];
    } while (++b < 16);
  }
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + 4 * cpi->activity_avg;