(*tp)->Token = EOSB_TOKEN;
(*tp)++;
}
#endif
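
// Split path: the 64x64 SB is coded as four 32x32 quadrants, each with
// its own (possibly further split) partitioning.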
cpi->partition_count[partition_plane(bsize)][PARTITION_SPLIT]++;
for (i = 0; i < 4; i++) {
const int x_idx = i & 1, y_idx = i >> 1;
if (mb_row + y_idx * 2 >= cm->mb_rows ||
mb_col + x_idx * 2 >= cm->mb_cols) {
// MB lies outside frame, move on
continue;
}
xd->sb_index = i;
      encode_sb(cpi, mb_row + 2 * y_idx, mb_col + 2 * x_idx, 1, tp,
                bsize[i]);
    }
  }
}
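
// Encodes one row of 64x64 SBs. For every SB, an RD search is run over
// the candidate partitionings (16x16 MBs, 32x16/16x32, 32x32, 64x32/32x64
// and 64x64); the cheapest combination in rate-distortion terms is kept
// and the SB is then re-encoded with that partitioning.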
static void encode_sb_row(VP9_COMP *cpi,
int mb_row,
TOKENEXTRA **tp,
int *totalrate) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int mb_col;
// Initialize the left context for the new SB row
vpx_memset(cm->left_context, 0, sizeof(cm->left_context));
for (mb_col = cm->cur_tile_mb_col_start;
mb_col < cm->cur_tile_mb_col_end; mb_col += 4) {
    int i;
    BLOCK_SIZE_TYPE sb_partitioning[4];
    int sb64_rate = 0, sb64_dist = 0;
    int sb64_skip = 0;
    ENTROPY_CONTEXT_PLANES l[4], a[4];
    TOKENEXTRA *tp_orig = *tp;
memcpy(&a, cm->above_context + mb_col, sizeof(a));
memcpy(&l, cm->left_context, sizeof(l));
for (i = 0; i < 4; i++) {
const int x_idx = (i & 1) << 1, y_idx = i & 2;
int sb32_rate = 0, sb32_dist = 0;
int splitmodes_used = 0;
int sb32_skip = 0;
int j;
ENTROPY_CONTEXT_PLANES l2[2], a2[2];
if (mb_row + y_idx >= cm->mb_rows || mb_col + x_idx >= cm->mb_cols)
continue;
xd->sb_index = i;
/* Function should not modify L & A contexts; save and restore on exit */
vpx_memcpy(l2, cm->left_context + y_idx, sizeof(l2));
vpx_memcpy(a2, cm->above_context + mb_col + x_idx, sizeof(a2));
/* Encode MBs in raster order within the SB */
sb_partitioning[i] = BLOCK_SIZE_MB16X16;
for (j = 0; j < 4; j++) {
const int x_idx_m = x_idx + (j & 1), y_idx_m = y_idx + (j >> 1);
int r, d;
if (mb_row + y_idx_m >= cm->mb_rows ||
mb_col + x_idx_m >= cm->mb_cols) {
// MB lies outside frame, move on
continue;
}
// Index of the MB in the SB 0..3
xd->mb_index = j;
splitmodes_used += pick_mb_mode(cpi, mb_row + y_idx_m,
mb_col + x_idx_m, tp, &r, &d);
sb32_rate += r;
sb32_dist += d;
// Dummy encode, do not do the tokenization
encode_macroblock(cpi, tp, 0, mb_row + y_idx_m,
mb_col + x_idx_m);
}
/* Restore L & A coding context to those in place on entry */
vpx_memcpy(cm->left_context + y_idx, l2, sizeof(l2));
vpx_memcpy(cm->above_context + mb_col + x_idx, a2, sizeof(a2));
sb32_rate += x->partition_cost[partition_plane(BLOCK_SIZE_SB32X32)]
[PARTITION_SPLIT];
if (cpi->sf.splitmode_breakout) {
sb32_skip = splitmodes_used;
sb64_skip += splitmodes_used;
}
#if CONFIG_SBSEGMENT
// check 32x16
if (mb_col + x_idx + 1 < cm->mb_cols) {
int r, d;
xd->mb_index = 0;
pick_sb_modes(cpi, mb_row + y_idx, mb_col + x_idx,
tp, &r, &d, BLOCK_SIZE_SB32X16,
&x->sb32x16_context[xd->sb_index][xd->mb_index]);
if (mb_row + y_idx + 1 < cm->mb_rows) {
int r2, d2;
update_state(cpi, &x->sb32x16_context[xd->sb_index][xd->mb_index],
BLOCK_SIZE_SB32X16, 0);
encode_superblock(cpi, tp,
0, mb_row + y_idx, mb_col + x_idx,
BLOCK_SIZE_SB32X16);
xd->mb_index = 1;
pick_sb_modes(cpi, mb_row + y_idx + 1, mb_col + x_idx,
tp, &r2, &d2, BLOCK_SIZE_SB32X16,
&x->sb32x16_context[xd->sb_index][xd->mb_index]);
r += r2;
d += d2;
}
r += x->partition_cost[partition_plane(BLOCK_SIZE_SB32X32)]
[PARTITION_HORZ];
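      // RDCOST(rdmult, rddiv, rate, dist) forms the Lagrangian cost
      // D + lambda * R; in libvpx it is (roughly)
      // ((128 + rate * rdmult) >> 8) + (dist << rddiv).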
/* is this better than MB coding? */
if (RDCOST(x->rdmult, x->rddiv, r, d) <
RDCOST(x->rdmult, x->rddiv, sb32_rate, sb32_dist)) {
sb32_rate = r;
sb32_dist = d;
sb_partitioning[i] = BLOCK_SIZE_SB32X16;
}
vpx_memcpy(cm->left_context + y_idx, l2, sizeof(l2));
vpx_memcpy(cm->above_context + mb_col + x_idx, a2, sizeof(a2));
}
// check 16x32
if (mb_row + y_idx + 1 < cm->mb_rows) {
int r, d;
xd->mb_index = 0;
pick_sb_modes(cpi, mb_row + y_idx, mb_col + x_idx,
tp, &r, &d, BLOCK_SIZE_SB16X32,
&x->sb16x32_context[xd->sb_index][xd->mb_index]);
if (mb_col + x_idx + 1 < cm->mb_cols) {
int r2, d2;
update_state(cpi, &x->sb16x32_context[xd->sb_index][xd->mb_index],
BLOCK_SIZE_SB16X32, 0);
encode_superblock(cpi, tp,
0, mb_row + y_idx, mb_col + x_idx,
BLOCK_SIZE_SB16X32);
xd->mb_index = 1;
pick_sb_modes(cpi, mb_row + y_idx, mb_col + x_idx + 1,
tp, &r2, &d2, BLOCK_SIZE_SB16X32,
&x->sb16x32_context[xd->sb_index][xd->mb_index]);
r += r2;
d += d2;
}
r += x->partition_cost[partition_plane(BLOCK_SIZE_SB32X32)]
[PARTITION_VERT];
/* is this better than MB coding? */
if (RDCOST(x->rdmult, x->rddiv, r, d) <
RDCOST(x->rdmult, x->rddiv, sb32_rate, sb32_dist)) {
sb32_rate = r;
sb32_dist = d;
sb_partitioning[i] = BLOCK_SIZE_SB16X32;
}
vpx_memcpy(cm->left_context + y_idx, l2, sizeof(l2));
vpx_memcpy(cm->above_context + mb_col + x_idx, a2, sizeof(a2));
}
#endif
if (!sb32_skip && !(mb_col + x_idx + 1 >= cm->mb_cols ||
mb_row + y_idx + 1 >= cm->mb_rows)) {
int r, d;
/* Pick a mode assuming that it applies to all 4 of the MBs in the SB */
pick_sb_modes(cpi, mb_row + y_idx, mb_col + x_idx,
tp, &r, &d, BLOCK_SIZE_SB32X32,
&x->sb32_context[xd->sb_index]);
r += x->partition_cost[partition_plane(BLOCK_SIZE_SB32X32)]
[PARTITION_NONE];
if (RDCOST(x->rdmult, x->rddiv, r, d) <
RDCOST(x->rdmult, x->rddiv, sb32_rate, sb32_dist)) {
sb32_rate = r;
sb32_dist = d;
sb_partitioning[i] = BLOCK_SIZE_SB32X32;
}
      }

      // If we used 16x16 instead of 32x32 then skip 64x64 (if enabled).
      if (cpi->sf.mb16_breakout && sb_partitioning[i] != BLOCK_SIZE_SB32X32)
        ++sb64_skip;

      sb64_rate += sb32_rate;
      sb64_dist += sb32_dist;
/* Encode SB using best computed mode(s) */
// FIXME(rbultje): there really shouldn't be any need to encode_mb/sb
// for each level that we go up, we can just keep tokens and recon
// pixels of the lower level; also, inverting SB/MB order (big->small
// instead of small->big) means we can use as threshold for small, which
// may enable breakouts if RD is not good enough (i.e. faster)
encode_sb(cpi, mb_row + y_idx, mb_col + x_idx, 0, tp,
                sb_partitioning[i]);
    }

    memcpy(cm->above_context + mb_col, &a, sizeof(a));
    memcpy(cm->left_context, &l, sizeof(l));
sb64_rate += x->partition_cost[partition_plane(BLOCK_SIZE_SB64X64)]
[PARTITION_SPLIT];
#if CONFIG_SBSEGMENT
// check 64x32
if (mb_col + 3 < cm->mb_cols && !(cm->mb_rows & 1)) {
int r, d;
xd->sb_index = 0;
pick_sb_modes(cpi, mb_row, mb_col,
tp, &r, &d, BLOCK_SIZE_SB64X32,
&x->sb64x32_context[xd->sb_index]);
if (mb_row + 2 != cm->mb_rows) {
int r2, d2;
update_state(cpi, &x->sb64x32_context[xd->sb_index],
BLOCK_SIZE_SB64X32, 0);
encode_superblock(cpi, tp,
0, mb_row, mb_col, BLOCK_SIZE_SB64X32);
xd->sb_index = 1;
pick_sb_modes(cpi, mb_row + 2, mb_col,
tp, &r2, &d2, BLOCK_SIZE_SB64X32,
&x->sb64x32_context[xd->sb_index]);
r += r2;
d += d2;
}
r += x->partition_cost[partition_plane(BLOCK_SIZE_SB64X64)]
[PARTITION_HORZ];
/* is this better than MB coding? */
if (RDCOST(x->rdmult, x->rddiv, r, d) <
RDCOST(x->rdmult, x->rddiv, sb64_rate, sb64_dist)) {
sb64_rate = r;
sb64_dist = d;
sb_partitioning[0] = BLOCK_SIZE_SB64X32;
}
vpx_memcpy(cm->left_context, l, sizeof(l));
vpx_memcpy(cm->above_context + mb_col, a, sizeof(a));
}
// check 32x64
if (mb_row + 3 < cm->mb_rows && !(cm->mb_cols & 1)) {
int r, d;
xd->sb_index = 0;
pick_sb_modes(cpi, mb_row, mb_col,
tp, &r, &d, BLOCK_SIZE_SB32X64,
&x->sb32x64_context[xd->sb_index]);
if (mb_col + 2 != cm->mb_cols) {
int r2, d2;
update_state(cpi, &x->sb32x64_context[xd->sb_index],
BLOCK_SIZE_SB32X64, 0);
encode_superblock(cpi, tp,
0, mb_row, mb_col, BLOCK_SIZE_SB32X64);
xd->sb_index = 1;
pick_sb_modes(cpi, mb_row, mb_col + 2,
tp, &r2, &d2, BLOCK_SIZE_SB32X64,
&x->sb32x64_context[xd->sb_index]);
r += r2;
d += d2;
}
r += x->partition_cost[partition_plane(BLOCK_SIZE_SB64X64)]
[PARTITION_VERT];
/* is this better than MB coding? */
if (RDCOST(x->rdmult, x->rddiv, r, d) <
RDCOST(x->rdmult, x->rddiv, sb64_rate, sb64_dist)) {
sb64_rate = r;
sb64_dist = d;
sb_partitioning[0] = BLOCK_SIZE_SB32X64;
}
vpx_memcpy(cm->left_context, l, sizeof(l));
vpx_memcpy(cm->above_context + mb_col, a, sizeof(a));
}
#endif
if (!sb64_skip && !(mb_col + 3 >= cm->mb_cols ||
mb_row + 3 >= cm->mb_rows)) {
int r, d;
pick_sb_modes(cpi, mb_row, mb_col, tp, &r, &d,
BLOCK_SIZE_SB64X64, &x->sb64_context);
r += x->partition_cost[partition_plane(BLOCK_SIZE_SB64X64)]
[PARTITION_NONE];
if (RDCOST(x->rdmult, x->rddiv, r, d) <
RDCOST(x->rdmult, x->rddiv, sb64_rate, sb64_dist)) {
sb64_rate = r;
sb64_dist = d;
sb_partitioning[0] = BLOCK_SIZE_SB64X64;
}
    }

    encode_sb64(cpi, mb_row, mb_col, tp, sb_partitioning);
  }
}
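
// Per-frame MB-level initialization: reset activity and prediction
// counters, point the encoder at the new reconstruction buffer, and
// clear the above-row entropy contexts.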
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;

  x->act_zbin_adj = 0;
cpi->seg0_idx = 0;
vpx_memset(cpi->ref_pred_count, 0, sizeof(cpi->ref_pred_count));
xd->mode_info_stride = cm->mode_info_stride;
xd->frame_type = cm->frame_type;
xd->frames_since_golden = cm->frames_since_golden;
xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp9_init_mbmode_probs(cm);
xd->pre = cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]];
vp9_setup_intra_recon(&cm->yv12_fb[cm->new_fb_idx]);
vp9_build_block_offsets(x);
vp9_setup_block_dptrs(&x->e_mbd);
vp9_setup_block_ptrs(x);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
vp9_zero(cpi->count_mb_ref_frame_usage)
vp9_zero(cpi->bmode_count)
vp9_zero(cpi->ymode_count)
vp9_zero(cpi->i8x8_mode_count)
vp9_zero(cpi->y_uv_mode_count)
vp9_zero(cpi->sub_mv_ref_count)
vp9_zero(cpi->mbsplit_count)
vp9_zero(cpi->common.fc.mv_ref_ct)
vp9_zero(cpi->sb_ymode_count)
#if CONFIG_COMP_INTERINTRA_PRED
vp9_zero(cpi->interintra_count);
vp9_zero(cpi->interintra_select_count);
#endif
  vpx_memset(cm->above_context, 0,
             sizeof(ENTROPY_CONTEXT_PLANES) * cm->mb_cols);
}
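
// Lossless coding uses the integer Walsh-Hadamard transform (exactly
// invertible) instead of the DCT, and disables the loop filter, trellis
// optimization and zbin boost, all of which would otherwise lose bits.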
static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
if (lossless) {
cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
cpi->mb.e_mbd.inv_txm4x4_1 = vp9_short_iwalsh4x4_1;
cpi->mb.e_mbd.inv_txm4x4 = vp9_short_iwalsh4x4;
cpi->mb.optimize = 0;
cpi->common.filter_level = 0;
cpi->zbin_mode_boost_enabled = 0;
cpi->common.txfm_mode = ONLY_4X4;
} else {
cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
cpi->mb.e_mbd.inv_txm4x4_1 = vp9_short_idct4x4_1;
    cpi->mb.e_mbd.inv_txm4x4 = vp9_short_idct4x4;
  }
}
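
// The main per-frame encoding pass: reset counters, set up quantizer and
// RD constants, then walk every tile and SB row, accumulating the
// projected rate in 1/256-bit units.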
static void encode_frame_internal(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  TOKENEXTRA *tp = cpi->tok;
  int mb_row;
  int totalrate;
// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
// cpi->common.current_video_frame, cpi->common.show_frame,
// cm->frame_type);
// Compute a modified set of reference frame probabilities to use when
// prediction fails. These are based on the current general estimates for
// this frame which may be updated with each iteration of the recode loop.
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n");
fclose(statsfile);
}
totalrate = 0;
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
  cpi->skip_true_count[0] = cpi->skip_true_count[1] =
      cpi->skip_true_count[2] = 0;
  cpi->skip_false_count[0] = cpi->skip_false_count[1] =
      cpi->skip_false_count[2] = 0;
vp9_zero(cpi->switchable_interp_count);
vp9_zero(cpi->best_switchable_interp_count);
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
vp9_zero(cpi->coef_counts_4x4);
vp9_zero(cpi->coef_counts_8x8);
vp9_zero(cpi->coef_counts_16x16);
vp9_zero(cm->fc.eob_branch_counts);
#if CONFIG_CODE_NONZEROCOUNT
vp9_zero(cm->fc.nzc_counts_4x4);
vp9_zero(cm->fc.nzc_counts_8x8);
vp9_zero(cm->fc.nzc_counts_16x16);
vp9_zero(cm->fc.nzc_counts_32x32);
  vp9_zero(cm->fc.nzc_pcat_counts);
#endif
#if CONFIG_NEW_MVREF
vp9_zero(cpi->mb_mv_ref_count);
#endif
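  // The frame can be coded losslessly only when the base quantizer and
  // all per-plane delta-qs are zero.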
cpi->mb.e_mbd.lossless = (cm->base_qindex == 0 &&
cm->y1dc_delta_q == 0 &&
cm->uvdc_delta_q == 0 &&
cm->uvac_delta_q == 0);
switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);
vp9_frame_init_quantizer(cpi);
vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y1dc_delta_q);
vp9_initialize_me_consts(cpi, cm->base_qindex);
  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    // Build a frame-level activity map
    build_activity_map(cpi);
  }

  // Re-initialize the encode frame context.
  init_encode_frame_mb_context(cpi);
vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
vpx_memset(cpi->single_pred_count, 0, sizeof(cpi->single_pred_count));
vpx_memset(cpi->comp_pred_count, 0, sizeof(cpi->comp_pred_count));
vpx_memset(cpi->txfm_count_32x32p, 0, sizeof(cpi->txfm_count_32x32p));
vpx_memset(cpi->txfm_count_16x16p, 0, sizeof(cpi->txfm_count_16x16p));
vpx_memset(cpi->txfm_count_8x8p, 0, sizeof(cpi->txfm_count_8x8p));
vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
  {
    struct vpx_usec_timer emr_timer;
    int tile_col, tile_row;

    vpx_usec_timer_start(&emr_timer);

    // Take tiles into account and give start/end MB
for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
vp9_get_tile_row_offsets(cm, tile_row);
for (tile_col = 0; tile_col < cm->tile_columns; tile_col++) {
TOKENEXTRA *tp_old = tp;
// For each row of SBs in the frame
vp9_get_tile_col_offsets(cm, tile_col);
for (mb_row = cm->cur_tile_mb_row_start;
mb_row < cm->cur_tile_mb_row_end; mb_row += 4) {
encode_sb_row(cpi, mb_row, &tp, &totalrate);
}
        cpi->tok_count[tile_col] = (unsigned int)(tp - tp_old);
      }
    }

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
  }
// 256 rate units to the bit,
// projected_frame_size in units of BYTES
cpi->projected_frame_size = totalrate >> 8;
// Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
}
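
// Returns 1 when more than one reference frame is actually usable,
// honoring any segment-level reference-frame restriction; compound
// prediction is only worth evaluating in that case.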
static int check_dual_ref_flags(VP9_COMP *cpi) {
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int ref_flags = cpi->ref_frame_flags;
  if (vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
    if ((ref_flags & (VP9_LAST_FLAG | VP9_GOLD_FLAG)) ==
            (VP9_LAST_FLAG | VP9_GOLD_FLAG) &&
        vp9_check_segref(xd, 1, LAST_FRAME))
      return 1;
    if ((ref_flags & (VP9_GOLD_FLAG | VP9_ALT_FLAG)) ==
            (VP9_GOLD_FLAG | VP9_ALT_FLAG) &&
        vp9_check_segref(xd, 1, GOLDEN_FRAME))
      return 1;
    if ((ref_flags & (VP9_ALT_FLAG | VP9_LAST_FLAG)) ==
            (VP9_ALT_FLAG | VP9_LAST_FLAG) &&
        vp9_check_segref(xd, 1, ALTREF_FRAME))
      return 1;
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) +
            !!(ref_flags & VP9_LAST_FLAG) +
            !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}
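
// Returns 1 only if every MB in the ymbs x xmbs region has its skip flag
// set, i.e. the whole region was coded without residual coefficients.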
static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++) {
if (!mi[y * mis + x].mbmi.mb_skip_coeff)
return 0;
}
}
return 1;
}
static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs,
TX_SIZE txfm_size) {
int x, y;
  for (y = 0; y < ymbs; y++) {
    for (x = 0; x < xmbs; x++)
      mi[y * mis + x].mbmi.txfm_size = txfm_size;
  }
}
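
// Clamps the recorded transform size of a block to txfm_max. The assert
// verifies this only happens for skipped (or SEG_LVL_SKIP) blocks, where
// no residual was coded, so reconstruction is unaffected.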
static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
int mis, TX_SIZE txfm_max,
int mb_rows_left, int mb_cols_left,
BLOCK_SIZE_TYPE bsize) {
MB_MODE_INFO *const mbmi = &mi->mbmi;
if (mbmi->txfm_size > txfm_max) {
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int segment_id = mbmi->segment_id;
const int bh = 1 << mb_height_log2(bsize), bw = 1 << mb_width_log2(bsize);
const int ymbs = MIN(bh, mb_rows_left);
const int xmbs = MIN(bw, mb_cols_left);
assert(vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) ||
get_skip_flag(mi, mis, ymbs, xmbs));
set_txfm_flag(mi, mis, ymbs, xmbs, txfm_max);
}
}
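
// Walks the frame in 64x64 steps and applies the transform-size clamp to
// each coded block, descending through the recorded partitioning.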
static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
VP9_COMMON *const cm = &cpi->common;
int mb_row, mb_col;
const int mis = cm->mode_info_stride;
MODE_INFO *mi, *mi_ptr = cm->mi;
for (mb_row = 0; mb_row < cm->mb_rows; mb_row += 4, mi_ptr += 4 * mis) {
mi = mi_ptr;
for (mb_col = 0; mb_col < cm->mb_cols; mb_col += 4, mi += 4) {
if (mi->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
reset_skip_txfm_size_sb(cpi, mi, mis, txfm_max,
cm->mb_rows - mb_row, cm->mb_cols - mb_col,
BLOCK_SIZE_SB64X64);
#if CONFIG_SBSEGMENT
} else if (mi->mbmi.sb_type == BLOCK_SIZE_SB64X32) {
reset_skip_txfm_size_sb(cpi, mi, mis, txfm_max,
cm->mb_rows - mb_row, cm->mb_cols - mb_col,
BLOCK_SIZE_SB64X32);
if (mb_row + 2 != cm->mb_rows)
reset_skip_txfm_size_sb(cpi, mi + 2 * mis, mis, txfm_max,
cm->mb_rows - mb_row - 2,
cm->mb_cols - mb_col,
BLOCK_SIZE_SB64X32);
} else if (mi->mbmi.sb_type == BLOCK_SIZE_SB32X64) {
reset_skip_txfm_size_sb(cpi, mi, mis, txfm_max,
cm->mb_rows - mb_row, cm->mb_cols - mb_col,
BLOCK_SIZE_SB32X64);
if (mb_col + 2 != cm->mb_cols)
reset_skip_txfm_size_sb(cpi, mi + 2, mis, txfm_max,
cm->mb_rows - mb_row,
cm->mb_cols - mb_col - 2,
BLOCK_SIZE_SB32X64);
#endif
      } else {
        int i;
for (i = 0; i < 4; i++) {
const int x_idx_sb = (i & 1) << 1, y_idx_sb = i & 2;
MODE_INFO *sb_mi = mi + y_idx_sb * mis + x_idx_sb;
if (mb_row + y_idx_sb >= cm->mb_rows ||
mb_col + x_idx_sb >= cm->mb_cols)
continue;
if (sb_mi->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
reset_skip_txfm_size_sb(cpi, sb_mi, mis, txfm_max,
cm->mb_rows - mb_row - y_idx_sb,
cm->mb_cols - mb_col - x_idx_sb,
BLOCK_SIZE_SB32X32);
#if CONFIG_SBSEGMENT
} else if (sb_mi->mbmi.sb_type == BLOCK_SIZE_SB32X16) {
reset_skip_txfm_size_sb(cpi, sb_mi, mis, txfm_max,
cm->mb_rows - mb_row - y_idx_sb,
cm->mb_cols - mb_col - x_idx_sb,
BLOCK_SIZE_SB32X16);
if (mb_row + y_idx_sb + 1 != cm->mb_rows)
reset_skip_txfm_size_sb(cpi, sb_mi + mis, mis, txfm_max,
cm->mb_rows - mb_row - y_idx_sb - 1,
cm->mb_cols - mb_col - x_idx_sb,
BLOCK_SIZE_SB32X16);
} else if (sb_mi->mbmi.sb_type == BLOCK_SIZE_SB16X32) {
reset_skip_txfm_size_sb(cpi, sb_mi, mis, txfm_max,
cm->mb_rows - mb_row - y_idx_sb,
cm->mb_cols - mb_col - x_idx_sb,
BLOCK_SIZE_SB16X32);
if (mb_col + x_idx_sb + 1 != cm->mb_cols)
reset_skip_txfm_size_sb(cpi, sb_mi + 1, mis, txfm_max,
cm->mb_rows - mb_row - y_idx_sb,
cm->mb_cols - mb_col - x_idx_sb - 1,
BLOCK_SIZE_SB16X32);
#endif
          } else {
            int m;
for (m = 0; m < 4; m++) {
const int x_idx = x_idx_sb + (m & 1), y_idx = y_idx_sb + (m >> 1);
MODE_INFO *mb_mi;
if (mb_col + x_idx >= cm->mb_cols ||
mb_row + y_idx >= cm->mb_rows)
continue;
mb_mi = mi + y_idx * mis + x_idx;
assert(mb_mi->mbmi.sb_type == BLOCK_SIZE_MB16X16);
reset_skip_txfm_size_sb(cpi, mb_mi, mis, txfm_max,
cm->mb_rows - mb_row - y_idx,
cm->mb_cols - mb_col - x_idx,
BLOCK_SIZE_MB16X16);
            }
          }
        }
      }
    }
  }
}
void vp9_encode_frame(VP9_COMP *cpi) {
  int i, frame_type, pred_type;
  TXFM_MODE txfm_type;
/*
* This code does a single RD pass over the whole frame assuming
* either compound, single or hybrid prediction as per whatever has
* worked best for that type of frame in the past.
* It also predicts whether another coding mode would have worked
* better that this coding mode. If that is the case, it remembers
* that for subsequent frames.
* It does the same analysis for transform size selection also.
*/
  if (cpi->common.frame_type == KEY_FRAME)
    frame_type = 0;
  else if (cpi->is_src_frame_alt_ref && cpi->refresh_golden_frame)
    frame_type = 3;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    frame_type = 1;
  else
    frame_type = 2;
/* prediction (compound, single or hybrid) mode selection */
if (frame_type == 3)
pred_type = SINGLE_PREDICTION_ONLY;
  else if (cpi->rd_prediction_type_threshes[frame_type][1] >
               cpi->rd_prediction_type_threshes[frame_type][0] &&
           cpi->rd_prediction_type_threshes[frame_type][1] >
               cpi->rd_prediction_type_threshes[frame_type][2] &&
check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
pred_type = COMP_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][0] >
cpi->rd_prediction_type_threshes[frame_type][2])
pred_type = SINGLE_PREDICTION_ONLY;
else
pred_type = HYBRID_PREDICTION;
/* transform size (4x4, 8x8, 16x16 or select-per-mb) selection */
  if (cpi->oxcf.lossless) {
    txfm_type = ONLY_4X4;
  } else
  /* FIXME (rbultje): this code is disabled until we support cost updates
   * while a frame is being encoded; the problem is that each time we
   * "revert" to 4x4 only (or even 8x8 only), the coefficient probabilities
   * for 16x16 (and 8x8) start lagging behind, thus leading to them lagging
   * further behind and not being chosen for subsequent frames either. This
   * is essentially a local minimum problem that we can probably fix by
   * estimating real costs more closely within a frame, perhaps by re-
   * calculating costs on-the-fly as frame encoding progresses. */
#if 0
if (cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] &&
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] &&
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
txfm_type = TX_MODE_SELECT;
} else if (cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]
&& cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16]
) {
txfm_type = ONLY_4X4;
} else if (cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
txfm_type = ALLOW_16X16;
} else
txfm_type = ALLOW_8X8;
#else
txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] >=
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_32X32 : TX_MODE_SELECT;
#endif
cpi->common.txfm_mode = txfm_type;
if (txfm_type != TX_MODE_SELECT) {
cpi->common.prob_tx[0] = 128;
cpi->common.prob_tx[1] = 128;
}
cpi->common.comp_pred_mode = pred_type;
encode_frame_internal(cpi);
for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
const int diff = (int)(cpi->rd_comp_pred_diff[i] / cpi->common.MBs);
cpi->rd_prediction_type_threshes[frame_type][i] += diff;
cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
}
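  // Each threshold is updated as new = (old + diff) / 2, a running
  // average in which recent frames dominate the prediction-type choice.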
for (i = 0; i < NB_TXFM_MODES; ++i) {
int64_t pd = cpi->rd_tx_select_diff[i];
int diff;
if (i == TX_MODE_SELECT)
pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv,
2048 * (TX_SIZE_MAX_SB - 1), 0);
    diff = (int)(pd / cpi->common.MBs);
    cpi->rd_tx_select_threshes[frame_type][i] += diff;
cpi->rd_tx_select_threshes[frame_type][i] /= 2;
}
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
int single_count_zero = 0;
int comp_count_zero = 0;
for (i = 0; i < COMP_PRED_CONTEXTS; i++) {
single_count_zero += cpi->single_pred_count[i];
comp_count_zero += cpi->comp_pred_count[i];
}
if (comp_count_zero == 0) {
cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
} else if (single_count_zero == 0) {
cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
    }
  }
if (cpi->common.txfm_mode == TX_MODE_SELECT) {
const int count4x4 = cpi->txfm_count_16x16p[TX_4X4] +
cpi->txfm_count_32x32p[TX_4X4] +
cpi->txfm_count_8x8p[TX_4X4];
const int count8x8_lp = cpi->txfm_count_32x32p[TX_8X8] +
cpi->txfm_count_16x16p[TX_8X8];
const int count8x8_8x8p = cpi->txfm_count_8x8p[TX_8X8];
const int count16x16_16x16p = cpi->txfm_count_16x16p[TX_16X16];
const int count16x16_lp = cpi->txfm_count_32x32p[TX_16X16];
const int count32x32 = cpi->txfm_count_32x32p[TX_32X32];
    if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
        count32x32 == 0) {
      cpi->common.txfm_mode = ALLOW_8X8;
      reset_skip_txfm_size(cpi, TX_8X8);
    } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
               count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
      cpi->common.txfm_mode = ONLY_4X4;
      reset_skip_txfm_size(cpi, TX_4X4);
    } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
      cpi->common.txfm_mode = ALLOW_32X32;
    } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
      cpi->common.txfm_mode = ALLOW_16X16;
      reset_skip_txfm_size(cpi, TX_16X16);
    }
  }
// Update interpolation filter strategy for next frame.
  if ((cpi->common.frame_type != KEY_FRAME) && (cpi->sf.search_best_filter))
    select_interp_filter_type(cpi);  /* assumed: local helper in this file */
}
void vp9_setup_block_ptrs(MACROBLOCK *x) {
  int r, c;

  // y blocks
  for (r = 0; r < 4; r++)
    for (c = 0; c < 4; c++)
      x->block[r * 4 + c].src_diff = x->src_diff + r * 4 * 16 + c * 4;

  // u blocks
  for (r = 0; r < 2; r++)
    for (c = 0; c < 2; c++)
      x->block[16 + r * 2 + c].src_diff = x->src_diff + 256 + r * 4 * 8 + c * 4;

  // v blocks
  for (r = 0; r < 2; r++)
    for (c = 0; c < 2; c++)
      x->block[20 + r * 2 + c].src_diff = x->src_diff + 320 + r * 4 * 8 + c * 4;
}
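
// Gives each of the 16 luma and 8 chroma 4x4 blocks its source buffer
// pointer, stride and offset within the macroblock.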
void vp9_build_block_offsets(MACROBLOCK *x) {
  int block = 0;
  int br, bc;

  vp9_build_block_doffsets(&x->e_mbd);

  // y blocks
  for (br = 0; br < 4; br++) {
for (bc = 0; bc < 4; bc++) {
BLOCK *this_block = &x->block[block];
// this_block->base_src = &x->src.y_buffer;
// this_block->src_stride = x->src.y_stride;
// this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->src.y_buffer;
this_block->src_stride = x->src.y_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
}
// u blocks
for (br = 0; br < 2; br++) {
for (bc = 0; bc < 2; bc++) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.u_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
    }
  }
// v blocks
for (br = 0; br < 2; br++) {
for (bc = 0; bc < 2; bc++) {
BLOCK *this_block = &x->block[block];
this_block->base_src = &x->src.v_buffer;
this_block->src_stride = x->src.uv_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
      ++block;
    }
  }
}
static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
  const MACROBLOCKD *xd = &x->e_mbd;
  const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
  const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;

#ifdef MODE_STATS
  const int is_key = cpi->common.frame_type == KEY_FRAME;
++ (is_key ? uv_modes : inter_uv_modes)[uvm];
++ uv_modes_y[m][uvm];
  if (m == B_PRED) {
    unsigned int *const bct = is_key ? b_modes : inter_b_modes;
    int b = 0;
do {
++ bct[xd->block[b].bmi.as_mode.first];
} while (++b < 16);
}
if (m == I8X8_PRED) {
i8x8_modes[xd->block[0].bmi.as_mode.first]++;
i8x8_modes[xd->block[2].bmi.as_mode.first]++;
i8x8_modes[xd->block[8].bmi.as_mode.first]++;
i8x8_modes[xd->block[10].bmi.as_mode.first]++;
  }
#endif
if (m != I8X8_PRED)
++cpi->y_uv_mode_count[m][uvm];
else {
cpi->i8x8_mode_count[xd->block[0].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[2].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[8].bmi.as_mode.first]++;
cpi->i8x8_mode_count[xd->block[10].bmi.as_mode.first]++;
}
  if (m == B_PRED) {
    int b = 0;

    do {
      int m = xd->block[b].bmi.as_mode.first;
#if CONFIG_NEWBINTRAMODES
if (m == B_CONTEXT_PRED) m -= CONTEXT_PRED_REPLACEMENTS;
#endif
      ++cpi->bmode_count[m];
    } while (++b < 16);
  }
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
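  // The ratio b/a (or a/b) measures how far this MB's activity deviates
  // from the frame average; the (a >> 1) / (b >> 1) terms round the
  // integer division to nearest.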
if (act > cpi->activity_avg)
x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
else
    x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
}
#if CONFIG_CODE_NONZEROCOUNT
static void gather_nzcs_mb16(VP9_COMMON *const cm,
MACROBLOCKD *xd) {
int i;
vpx_memset(xd->mode_info_context->mbmi.nzcs, 0,
384 * sizeof(xd->mode_info_context->mbmi.nzcs[0]));
switch (xd->mode_info_context->mbmi.txfm_size) {
case TX_4X4:
for (i = 0; i < 24; ++i) {
xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];
}
break;
case TX_8X8:
for (i = 0; i < 16; i += 4) {
xd->mode_info_context->mbmi.nzcs[i] = xd->nzcs[i];