copy_partitioning(cm, mi_8x8, prev_mi_8x8);
rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
                 &dummy_rate, &dummy_dist, 1);

// If required set upper and lower partition size limits
if (cpi->sf.auto_min_max_partition_size) {
  set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
  rd_auto_partition_range(cpi, tile, mi_row, mi_col,
                          &cpi->sf.min_partition_size,
                          &cpi->sf.max_partition_size);
}
rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                  &dummy_rate, &dummy_dist, 1, INT64_MAX);
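// Sets up per-frame macroblock state before encoding: reference and
// destination plane pointers, default DC prediction modes, zeroed
// mode/partition/tx counts, and cleared above contexts for the new frame.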
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);

  // TODO(jkoleszar): are these initializations required?
  setup_pre_planes(xd, 0, get_ref_frame_buffer(cpi, LAST_FRAME), 0, 0, NULL);
  setup_dst_planes(xd, get_frame_new_buffer(cm), 0, 0);

  vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);

  xd->mi_8x8[0]->mbmi.mode = DC_PRED;
  xd->mi_8x8[0]->mbmi.uv_mode = DC_PRED;

  vp9_zero(cm->counts.y_mode);
  vp9_zero(cm->counts.uv_mode);
  vp9_zero(cm->counts.inter_mode);
  vp9_zero(cm->counts.partition);
  vp9_zero(cm->counts.intra_inter);
  vp9_zero(cm->counts.comp_inter);
  vp9_zero(cm->counts.single_ref);
  vp9_zero(cm->counts.comp_ref);
  vp9_zero(cm->counts.tx);

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(cpi->above_context[0], 0,
             sizeof(*cpi->above_context[0]) *
             2 * aligned_mi_cols * MAX_MB_PLANE);
  vpx_memset(cpi->above_seg_context, 0,
             sizeof(*cpi->above_seg_context) * aligned_mi_cols);
}
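// Lossless frames use the 4x4 Walsh-Hadamard transform pair and disable the
// loop filter; otherwise the normal DCT/iDCT pair is restored.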
static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
  if (lossless) {
    cpi->mb.fwd_txm4x4 = vp9_fwht4x4;
    cpi->mb.e_mbd.itxm_add = vp9_iwht4x4_add;
    cpi->common.lf.filter_level = 0;
    cpi->common.tx_mode = ONLY_4X4;
  } else {
    cpi->mb.fwd_txm4x4 = vp9_fdct4x4;
    cpi->mb.e_mbd.itxm_add = vp9_idct4x4_add;
  }
}
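// With the USE_LARGESTALL search method there is no per-block transform size
// selection, so cap the frame-level tx_mode at ALLOW_32X32.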
static void switch_tx_mode(VP9_COMP *cpi) {
  if (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
      cpi->common.tx_mode >= ALLOW_32X32)
    cpi->common.tx_mode = ALLOW_32X32;
}
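// Returns 1 if at least two of the LAST/GOLDEN/ALT reference frames are
// available for this frame (and segmentation does not fix the reference).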
static int check_dual_ref_flags(VP9_COMP *cpi) {
  const int ref_flags = cpi->ref_frame_flags;

  if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
            + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}
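// Returns 1 only if every 8x8 mode-info block in the ymbs x xmbs region has
// its skip_coeff flag set (i.e. the whole region coded no residual).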
static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) {
  int x, y;

  for (y = 0; y < ymbs; y++) {
    for (x = 0; x < xmbs; x++) {
      if (!mi_8x8[y * mis + x]->mbmi.skip_coeff)
        return 0;
    }
  }

  return 1;
}
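// Forces tx_size on every 8x8 mode-info block in the ymbs x xmbs region.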
static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs,
                          TX_SIZE tx_size) {
  int x, y;

  for (y = 0; y < ymbs; y++) {
    for (x = 0; x < xmbs; x++)
      mi_8x8[y * mis + x]->mbmi.tx_size = tx_size;
  }
}
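// Clamps the recorded tx_size of a block that coded no residual down to
// max_tx_size; used when the frame-level tx_mode is tightened after encoding.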
static void reset_skip_txfm_size_b(VP9_COMMON *cm, MODE_INFO **mi_8x8,
                                   int mis, TX_SIZE max_tx_size, int bw, int bh,
                                   int mi_row, int mi_col, BLOCK_SIZE bsize) {
  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) {
    return;
  } else {
    MB_MODE_INFO * const mbmi = &mi_8x8[0]->mbmi;
    if (mbmi->tx_size > max_tx_size) {
      const int ymbs = MIN(bh, cm->mi_rows - mi_row);
      const int xmbs = MIN(bw, cm->mi_cols - mi_col);

      assert(vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) ||
             get_skip_flag(mi_8x8, mis, ymbs, xmbs));
      set_txfm_flag(mi_8x8, mis, ymbs, xmbs, max_tx_size);
    }
  }
}
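// Recursively walks the partition of one superblock and applies
// reset_skip_txfm_size_b() to every coded block it contains.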
static void reset_skip_txfm_size_sb(VP9_COMMON *cm, MODE_INFO **mi_8x8,
                                    TX_SIZE max_tx_size, int mi_row, int mi_col,
                                    BLOCK_SIZE bsize) {
  const int mis = cm->mode_info_stride;
  int bw, bh;
  const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;

  if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
    return;

  bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type];
  bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type];

  if (bw == bs && bh == bs) {
    reset_skip_txfm_size_b(cm, mi_8x8, mis, max_tx_size, bs, bs, mi_row,
                           mi_col, bsize);
  } else if (bw == bs && bh < bs) {
    reset_skip_txfm_size_b(cm, mi_8x8, mis, max_tx_size, bs, hbs, mi_row,
                           mi_col, bsize);
    reset_skip_txfm_size_b(cm, mi_8x8 + hbs * mis, mis, max_tx_size, bs, hbs,
                           mi_row + hbs, mi_col, bsize);
  } else if (bw < bs && bh == bs) {
    reset_skip_txfm_size_b(cm, mi_8x8, mis, max_tx_size, hbs, bs, mi_row,
                           mi_col, bsize);
    reset_skip_txfm_size_b(cm, mi_8x8 + hbs, mis, max_tx_size, hbs, bs, mi_row,
                           mi_col + hbs, bsize);
  } else {
    const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
    int n;

    assert(bw < bs && bh < bs);

    for (n = 0; n < 4; n++) {
      const int mi_dc = hbs * (n & 1);
      const int mi_dr = hbs * (n >> 1);

      reset_skip_txfm_size_sb(cm, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size,
                              mi_row + mi_dr, mi_col + mi_dc, subsize);
    }
  }
}
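// Applies the transform-size clamp to every superblock in the frame after the
// frame-level tx_mode has been reduced.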
static void reset_skip_txfm_size(VP9_COMMON *cm, TX_SIZE txfm_max) {
  int mi_row, mi_col;
  const int mis = cm->mode_info_stride;
  MODE_INFO **mi_8x8, **mi_ptr = cm->mi_grid_visible;

  for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
    mi_8x8 = mi_ptr;
    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) {
      reset_skip_txfm_size_sb(cm, mi_8x8, txfm_max, mi_row, mi_col,
                              BLOCK_64X64);
    }
  }
}
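// Classifies the current frame (intra, alt-ref overlay, golden/alt-ref
// refresh, or normal inter) for indexing the per-frame-type RD threshold
// tables.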
static MV_REFERENCE_FRAME get_frame_type(VP9_COMP *cpi) {
  if (frame_is_intra_only(&cpi->common))
    return INTRA_FRAME;
  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
    return ALTREF_FRAME;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    return GOLDEN_FRAME;
  else
    return LAST_FRAME;
}
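// Picks the frame-level transform mode: lossless forces 4x4, the first frame
// uses per-block selection, and later frames follow either the speed-feature
// search method or the recorded tx selection statistics. For example, if more
// than 90% of blocks kept the largest transform (tx_stepdown_count[0]), the
// frame is locked to ALLOW_32X32 instead of signalling a per-block choice.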
static void select_tx_mode(VP9_COMP *cpi) {
  if (cpi->mb.e_mbd.lossless) {
    cpi->common.tx_mode = ONLY_4X4;
  } else if (cpi->common.current_video_frame == 0) {
    cpi->common.tx_mode = TX_MODE_SELECT;
  } else {
    if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
      cpi->common.tx_mode = ALLOW_32X32;
    } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
      const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
      cpi->common.tx_mode =
          cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32]
              > cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
              ALLOW_32X32 : TX_MODE_SELECT;
    } else {
      unsigned int total = 0;
      int i;
      for (i = 0; i < TX_SIZES; ++i)
        total += cpi->tx_stepdown_count[i];
      if (total) {
        double fraction = (double)cpi->tx_stepdown_count[0] / total;
        cpi->common.tx_mode = fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT;
        // printf("fraction = %f\n", fraction);
      }  // else keep unchanged
    }
  }
}
// Start RTC Exploration
typedef enum {
  BOTH_ZERO = 0,
  ZERO_PLUS_PREDICTED = 1,
  BOTH_PREDICTED = 2,
  NEW_PLUS_NON_INTRA = 3,
  BOTH_NEW = 4,
  INTRA_PLUS_NON_INTRA = 5,
  BOTH_INTRA = 6,
  INVALID_CASE = 9
} motion_vector_context;
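// Fills in a minimal MB_MODE_INFO for the RTC path: zero motion vectors,
// EIGHTTAP interpolation, an INTRA_FRAME or LAST_FRAME reference depending on
// whether the chosen mode is intra, and the largest transform for the block.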
static void set_mode_info(MB_MODE_INFO *mbmi, BLOCK_SIZE bsize,
                          MB_PREDICTION_MODE mode, int mi_row, int mi_col) {
  mbmi->interp_filter = EIGHTTAP;
  mbmi->mode = mode;
  mbmi->mv[0].as_int = 0;
  mbmi->mv[1].as_int = 0;
  if (mode < NEARESTMV) {
    mbmi->ref_frame[0] = INTRA_FRAME;
  } else {
    mbmi->ref_frame[0] = LAST_FRAME;
  }

  mbmi->ref_frame[1] = INTRA_FRAME;
  mbmi->tx_size = max_txsize_lookup[bsize];
  mbmi->uv_mode = mode;
  mbmi->skip_coeff = 0;
  mbmi->sb_type = bsize;
  mbmi->segment_id = 0;
}
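// Map (32x32, 16x16, 8x8) sub-block indices within a 64x64 superblock to
// row/column offsets in 8x8 units. For example, b32i = 3, b16i = 2, b8i = 1
// gives row = 4 + 2 + 0 = 6 and col = 4 + 0 + 1 = 5.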
static INLINE int get_block_row(int b32i, int b16i, int b8i) {
  return ((b32i >> 1) << 2) + ((b16i >> 1) << 1) + (b8i >> 1);
}

static INLINE int get_block_col(int b32i, int b16i, int b8i) {
  return ((b32i & 1) << 2) + ((b16i & 1) << 1) + (b8i & 1);
}
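// RTC partitioning: instead of an RD partition search, use the fixed block
// size from the speed features (shrunk at frame edges), pick an inter mode
// per block (or plain DC_PRED intra on key frames), and encode the 64x64
// superblock directly.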
static void rtc_use_partition(VP9_COMP *cpi,
                              const TileInfo *const tile,
                              MODE_INFO **mi_8x8,
                              TOKENEXTRA **tp, int mi_row, int mi_col,
                              BLOCK_SIZE bsize, int *rate, int64_t *dist,
                              int do_recon) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &cpi->mb.e_mbd;
  const int mis = cm->mode_info_stride;
  int mi_width = num_8x8_blocks_wide_lookup[cpi->sf.always_this_block_size];
  int mi_height = num_8x8_blocks_high_lookup[cpi->sf.always_this_block_size];
  int i, j;
  int chosen_rate = INT_MAX;
  int64_t chosen_dist = INT_MAX;
  MB_PREDICTION_MODE mode = DC_PRED;
  int row8x8_remaining = tile->mi_row_end - mi_row;
  int col8x8_remaining = tile->mi_col_end - mi_col;
  int b32i;
  for (b32i = 0; b32i < 4; b32i++) {
    int b16i;
    for (b16i = 0; b16i < 4; b16i++) {
      int b8i;
      int block_row = get_block_row(b32i, b16i, 0);
      int block_col = get_block_col(b32i, b16i, 0);
      int index = block_row * mis + block_col;
      int rate;
      int64_t dist;
      // Find a partition size that fits
      bsize = find_partition_size(cpi->sf.always_this_block_size,
                                  (row8x8_remaining - block_row),
                                  (col8x8_remaining - block_col),
                                  &mi_height, &mi_width);
      mi_8x8[index] = mi_8x8[0] + index;
      set_mi_row_col(xd, tile, mi_row + block_row, mi_height,
                     mi_col + block_col, mi_width, cm->mi_rows, cm->mi_cols);
      xd->mi_8x8 = mi_8x8 + index;
      if (cm->frame_type != KEY_FRAME) {
        set_offsets(cpi, tile, mi_row + block_row, mi_col + block_col, bsize);
        vp9_pick_inter_mode(cpi, x, tile,
                            mi_row + block_row, mi_col + block_col,
      } else {
        set_mode_info(&mi_8x8[index]->mbmi, bsize, mode,
                      mi_row + block_row, mi_col + block_col);
      }

      for (j = 0; j < mi_height; j++)
        for (i = 0; i < mi_width; i++)
          if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > i
              && (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > j) {
            mi_8x8[index + i + j * mis] = mi_8x8[index];
          }

      for (b8i = 0; b8i < 4; b8i++) {
      }
    }
  }

  encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, BLOCK_64X64);

  *rate = chosen_rate;
  *dist = chosen_dist;
}
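// Encodes one row of 64x64 superblocks using the RTC fixed-partition path.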
static void encode_rtc_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
                              int mi_row, TOKENEXTRA **tp) {
  VP9_COMMON *const cm = &cpi->common;
  int mi_col;

  // Initialize the left context for the new SB row
  vpx_memset(&cpi->left_context, 0, sizeof(cpi->left_context));
  vpx_memset(cpi->left_seg_context, 0, sizeof(cpi->left_seg_context));

  // Code each SB in the row
  for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
    int dummy_rate;
    int64_t dummy_dist;
    const int idx_str = cm->mode_info_stride * mi_row + mi_col;
    MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;

    cpi->mb.source_variance = UINT_MAX;
    set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
    set_partitioning(cpi, tile, mi_8x8, mi_row, mi_col);
    rtc_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
                      &dummy_rate, &dummy_dist, 1);
  }
}
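// Per-frame encode pass: resets counters, configures lossless/quantizer/RD
// constants, then walks every tile and superblock row, timing the loop and
// recording the token count produced by each tile.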
static void encode_frame_internal(VP9_COMP *cpi) {
  MACROBLOCK *const x = &cpi->mb;
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;

  // fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
  //         cpi->common.current_video_frame, cpi->common.show_frame,
  //         cm->frame_type);
  vp9_zero(cm->counts.switchable_interp);
  vp9_zero(cpi->tx_stepdown_count);

  xd->mi_8x8 = cm->mi_grid_visible;
  // required for vp9_frame_init_quantizer
  xd->mi_8x8[0] = cm->mi;

  xd->last_mi = cm->prev_mi;
  vp9_zero(cpi->coef_counts);
  vp9_zero(cm->counts.eob_branch);

  cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0
                           && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
  switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);

  vp9_frame_init_quantizer(cpi);

  vp9_initialize_rd_consts(cpi);
  vp9_initialize_me_consts(cpi, cm->base_qindex);
  switch_tx_mode(cpi);

  if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    // Initialize encode frame context.
    init_encode_frame_mb_context(cpi);

    // Build a frame level activity map
    build_activity_map(cpi);
  }

  // Re-initialize encode frame context.
  init_encode_frame_mb_context(cpi);

  vp9_zero(cpi->rd_comp_pred_diff);
  vp9_zero(cpi->rd_filter_diff);
  vp9_zero(cpi->rd_tx_select_diff);
  vp9_zero(cpi->rd_tx_select_threshes);

  set_prev_mi(cm);

  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

    {
      // Take tiles into account and give start/end MB
      int tile_col, tile_row;
      TOKENEXTRA *tp = cpi->tok;
      const int tile_cols = 1 << cm->log2_tile_cols;
      const int tile_rows = 1 << cm->log2_tile_rows;

      for (tile_row = 0; tile_row < tile_rows; tile_row++) {
        for (tile_col = 0; tile_col < tile_cols; tile_col++) {
          TileInfo tile;
          TOKENEXTRA *tp_old = tp;

          // For each row of SBs in the frame
          vp9_tile_init(&tile, cm, tile_row, tile_col);
          for (mi_row = tile.mi_row_start;
               mi_row < tile.mi_row_end; mi_row += 8) {
            encode_rtc_sb_row(cpi, &tile, mi_row, &tp);
            else
              encode_sb_row(cpi, &tile, mi_row, &tp);
          }
          cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old);
          assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols));
        }
      }
    }

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
  }

  if (cpi->sf.skip_encode_sb) {
    int j;
    unsigned int intra_count = 0, inter_count = 0;
    for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
      intra_count += cm->counts.intra_inter[j][0];
      inter_count += cm->counts.intra_inter[j][1];
    }
    cpi->sf.skip_encode_frame = ((intra_count << 2) < inter_count);
    cpi->sf.skip_encode_frame &= (cm->frame_type != KEY_FRAME);
    cpi->sf.skip_encode_frame &= cm->show_frame;
  } else {
    cpi->sf.skip_encode_frame = 0;
  }

#if 0
  // Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
void vp9_encode_frame(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;

  // In the longer term the encoder should be generalized to match the
  // decoder such that we allow compound where one of the 3 buffers has a
  // different sign bias and that buffer is then the fixed ref. However, this
  // requires further work in the rd loop. For now the only supported encoder
  // side behavior is where the ALT ref buffer has opposite sign bias to
  // the other two.
  if (!frame_is_intra_only(cm)) {
    if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
             cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
        (cm->ref_frame_sign_bias[ALTREF_FRAME] ==
             cm->ref_frame_sign_bias[LAST_FRAME])) {
      cm->allow_comp_inter_inter = 0;
    } else {
      cm->allow_comp_inter_inter = 1;
      cm->comp_fixed_ref = ALTREF_FRAME;
      cm->comp_var_ref[0] = LAST_FRAME;
      cm->comp_var_ref[1] = GOLDEN_FRAME;
    }
  }
  if (cpi->sf.frame_parameter_update) {
    int i;
    REFERENCE_MODE reference_mode;
    /*
     * This code does a single RD pass over the whole frame assuming
     * either compound, single or hybrid prediction as per whatever has
     * worked best for that type of frame in the past.
     * It also predicts whether another coding mode would have worked
     * better than this coding mode. If that is the case, it remembers
     * that for subsequent frames.
     * It does the same analysis for transform size selection also.
     */
    const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
    const int64_t *mode_thresh = cpi->rd_prediction_type_threshes[frame_type];
    const int64_t *filter_thresh = cpi->rd_filter_threshes[frame_type];
    /* prediction (compound, single or hybrid) mode selection */
    if (frame_type == 3 || !cm->allow_comp_inter_inter)
      reference_mode = SINGLE_REFERENCE;
    else if (mode_thresh[COMPOUND_REFERENCE] > mode_thresh[SINGLE_REFERENCE] &&
             mode_thresh[COMPOUND_REFERENCE] >
                 mode_thresh[REFERENCE_MODE_SELECT] &&
             check_dual_ref_flags(cpi) &&
             cpi->static_mb_pct == 100)
      reference_mode = COMPOUND_REFERENCE;
    else if (mode_thresh[SINGLE_REFERENCE] > mode_thresh[REFERENCE_MODE_SELECT])
      reference_mode = SINGLE_REFERENCE;
    else
      reference_mode = REFERENCE_MODE_SELECT;
    if (cm->interp_filter == SWITCHABLE) {
      if (frame_type != ALTREF_FRAME &&
          filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP] &&
          filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP_SHARP] &&
          filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[SWITCHABLE - 1]) {
        cm->interp_filter = EIGHTTAP_SMOOTH;
      } else if (filter_thresh[EIGHTTAP_SHARP] > filter_thresh[EIGHTTAP] &&
                 filter_thresh[EIGHTTAP_SHARP] > filter_thresh[SWITCHABLE - 1]) {
        cm->interp_filter = EIGHTTAP_SHARP;
      } else if (filter_thresh[EIGHTTAP] > filter_thresh[SWITCHABLE - 1]) {
        cm->interp_filter = EIGHTTAP;
      }
    }
    cpi->mb.e_mbd.lossless = cpi->oxcf.lossless;

    /* transform size selection (4x4, 8x8, 16x16 or select-per-mb) */
    select_tx_mode(cpi);
    cm->reference_mode = reference_mode;

    encode_frame_internal(cpi);
    for (i = 0; i < REFERENCE_MODES; ++i) {
      const int diff = (int) (cpi->rd_comp_pred_diff[i] / cm->MBs);
      cpi->rd_prediction_type_threshes[frame_type][i] += diff;
      cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
    }

    for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
      const int64_t diff = cpi->rd_filter_diff[i] / cm->MBs;
      cpi->rd_filter_threshes[frame_type][i] =
          (cpi->rd_filter_threshes[frame_type][i] + diff) / 2;
    }

    for (i = 0; i < TX_MODES; ++i) {
      int64_t pd = cpi->rd_tx_select_diff[i];
      int diff;
      if (i == TX_MODE_SELECT)
        pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, 2048 * (TX_SIZES - 1), 0);
      diff = (int) (pd / cm->MBs);
      cpi->rd_tx_select_threshes[frame_type][i] += diff;
      cpi->rd_tx_select_threshes[frame_type][i] /= 2;
    }
    if (cm->reference_mode == REFERENCE_MODE_SELECT) {
      int single_count_zero = 0;
      int comp_count_zero = 0;

      for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
        single_count_zero += cm->counts.comp_inter[i][0];
        comp_count_zero += cm->counts.comp_inter[i][1];
      }
      int count4x4 = 0;
      int count8x8_lp = 0, count8x8_8x8p = 0;
      int count16x16_16x16p = 0, count16x16_lp = 0;
      int count32x32 = 0;

      for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
        count4x4 += cm->counts.tx.p32x32[i][TX_4X4];
        count4x4 += cm->counts.tx.p16x16[i][TX_4X4];
        count4x4 += cm->counts.tx.p8x8[i][TX_4X4];

        count8x8_lp += cm->counts.tx.p32x32[i][TX_8X8];
        count8x8_lp += cm->counts.tx.p16x16[i][TX_8X8];
        count8x8_8x8p += cm->counts.tx.p8x8[i][TX_8X8];

        count16x16_16x16p += cm->counts.tx.p16x16[i][TX_16X16];
        count16x16_lp += cm->counts.tx.p32x32[i][TX_16X16];
        count32x32 += cm->counts.tx.p32x32[i][TX_32X32];
      }

      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
          count32x32 == 0) {
        cm->tx_mode = ALLOW_8X8;
        reset_skip_txfm_size(cm, TX_8X8);
      } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
                 count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
        cm->tx_mode = ONLY_4X4;
        reset_skip_txfm_size(cm, TX_4X4);
      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_32X32;
      } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_16X16;
        reset_skip_txfm_size(cm, TX_16X16);
      }
    // Force the usage of the BILINEAR interp_filter.
    cm->interp_filter = BILINEAR;
    encode_frame_internal(cpi);
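// Accumulates intra y/uv mode counts for probability adaptation; sub-8x8
// blocks contribute the mode of each of their 4x4 sub-blocks.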
static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
  const MB_PREDICTION_MODE y_mode = mi->mbmi.mode;
  const MB_PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;

  ++counts->uv_mode[y_mode][uv_mode];

  if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
    for (idy = 0; idy < 2; idy += num_4x4_blocks_high)
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide)
        ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++counts->y_mode[size_group_lookup[bsize]][y_mode];
  }
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
  int64_t a;
  int64_t b;
  int64_t act = *(x->mb_activity_ptr);

  // Apply the masking to the RD multiplier.
  a = act + 4 * cpi->activity_avg;
  b = 4 * act + cpi->activity_avg;

  if (act > cpi->activity_avg)
    x->act_zbin_adj = (int) (((int64_t) b + (a >> 1)) / a) - 1;
  else
    x->act_zbin_adj = 1 - (int) (((int64_t) a + (b >> 1)) / b);
}
static int get_zbin_mode_boost(const MB_MODE_INFO *mbmi, int enabled) {
  if (enabled) {
    if (is_inter_block(mbmi)) {
      if (mbmi->mode == ZEROMV) {
        return mbmi->ref_frame[0] != LAST_FRAME ? GF_ZEROMV_ZBIN_BOOST
                                                : LF_ZEROMV_ZBIN_BOOST;
      } else {
        return mbmi->sb_type < BLOCK_8X8 ? SPLIT_MV_ZBIN_BOOST
                                         : MV_ZBIN_BOOST;
      }
    } else {
      return INTRA_ZBIN_BOOST;
    }
  } else {
    return 0;
  }
}
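// Final encode of a superblock with its chosen mode: applies zbin/activity
// adjustments, builds the prediction, transforms and tokenizes the residual,
// and records skip and transform-size counts for entropy adaptation.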
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
                              int mi_row, int mi_col, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO **mi_8x8 = xd->mi_8x8;
  MODE_INFO *mi = mi_8x8[0];
  MB_MODE_INFO *mbmi = &mi->mbmi;
  PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize);
  unsigned int segment_id = mbmi->segment_id;
  const int mis = cm->mode_info_stride;
  const int mi_width = num_8x8_blocks_wide_lookup[bsize];
  const int mi_height = num_8x8_blocks_high_lookup[bsize];
x->skip_recode = !x->select_txfm_size && mbmi->sb_type >= BLOCK_8X8 &&
  x->skip_optimize = ctx->is_coded;
  ctx->is_coded = 1;
  x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
  x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
                    x->q_index < QIDX_SKIP_THRESH);
  if (x->skip_encode)
    return;
  if (cm->frame_type == KEY_FRAME) {
    if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
      adjust_act_zbin(cpi, x);
      vp9_update_zbin_extra(cpi, x);
    }
  } else {
    set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
    xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter);

    if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
      // Adjust the zbin based on this MB rate.
      adjust_act_zbin(cpi, x);
    }

    // Experimental code. Special case for gf and arf zeromv modes.
    // Increase zbin size to suppress noise
    cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi,
                                               cpi->zbin_mode_boost_enabled);
    for (plane = 0; plane < MAX_MB_PLANE; ++plane)
      vp9_encode_intra_block_plane(x, MAX(bsize, BLOCK_8X8), plane);

    int ref;
    const int is_compound = has_second_ref(mbmi);
    for (ref = 0; ref < 1 + is_compound; ++ref) {
      YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
                                                     mbmi->ref_frame[ref]);
      setup_pre_planes(xd, ref, cfg, mi_row, mi_col, &xd->block_refs[ref]->sf);
    }
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));

    vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));

    vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
    vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));

    cm->counts.skip[vp9_get_skip_context(xd)][1]++;
    reset_skip_context(xd, MAX(bsize, BLOCK_8X8));
  if (cm->tx_mode == TX_MODE_SELECT &&
      mbmi->sb_type >= BLOCK_8X8 &&
      vp9_segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)))) {
    ++get_tx_counts(max_txsize_lookup[bsize], vp9_get_tx_size_context(xd),
                    &cm->counts.tx)[mbmi->tx_size];
    TX_SIZE tx_size;
    // The new intra coding scheme requires no change of transform size
    tx_size = MIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
                  max_txsize_lookup[bsize]);
    tx_size = (bsize >= BLOCK_8X8) ? mbmi->tx_size : TX_4X4;

    for (y = 0; y < mi_height; y++)
      for (x = 0; x < mi_width; x++)
        if (mi_col + x < cm->mi_cols && mi_row + y < cm->mi_rows)
          mi_8x8[mis * y + x]->mbmi.tx_size = tx_size;