    encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize);
  }

  if (bsize == BLOCK_64X64) {
    assert(best_rate < INT_MAX);
    assert(best_dist < INT64_MAX);
  }
}
// Examines 64x64 block and chooses a best reference frame
static void rd_pick_reference_frame(VP9_COMP *cpi, const TileInfo *const tile,
int mi_row, int mi_col) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
int bsl = b_width_log2(BLOCK_64X64), bs = 1 << bsl;
int ms = bs / 2;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
int pl;
int r;
int64_t d;
save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
  // Default is no mask (all reference frames allowed).
cpi->ref_frame_mask = 0;
// Do RD search for 64x64.
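  // Note: ms here equals the SB size in MI (8x8) units, so the check below
  // appears to require at least half of the 64x64 SB to lie inside the
  // frame before the whole-SB reference search is attempted.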
if ((mi_row + (ms >> 1) < cm->mi_rows) &&
(mi_col + (ms >> 1) < cm->mi_cols)) {
cpi->set_ref_frame_mask = 1;
rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &r, &d, BLOCK_64X64,
get_block_context(x, BLOCK_64X64), INT64_MAX);
pl = partition_plane_context(cpi->above_seg_context, cpi->left_seg_context,
mi_row, mi_col, BLOCK_64X64);
r += x->partition_cost[pl][PARTITION_NONE];
*(get_sb_partitioning(x, BLOCK_64X64)) = BLOCK_64X64;
cpi->set_ref_frame_mask = 0;
}
  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
}
static void encode_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
                          int mi_row, TOKENEXTRA **tp) {
  VP9_COMMON * const cm = &cpi->common;
  int mi_col;
// Initialize the left context for the new SB row
vpx_memset(&cpi->left_context, 0, sizeof(cpi->left_context));
vpx_memset(cpi->left_seg_context, 0, sizeof(cpi->left_seg_context));
  // Code each SB in the row
  for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
       mi_col += MI_BLOCK_SIZE) {
int dummy_rate;
int64_t dummy_dist;
BLOCK_SIZE i;
MACROBLOCK *x = &cpi->mb;
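    // Reset the per-size block contexts so each sub-8x8 block re-decides
    // its interpolation filter during the search below (SWITCHABLE here
    // means "not yet decided").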
for (i = BLOCK_4X4; i < BLOCK_8X8; ++i) {
const int num_4x4_w = num_4x4_blocks_wide_lookup[i];
const int num_4x4_h = num_4x4_blocks_high_lookup[i];
const int num_4x4_blk = MAX(4, num_4x4_w * num_4x4_h);
for (x->sb_index = 0; x->sb_index < 4; ++x->sb_index)
for (x->mb_index = 0; x->mb_index < 4; ++x->mb_index)
for (x->b_index = 0; x->b_index < 16 / num_4x4_blk; ++x->b_index)
get_block_context(x, i)->pred_interp_filter = SWITCHABLE;
    }

    if (cpi->sf.use_lastframe_partitioning ||
        cpi->sf.use_one_partition_size_always) {
const int idx_str = cm->mode_info_stride * mi_row + mi_col;
MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
MODE_INFO **prev_mi_8x8 = cm->prev_mi_grid_visible + idx_str;
if (cpi->sf.use_one_partition_size_always) {
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
set_partitioning(cpi, tile, mi_8x8, mi_row, mi_col);
rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
      } else {
        if ((cm->current_video_frame
            % cpi->sf.last_partitioning_redo_frequency) == 0
|| cm->prev_mi == 0
|| cm->show_frame == 0
|| cm->frame_type == KEY_FRAME
|| cpi->rc.is_src_frame_alt_ref
|| ((cpi->sf.use_lastframe_partitioning ==
                 LAST_FRAME_PARTITION_LOW_MOTION) &&
                 sb_has_motion(cm, prev_mi_8x8))) {
// If required set upper and lower partition size limits
if (cpi->sf.auto_min_max_partition_size) {
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
rd_auto_partition_range(cpi, tile, mi_row, mi_col,
&cpi->sf.min_partition_size,
&cpi->sf.max_partition_size);
}
rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
        } else {
          copy_partitioning(cm, mi_8x8, prev_mi_8x8);
rd_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
        }
      }
    } else {
      // If required set upper and lower partition size limits
if (cpi->sf.auto_min_max_partition_size) {
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
rd_auto_partition_range(cpi, tile, mi_row, mi_col,
&cpi->sf.min_partition_size,
&cpi->sf.max_partition_size);
}
rd_pick_partition(cpi, tile, tp, mi_row, mi_col, BLOCK_64X64,
                        &dummy_rate, &dummy_dist, 1, INT64_MAX);
    }
  }
}
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
MACROBLOCK *const x = &cpi->mb;
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
// TODO(jkoleszar): are these initializations required?
setup_pre_planes(xd, 0, get_ref_frame_buffer(cpi, LAST_FRAME), 0, 0, NULL);
setup_dst_planes(xd, get_frame_new_buffer(cm), 0, 0);
vp9_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
xd->mi_8x8[0]->mbmi.mode = DC_PRED;
xd->mi_8x8[0]->mbmi.uv_mode = DC_PRED;
vp9_zero(cm->counts.y_mode);
vp9_zero(cm->counts.uv_mode);
vp9_zero(cm->counts.inter_mode);
vp9_zero(cm->counts.partition);
vp9_zero(cm->counts.intra_inter);
vp9_zero(cm->counts.comp_inter);
vp9_zero(cm->counts.single_ref);
vp9_zero(cm->counts.comp_ref);
vp9_zero(cm->counts.tx);
// Note: this memset assumes above_context[0], [1] and [2]
// are allocated as part of the same buffer.
vpx_memset(cpi->above_context[0], 0,
sizeof(*cpi->above_context[0]) *
2 * aligned_mi_cols * MAX_MB_PLANE);
vpx_memset(cpi->above_seg_context, 0,
             sizeof(*cpi->above_seg_context) * aligned_mi_cols);
}
static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
if (lossless) {
cpi->mb.fwd_txm4x4 = vp9_fwht4x4;
cpi->mb.e_mbd.itxm_add = vp9_iwht4x4_add;
cpi->common.lf.filter_level = 0;
    cpi->common.tx_mode = ONLY_4X4;
  } else {
    cpi->mb.fwd_txm4x4 = vp9_fdct4x4;
    cpi->mb.e_mbd.itxm_add = vp9_idct4x4_add;
  }
}
static void switch_tx_mode(VP9_COMP *cpi) {
if (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
cpi->common.tx_mode >= ALLOW_32X32)
    cpi->common.tx_mode = ALLOW_32X32;
}
static void encode_frame_internal(VP9_COMP *cpi) {
MACROBLOCK *const x = &cpi->mb;
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
// cpi->common.current_video_frame, cpi->common.show_frame,
// cm->frame_type);
  // debug output
#if DBG_PRNT_SEGMAP
  {
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n");
fclose(statsfile);
  }
#endif
  vp9_zero(cm->counts.switchable_interp);
  vp9_zero(cpi->tx_stepdown_count);
xd->mi_8x8 = cm->mi_grid_visible;
// required for vp9_frame_init_quantizer
xd->mi_8x8[0] = cm->mi;
xd->last_mi = cm->prev_mi;
  vp9_zero(cpi->common.counts.mv);
  vp9_zero(cpi->coef_counts);
vp9_zero(cm->counts.eob_branch);
  cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0
      && cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
  switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);

  vp9_frame_init_quantizer(cpi);
vp9_initialize_rd_consts(cpi);
vp9_initialize_me_consts(cpi, cm->base_qindex);
switch_tx_mode(cpi);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    // Initialize encode frame context.
    init_encode_frame_mb_context(cpi);
    // Build a frame level activity map
build_activity_map(cpi);
}
  // Re-initialize encode frame context.
  init_encode_frame_mb_context(cpi);
vp9_zero(cpi->rd_comp_pred_diff);
vp9_zero(cpi->rd_filter_diff);
vp9_zero(cpi->rd_tx_select_diff);
vp9_zero(cpi->rd_tx_select_threshes);
  set_prev_mi(cm);

  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);

    {
      // Take tiles into account and give start/end MB
      int tile_col, tile_row;
      TOKENEXTRA *tp = cpi->tok;
const int tile_cols = 1 << cm->log2_tile_cols;
const int tile_rows = 1 << cm->log2_tile_rows;
for (tile_row = 0; tile_row < tile_rows; tile_row++) {
for (tile_col = 0; tile_col < tile_cols; tile_col++) {
          TileInfo tile;
          TOKENEXTRA *tp_old = tp;
// For each row of SBs in the frame
vp9_tile_init(&tile, cm, tile_row, tile_col);
for (mi_row = tile.mi_row_start;
               mi_row < tile.mi_row_end; mi_row += 8)
            encode_sb_row(cpi, &tile, mi_row, &tp);

          cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old);
          assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols));
        }
      }
    }

    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
  }
if (cpi->sf.skip_encode_sb) {
int j;
unsigned int intra_count = 0, inter_count = 0;
for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
intra_count += cm->counts.intra_inter[j][0];
inter_count += cm->counts.intra_inter[j][1];
}
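    // As written, frame-level skip is enabled only when intra blocks were
    // under a quarter of inter blocks, on a shown non-key frame.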
cpi->sf.skip_encode_frame = ((intra_count << 2) < inter_count);
cpi->sf.skip_encode_frame &= (cm->frame_type != KEY_FRAME);
cpi->sf.skip_encode_frame &= cm->show_frame;
} else {
cpi->sf.skip_encode_frame = 0;
}
#if 0
  // Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
static int check_dual_ref_flags(VP9_COMP *cpi) {
const int ref_flags = cpi->ref_frame_flags;
  if (vp9_segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
        + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}
static int get_skip_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++) {
if (!mi_8x8[y * mis + x]->mbmi.skip_coeff)
return 0;
}
}
return 1;
}
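// Stamps tx_size onto every 8x8 block in a ymbs x xmbs region.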
static void set_txfm_flag(MODE_INFO **mi_8x8, int mis, int ymbs, int xmbs,
                          TX_SIZE tx_size) {
  int x, y;
  for (y = 0; y < ymbs; y++)
    for (x = 0; x < xmbs; x++)
      mi_8x8[y * mis + x]->mbmi.tx_size = tx_size;
}
static void reset_skip_txfm_size_b(VP9_COMMON *cm, MODE_INFO **mi_8x8,
int mis, TX_SIZE max_tx_size, int bw, int bh,
int mi_row, int mi_col, BLOCK_SIZE bsize) {
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) {
return;
} else {
MB_MODE_INFO * const mbmi = &mi_8x8[0]->mbmi;
if (mbmi->tx_size > max_tx_size) {
const int ymbs = MIN(bh, cm->mi_rows - mi_row);
const int xmbs = MIN(bw, cm->mi_cols - mi_col);
assert(vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP) ||
get_skip_flag(mi_8x8, mis, ymbs, xmbs));
set_txfm_flag(mi_8x8, mis, ymbs, xmbs, max_tx_size);
    }
  }
}
static void reset_skip_txfm_size_sb(VP9_COMMON *cm, MODE_INFO **mi_8x8,
TX_SIZE max_tx_size, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
const int mis = cm->mode_info_stride;
int bw, bh;
const int bs = num_8x8_blocks_wide_lookup[bsize], hbs = bs / 2;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
bw = num_8x8_blocks_wide_lookup[mi_8x8[0]->mbmi.sb_type];
bh = num_8x8_blocks_high_lookup[mi_8x8[0]->mbmi.sb_type];
if (bw == bs && bh == bs) {
    reset_skip_txfm_size_b(cm, mi_8x8, mis, max_tx_size, bs, bs, mi_row,
                           mi_col, bsize);
} else if (bw == bs && bh < bs) {
    reset_skip_txfm_size_b(cm, mi_8x8, mis, max_tx_size, bs, hbs, mi_row,
                           mi_col, bsize);
reset_skip_txfm_size_b(cm, mi_8x8 + hbs * mis, mis, max_tx_size, bs, hbs,
mi_row + hbs, mi_col, bsize);
} else if (bw < bs && bh == bs) {
    reset_skip_txfm_size_b(cm, mi_8x8, mis, max_tx_size, hbs, bs, mi_row,
                           mi_col, bsize);
reset_skip_txfm_size_b(cm, mi_8x8 + hbs, mis, max_tx_size, hbs, bs, mi_row,
mi_col + hbs, bsize);
} else {
const BLOCK_SIZE subsize = subsize_lookup[PARTITION_SPLIT][bsize];
int n;
assert(bw < bs && bh < bs);
for (n = 0; n < 4; n++) {
const int mi_dc = hbs * (n & 1);
const int mi_dr = hbs * (n >> 1);
reset_skip_txfm_size_sb(cm, &mi_8x8[mi_dr * mis + mi_dc], max_tx_size,
mi_row + mi_dr, mi_col + mi_dc, subsize);
}
}
}
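// Scans the frame in 64x64 steps and clamps any block whose chosen
// transform is larger than txfm_max; this is safe because such blocks are
// all-skip (see the assert in reset_skip_txfm_size_b above).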
static void reset_skip_txfm_size(VP9_COMMON *cm, TX_SIZE txfm_max) {
  int mi_row, mi_col;
  const int mis = cm->mode_info_stride;
  MODE_INFO **mi_8x8, **mi_ptr = cm->mi_grid_visible;
for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
mi_8x8 = mi_ptr;
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi_8x8 += 8) {
      reset_skip_txfm_size_sb(cm, mi_8x8, txfm_max, mi_row, mi_col,
                              BLOCK_64X64);
}
  }
}
static MV_REFERENCE_FRAME get_frame_type(VP9_COMP *cpi) {
  if (frame_is_intra_only(&cpi->common))
    return KEY_FRAME;
  else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
    return ALTREF_FRAME;
  else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
    return GOLDEN_FRAME;
  else
    return LAST_FRAME;
}
static void select_tx_mode(VP9_COMP *cpi) {
  if (cpi->oxcf.lossless) {
    cpi->common.tx_mode = ONLY_4X4;
  } else if (cpi->common.current_video_frame == 0) {
    cpi->common.tx_mode = TX_MODE_SELECT;
  } else {
    if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
      cpi->common.tx_mode = ALLOW_32X32;
    } else if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
      const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
      cpi->common.tx_mode =
          cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32]
          > cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
          ALLOW_32X32 : TX_MODE_SELECT;
    } else {
      unsigned int total = 0;
      int i;
      for (i = 0; i < TX_SIZES; ++i)
        total += cpi->tx_stepdown_count[i];
      if (total) {
        double fraction = (double)cpi->tx_stepdown_count[0] / total;
        cpi->common.tx_mode = fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT;
        // printf("fraction = %f\n", fraction);
      }  // else keep unchanged
    }
  }
}
// Start RTC Exploration
typedef enum {
BOTH_ZERO = 0,
ZERO_PLUS_PREDICTED = 1,
BOTH_PREDICTED = 2,
NEW_PLUS_NON_INTRA = 3,
BOTH_NEW = 4,
INTRA_PLUS_NON_INTRA = 5,
BOTH_INTRA = 6,
INVALID_CASE = 9
} motion_vector_context;
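// Appears to classify the motion vectors of a pair of blocks (zero,
// predicted, new or intra in each); INVALID_CASE is a sentinel outside
// the valid 0-6 range.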
static void set_mode_info(MB_MODE_INFO *mbmi, BLOCK_SIZE bsize,
MB_PREDICTION_MODE mode, int mi_row, int mi_col) {
mbmi->interp_filter = EIGHTTAP;
mbmi->mode = mode;
mbmi->mv[0].as_int = 0;
mbmi->mv[1].as_int = 0;
if (mode < NEARESTMV) {
mbmi->ref_frame[0] = INTRA_FRAME;
} else {
mbmi->ref_frame[0] = LAST_FRAME;
}
mbmi->ref_frame[1] = INTRA_FRAME;
mbmi->tx_size = max_txsize_lookup[bsize];
mbmi->uv_mode = mode;
mbmi->skip_coeff = 0;
mbmi->sb_type = bsize;
mbmi->segment_id = 0;
}
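// The two helpers below compose an 8x8-unit row/column inside a 64x64 SB
// from the 32x32, 16x16 and 8x8 sub-block indices: each index contributes
// one bit per scale (row from the high bit, column from the low bit).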
static INLINE int get_block_row(int b32i, int b16i, int b8i) {
return ((b32i >> 1) << 2) + ((b16i >> 1) << 1) + (b8i >> 1);
}
static INLINE int get_block_col(int b32i, int b16i, int b8i) {
return ((b32i & 1) << 2) + ((b16i & 1) << 1) + (b8i & 1);
}
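// RTC exploration: encode a 64x64 SB with a single fixed partition size
// (cpi->sf.always_this_block_size), using the fast non-RD mode picker on
// inter frames and a canned DC_PRED setup on key frames.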
static void rtc_use_partition(VP9_COMP *cpi,
const TileInfo *const tile,
MODE_INFO **mi_8x8,
TOKENEXTRA **tp, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *rate, int64_t *dist,
int do_recon) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
const int mis = cm->mode_info_stride;
int mi_width = num_8x8_blocks_wide_lookup[cpi->sf.always_this_block_size];
int mi_height = num_8x8_blocks_high_lookup[cpi->sf.always_this_block_size];
int i, j;
  int chosen_rate = INT_MAX;
  int64_t chosen_dist = INT64_MAX;
MB_PREDICTION_MODE mode = DC_PRED;
int row8x8_remaining = tile->mi_row_end - mi_row;
int col8x8_remaining = tile->mi_col_end - mi_col;
int b32i;
x->fast_ms = 0;
x->subblock_ref = 0;
for (b32i = 0; b32i < 4; b32i++) {
int b16i;
for (b16i = 0; b16i < 4; b16i++) {
int b8i;
int block_row = get_block_row(b32i, b16i, 0);
int block_col = get_block_col(b32i, b16i, 0);
int index = block_row * mis + block_col;
int rate;
int64_t dist;
int_mv frame_nearest_mv[MAX_REF_FRAMES];
int_mv frame_near_mv[MAX_REF_FRAMES];
struct buf_2d yv12_mb[MAX_REF_FRAMES][MAX_MB_PLANE];
// Find a partition size that fits
bsize = find_partition_size(cpi->sf.always_this_block_size,
(row8x8_remaining - block_row),
(col8x8_remaining - block_col),
&mi_height, &mi_width);
mi_8x8[index] = mi_8x8[0] + index;
set_mi_row_col(xd, tile, mi_row + block_row, mi_height,
mi_col + block_col, mi_width, cm->mi_rows, cm->mi_cols);
xd->mi_8x8 = mi_8x8 + index;
if (cm->frame_type != KEY_FRAME) {
set_offsets(cpi, tile, mi_row + block_row, mi_col + block_col, bsize);
vp9_pick_inter_mode(cpi, x, tile,
                            mi_row + block_row, mi_col + block_col,
                            &rate, &dist, bsize);
} else {
set_mode_info(&mi_8x8[index]->mbmi, bsize, mode,
mi_row + block_row, mi_col + block_col);
vp9_setup_buffer_inter(cpi, x, tile,
LAST_FRAME, cpi->sf.always_this_block_size,
mi_row + block_row, mi_col + block_col,
frame_nearest_mv, frame_near_mv, yv12_mb);
}
for (j = 0; j < mi_height; j++)
for (i = 0; i < mi_width; i++)
if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > i
&& (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > j) {
            mi_8x8[index + i + j * mis] = mi_8x8[index];
}
for (b8i = 0; b8i < 4; b8i++) {
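        // 8x8 sub-block refinement is left empty here; this loop appears
        // to be a placeholder in the exploration code.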
}
}
}
encode_sb_rt(cpi, tile, tp, mi_row, mi_col, 1, BLOCK_64X64);
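  // Note: chosen_rate/chosen_dist still hold their init values here; the
  // per-block rate/dist from the mode picker is not yet accumulated in
  // this exploration code.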
*rate = chosen_rate;
*dist = chosen_dist;
}
static void encode_rtc_sb_row(VP9_COMP *cpi, const TileInfo *const tile,
int mi_row, TOKENEXTRA **tp) {
VP9_COMMON * const cm = &cpi->common;
int mi_col;
// Initialize the left context for the new SB row
vpx_memset(&cpi->left_context, 0, sizeof(cpi->left_context));
vpx_memset(cpi->left_seg_context, 0, sizeof(cpi->left_seg_context));
// Code each SB in the row
for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
mi_col += MI_BLOCK_SIZE) {
int dummy_rate;
int64_t dummy_dist;
const int idx_str = cm->mode_info_stride * mi_row + mi_col;
MODE_INFO **mi_8x8 = cm->mi_grid_visible + idx_str;
cpi->mb.source_variance = UINT_MAX;
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
set_partitioning(cpi, tile, mi_8x8, mi_row, mi_col);
rtc_use_partition(cpi, tile, mi_8x8, tp, mi_row, mi_col, BLOCK_64X64,
&dummy_rate, &dummy_dist, 1);
}
}
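// Appears to be a near-copy of encode_frame_internal, specialized for the
// fixed-partition RTC path above.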
static void encode_rtc_frame_internal(VP9_COMP *cpi) {
int mi_row;
MACROBLOCK * const x = &cpi->mb;
VP9_COMMON * const cm = &cpi->common;
MACROBLOCKD * const xd = &x->e_mbd;
// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
// cpi->common.current_video_frame, cpi->common.show_frame,
// cm->frame_type);
// debug output
#if DBG_PRNT_SEGMAP
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n");
fclose(statsfile);
}
#endif
vp9_zero(cm->counts.switchable_interp);
vp9_zero(cpi->tx_stepdown_count);
xd->mi_8x8 = cm->mi_grid_visible;
// required for vp9_frame_init_quantizer
xd->mi_8x8[0] = cm->mi;
xd->last_mi = cm->prev_mi;
vp9_zero(cpi->common.counts.mv);
vp9_zero(cpi->coef_counts);
vp9_zero(cm->counts.eob_branch);
cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0
&& cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
switch_lossless_mode(cpi, cpi->mb.e_mbd.lossless);
vp9_frame_init_quantizer(cpi);
vp9_initialize_rd_consts(cpi);
vp9_initialize_me_consts(cpi, cm->base_qindex);
switch_tx_mode(cpi);
cpi->sf.always_this_block_size = BLOCK_16X16;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Initialize encode frame context.
init_encode_frame_mb_context(cpi);
// Build a frame level activity map
build_activity_map(cpi);
}
// Re-initialize encode frame context.
init_encode_frame_mb_context(cpi);
vp9_zero(cpi->rd_comp_pred_diff);
vp9_zero(cpi->rd_filter_diff);
vp9_zero(cpi->rd_tx_select_diff);
vp9_zero(cpi->rd_tx_select_threshes);
set_prev_mi(cm);
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
{
// Take tiles into account and give start/end MB
int tile_col, tile_row;
TOKENEXTRA *tp = cpi->tok;
const int tile_cols = 1 << cm->log2_tile_cols;
const int tile_rows = 1 << cm->log2_tile_rows;
for (tile_row = 0; tile_row < tile_rows; tile_row++) {
for (tile_col = 0; tile_col < tile_cols; tile_col++) {
TileInfo tile;
TOKENEXTRA *tp_old = tp;
// For each row of SBs in the frame
vp9_tile_init(&tile, cm, tile_row, tile_col);
for (mi_row = tile.mi_row_start;
mi_row < tile.mi_row_end; mi_row += 8)
encode_rtc_sb_row(cpi, &tile, mi_row, &tp);
cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old);
assert(tp - cpi->tok <= get_token_alloc(cm->mb_rows, cm->mb_cols));
}
}
}
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_sb_row += vpx_usec_timer_elapsed(&emr_timer);
}
if (cpi->sf.skip_encode_sb) {
int j;
unsigned int intra_count = 0, inter_count = 0;
for (j = 0; j < INTRA_INTER_CONTEXTS; ++j) {
intra_count += cm->counts.intra_inter[j][0];
inter_count += cm->counts.intra_inter[j][1];
}
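    // Same heuristic as in encode_frame_internal: skip only when intra
    // blocks are under a quarter of inter blocks on a shown non-key frame.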
cpi->sf.skip_encode_frame = ((intra_count << 2) < inter_count);
cpi->sf.skip_encode_frame &= (cm->frame_type != KEY_FRAME);
cpi->sf.skip_encode_frame &= cm->show_frame;
} else {
cpi->sf.skip_encode_frame = 0;
}
#if 0
// Keep record of the total distortion this time around for future use
cpi->last_frame_distortion = cpi->frame_distortion;
#endif
}
// end RTC play code
void vp9_encode_frame(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;

  // In the longer term the encoder should be generalized to match the
  // decoder such that we allow compound where one of the 3 buffers has a
  // different sign bias and that buffer is then the fixed ref. However, this
  // requires further work in the rd loop. For now the only supported encoder
  // side behavior is where the ALT ref buffer has opposite sign bias to
  // the other two.
if (!frame_is_intra_only(cm)) {
if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
(cm->ref_frame_sign_bias[ALTREF_FRAME] ==
cm->ref_frame_sign_bias[LAST_FRAME])) {
cm->allow_comp_inter_inter = 0;
} else {
cm->allow_comp_inter_inter = 1;
cm->comp_fixed_ref = ALTREF_FRAME;
cm->comp_var_ref[0] = LAST_FRAME;
cm->comp_var_ref[1] = GOLDEN_FRAME;
}
  }

  if (cpi->sf.RD) {
    int i;
REFERENCE_MODE reference_mode;
    /*
     * This code does a single RD pass over the whole frame assuming
     * either compound, single or hybrid prediction as per whatever has
     * worked best for that type of frame in the past.
     * It also predicts whether another coding mode would have worked
     * better than this coding mode. If that is the case, it remembers
     * that for subsequent frames.
     * It does the same analysis for transform size selection also.
     */
    const MV_REFERENCE_FRAME frame_type = get_frame_type(cpi);
const int64_t *mode_thresh = cpi->rd_prediction_type_threshes[frame_type];
const int64_t *filter_thresh = cpi->rd_filter_threshes[frame_type];
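    // These thresholds are running per-frame-type averages of the RD cost
    // differences accumulated near the end of this function.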
/* prediction (compound, single or hybrid) mode selection */
    if (frame_type == ALTREF_FRAME || !cm->allow_comp_inter_inter)
reference_mode = SINGLE_REFERENCE;
else if (mode_thresh[COMPOUND_REFERENCE] > mode_thresh[SINGLE_REFERENCE] &&
mode_thresh[COMPOUND_REFERENCE] >
mode_thresh[REFERENCE_MODE_SELECT] &&
check_dual_ref_flags(cpi) &&
cpi->static_mb_pct == 100)
reference_mode = COMPOUND_REFERENCE;
    else if (mode_thresh[SINGLE_REFERENCE] > mode_thresh[REFERENCE_MODE_SELECT])
      reference_mode = SINGLE_REFERENCE;
    else
      reference_mode = REFERENCE_MODE_SELECT;

    cm->reference_mode = reference_mode;
if (cm->interp_filter == SWITCHABLE) {
if (frame_type != ALTREF_FRAME &&
filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP] &&
filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[EIGHTTAP_SHARP] &&
filter_thresh[EIGHTTAP_SMOOTH] > filter_thresh[SWITCHABLE - 1]) {
cm->interp_filter = EIGHTTAP_SMOOTH;
} else if (filter_thresh[EIGHTTAP_SHARP] > filter_thresh[EIGHTTAP] &&
filter_thresh[EIGHTTAP_SHARP] > filter_thresh[SWITCHABLE - 1]) {
cm->interp_filter = EIGHTTAP_SHARP;
} else if (filter_thresh[EIGHTTAP] > filter_thresh[SWITCHABLE - 1]) {
cm->interp_filter = EIGHTTAP;
      }
    }
cpi->mb.e_mbd.lossless = cpi->oxcf.lossless;
/* transform size selection (4x4, 8x8, 16x16 or select-per-mb) */
select_tx_mode(cpi);
    if (cpi->sf.super_fast_rtc)
      encode_rtc_frame_internal(cpi);
else
encode_frame_internal(cpi);
for (i = 0; i < REFERENCE_MODES; ++i) {
const int diff = (int) (cpi->rd_comp_pred_diff[i] / cm->MBs);
cpi->rd_prediction_type_threshes[frame_type][i] += diff;
cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
}
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++) {
const int64_t diff = cpi->rd_filter_diff[i] / cm->MBs;
cpi->rd_filter_threshes[frame_type][i] =
(cpi->rd_filter_threshes[frame_type][i] + diff) / 2;
}
for (i = 0; i < TX_MODES; ++i) {
int64_t pd = cpi->rd_tx_select_diff[i];
int diff;
if (i == TX_MODE_SELECT)
pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv, 2048 * (TX_SIZES - 1), 0);
      diff = (int) (pd / cm->MBs);
      cpi->rd_tx_select_threshes[frame_type][i] += diff;
cpi->rd_tx_select_threshes[frame_type][i] /= 2;
}
if (cm->reference_mode == REFERENCE_MODE_SELECT) {
int single_count_zero = 0;
int comp_count_zero = 0;
for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
single_count_zero += cm->counts.comp_inter[i][0];
comp_count_zero += cm->counts.comp_inter[i][1];
      }

      if (comp_count_zero == 0) {
        cm->reference_mode = SINGLE_REFERENCE;
        vp9_zero(cm->counts.comp_inter);
      } else if (single_count_zero == 0) {
        cm->reference_mode = COMPOUND_REFERENCE;
        vp9_zero(cm->counts.comp_inter);
      }
    }

    if (cm->tx_mode == TX_MODE_SELECT) {
      int count4x4 = 0;
int count8x8_lp = 0, count8x8_8x8p = 0;
int count16x16_16x16p = 0, count16x16_lp = 0;
int count32x32 = 0;
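      // Histogram which transform sizes were actually chosen under each
      // allowed maximum; if a larger size is never used, tx_mode is
      // tightened and oversized blocks are clamped below.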
for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
count4x4 += cm->counts.tx.p32x32[i][TX_4X4];
count4x4 += cm->counts.tx.p16x16[i][TX_4X4];
count4x4 += cm->counts.tx.p8x8[i][TX_4X4];
count8x8_lp += cm->counts.tx.p32x32[i][TX_8X8];
count8x8_lp += cm->counts.tx.p16x16[i][TX_8X8];
count8x8_8x8p += cm->counts.tx.p8x8[i][TX_8X8];
count16x16_16x16p += cm->counts.tx.p16x16[i][TX_16X16];
count16x16_lp += cm->counts.tx.p32x32[i][TX_16X16];
count32x32 += cm->counts.tx.p32x32[i][TX_32X32];
      }

      if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
count32x32 == 0) {
cm->tx_mode = ALLOW_8X8;
reset_skip_txfm_size(cm, TX_8X8);
} else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
cm->tx_mode = ONLY_4X4;
reset_skip_txfm_size(cm, TX_4X4);
      } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
        cm->tx_mode = ALLOW_32X32;
      } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
cm->tx_mode = ALLOW_16X16;
reset_skip_txfm_size(cm, TX_16X16);
      }
    }
  } else {
    // Force the usage of the BILINEAR interp_filter.
cm->interp_filter = BILINEAR;
if (cpi->sf.super_fast_rtc)
encode_rtc_frame_internal(cpi);
    else
      encode_frame_internal(cpi);
  }
}
static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi) {
  const MB_PREDICTION_MODE y_mode = mi->mbmi.mode;
  const MB_PREDICTION_MODE uv_mode = mi->mbmi.uv_mode;
  const BLOCK_SIZE bsize = mi->mbmi.sb_type;

  ++counts->uv_mode[y_mode][uv_mode];

  if (bsize < BLOCK_8X8) {
    int idx, idy;
    const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
    const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
    for (idy = 0; idy < 2; idy += num_4x4_blocks_high)
      for (idx = 0; idx < 2; idx += num_4x4_blocks_wide)
        ++counts->y_mode[0][mi->bmi[idy * 2 + idx].as_mode];
  } else {
    ++counts->y_mode[size_group_lookup[bsize]][y_mode];
  }
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
if (act > cpi->activity_avg)
    x->act_zbin_adj = (int) (((int64_t) b + (a >> 1)) / a) - 1;
  else
    x->act_zbin_adj = 1 - (int) (((int64_t) a + (b >> 1)) / b);
}
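// Selects a zbin boost (stronger coefficient zeroing) by prediction type:
// ZEROMV gets a reference-dependent boost to suppress noise, while sub-8x8
// inter and intra blocks get fixed boosts.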
static int get_zbin_mode_boost(const MB_MODE_INFO *mbmi, int enabled) {
if (enabled) {
if (is_inter_block(mbmi)) {
if (mbmi->mode == ZEROMV) {
return mbmi->ref_frame[0] != LAST_FRAME ? GF_ZEROMV_ZBIN_BOOST
: LF_ZEROMV_ZBIN_BOOST;
} else {
return mbmi->sb_type < BLOCK_8X8 ? SPLIT_MV_ZBIN_BOOST
: MV_ZBIN_BOOST;
}
} else {
return INTRA_ZBIN_BOOST;
}
} else {
return 0;
}
}
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
int mi_row, int mi_col, BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO **mi_8x8 = xd->mi_8x8;
  MODE_INFO *mi = mi_8x8[0];
  MB_MODE_INFO *mbmi = &mi->mbmi;
PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize);
unsigned int segment_id = mbmi->segment_id;
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
const int mi_height = num_8x8_blocks_high_lookup[bsize];
  x->skip_recode = !x->select_txfm_size && mbmi->sb_type >= BLOCK_8X8 &&
                   !cpi->sf.super_fast_rtc;
x->skip_optimize = ctx->is_coded;
ctx->is_coded = 1;
x->use_lp32x32fdct = cpi->sf.use_lp32x32fdct;
x->skip_encode = (!output_enabled && cpi->sf.skip_encode_frame &&
x->q_index < QIDX_SKIP_THRESH);
if (x->skip_encode)
return;
if (cm->frame_type == KEY_FRAME) {
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
adjust_act_zbin(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
} else {
set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]);
xd->interp_kernel = vp9_get_interp_kernel(mbmi->interp_filter);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
}
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
cpi->zbin_mode_boost = get_zbin_mode_boost(mbmi,
                                               cpi->zbin_mode_boost_enabled);
    vp9_update_zbin_extra(cpi, x);
  }

  if (!is_inter_block(mbmi)) {
vp9_encode_intra_block_y(x, MAX(bsize, BLOCK_8X8));
vp9_encode_intra_block_uv(x, MAX(bsize, BLOCK_8X8));
    if (output_enabled)
      sum_intra_stats(&cm->counts, mi);
  } else {
    int ref;
const int is_compound = has_second_ref(mbmi);
for (ref = 0; ref < 1 + is_compound; ++ref) {
YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi,
mbmi->ref_frame[ref]);
setup_pre_planes(xd, ref, cfg, mi_row, mi_col, &xd->block_refs[ref]->sf);
    }
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
  }

  if (!is_inter_block(mbmi)) {
vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
  } else if (!x->skip) {
    vp9_encode_sb(x, MAX(bsize, BLOCK_8X8));
vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));