diff --git a/vp9/common/vp9_enums.h b/vp9/common/vp9_enums.h
index 3208b7270695c5ef31a2a00345151cc065886bf4..5af52c66ea55378234f3f32cb722d64196acd625 100644
--- a/vp9/common/vp9_enums.h
+++ b/vp9/common/vp9_enums.h
@@ -22,20 +22,20 @@
 #define MI_MASK (MI_BLOCK_SIZE - 1)
 
 typedef enum BLOCK_SIZE_TYPE {
-  BLOCK_SIZE_AB4X4, BLOCK_4X4 = BLOCK_SIZE_AB4X4,
-  BLOCK_SIZE_SB4X8, BLOCK_4X8 = BLOCK_SIZE_SB4X8,
-  BLOCK_SIZE_SB8X4, BLOCK_8X4 = BLOCK_SIZE_SB8X4,
-  BLOCK_SIZE_SB8X8, BLOCK_8X8 = BLOCK_SIZE_SB8X8,
-  BLOCK_SIZE_SB8X16, BLOCK_8X16 = BLOCK_SIZE_SB8X16,
-  BLOCK_SIZE_SB16X8, BLOCK_16X8 = BLOCK_SIZE_SB16X8,
-  BLOCK_SIZE_MB16X16, BLOCK_16X16 = BLOCK_SIZE_MB16X16,
-  BLOCK_SIZE_SB16X32, BLOCK_16X32 = BLOCK_SIZE_SB16X32,
-  BLOCK_SIZE_SB32X16, BLOCK_32X16 = BLOCK_SIZE_SB32X16,
-  BLOCK_SIZE_SB32X32, BLOCK_32X32 = BLOCK_SIZE_SB32X32,
-  BLOCK_SIZE_SB32X64, BLOCK_32X64 = BLOCK_SIZE_SB32X64,
-  BLOCK_SIZE_SB64X32, BLOCK_64X32 = BLOCK_SIZE_SB64X32,
-  BLOCK_SIZE_SB64X64, BLOCK_64X64 = BLOCK_SIZE_SB64X64,
-  BLOCK_SIZE_TYPES, BLOCK_MAX_SB_SEGMENTS = BLOCK_SIZE_TYPES
+  BLOCK_4X4,
+  BLOCK_4X8,
+  BLOCK_8X4,
+  BLOCK_8X8,
+  BLOCK_8X16,
+  BLOCK_16X8,
+  BLOCK_16X16,
+  BLOCK_16X32,
+  BLOCK_32X16,
+  BLOCK_32X32,
+  BLOCK_32X64,
+  BLOCK_64X32,
+  BLOCK_64X64,
+  BLOCK_SIZE_TYPES
 } BLOCK_SIZE_TYPE;
 
 typedef enum PARTITION_TYPE {
diff --git a/vp9/common/vp9_findnearmv.h b/vp9/common/vp9_findnearmv.h
index e5221ed67622ceb0ff6f211cde566e16f6bf0a98..178ad871de410a015ced4243895aaa78c179e489 100644
--- a/vp9/common/vp9_findnearmv.h
+++ b/vp9/common/vp9_findnearmv.h
@@ -52,8 +52,8 @@ static MB_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b) {
 
     if (is_inter_block(&cur_mb->mbmi)) {
       return DC_PRED;
-    } else if (cur_mb->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
-      return ((cur_mb->bmi + 1 + b)->as_mode);
+    } else if (cur_mb->mbmi.sb_type < BLOCK_8X8) {
+      return (cur_mb->bmi + 1 + b)->as_mode;
     } else {
       return cur_mb->mbmi.mode;
     }
@@ -70,8 +70,8 @@ static MB_PREDICTION_MODE above_block_mode(const MODE_INFO *cur_mb,
 
     if (is_inter_block(&cur_mb->mbmi)) {
       return DC_PRED;
-    } else if (cur_mb->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
-      return ((cur_mb->bmi + 2 + b)->as_mode);
+    } else if (cur_mb->mbmi.sb_type < BLOCK_8X8) {
+      return (cur_mb->bmi + 2 + b)->as_mode;
     } else {
       return cur_mb->mbmi.mode;
     }
diff --git a/vp9/common/vp9_mvref_common.c b/vp9/common/vp9_mvref_common.c
index 3b72f41c295d43d52b2f1c32bb3cb190341c15f5..89f9af61a88bdb3bcb8a134e2e8d3449703cf15a 100644
--- a/vp9/common/vp9_mvref_common.c
+++ b/vp9/common/vp9_mvref_common.c
@@ -121,7 +121,7 @@ static void clamp_mv_ref(const MACROBLOCKD *xd, int_mv *mv) {
 static INLINE int_mv get_sub_block_mv(const MODE_INFO *candidate,
                                       int check_sub_blocks, int which_mv,
                                       int search_col, int block_idx) {
-  return (check_sub_blocks && candidate->mbmi.sb_type < BLOCK_SIZE_SB8X8
+  return (check_sub_blocks && candidate->mbmi.sb_type < BLOCK_8X8
           ? candidate->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]]
               .as_mv[which_mv]
           : candidate->mbmi.mv[which_mv]);
diff --git a/vp9/common/vp9_onyxc_int.h b/vp9/common/vp9_onyxc_int.h
index 152a9329320c4e005d4e73106e2456c513c407f4..46b729d2cf85bb7db7f3472bda53f617883070e7 100644
--- a/vp9/common/vp9_onyxc_int.h
+++ b/vp9/common/vp9_onyxc_int.h
@@ -250,7 +250,7 @@ static int check_bsize_coverage(VP9_COMMON *cm, int mi_row, int mi_col,
 
   // frame width/height are multiples of 8, hence 8x8 block should always
   // pass the above check
-  assert(bsize > BLOCK_SIZE_SB8X8);
+  assert(bsize > BLOCK_8X8);
 
   // return the node index in the prob tree for binary coding
   // only allow horizontal/split partition types
diff --git a/vp9/common/vp9_reconinter.c b/vp9/common/vp9_reconinter.c
index 0b65e06108d7db101a703f226be3cf7c4cc6a98d..1dbc935c7fdd9da664502d01d60dfec5469a62ff 100644
--- a/vp9/common/vp9_reconinter.c
+++ b/vp9/common/vp9_reconinter.c
@@ -285,8 +285,8 @@ static void build_inter_predictors(int plane, int block,
 
   assert(x < (4 << bwl));
   assert(y < (4 << bhl));
-  assert(mi->mbmi.sb_type < BLOCK_SIZE_SB8X8 || 4 << pred_w == (4 << bwl));
-  assert(mi->mbmi.sb_type < BLOCK_SIZE_SB8X8 || 4 << pred_h == (4 << bhl));
+  assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_w == (4 << bwl));
+  assert(mi->mbmi.sb_type < BLOCK_8X8 || 4 << pred_h == (4 << bhl));
 
   for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
     // source
@@ -303,7 +303,7 @@ static void build_inter_predictors(int plane, int block,
     // same MV (the average of the 4 luma MVs) but we could do something
     // smarter for non-4:2:0. Just punt for now, pending the changes to get
     // rid of SPLITMV mode entirely.
-    const MV mv = mi->mbmi.sb_type < BLOCK_SIZE_SB8X8
+    const MV mv = mi->mbmi.sb_type < BLOCK_8X8
         ? (plane == 0 ? mi->bmi[block].as_mv[which_mv].as_mv
                       : mi_mv_pred_q4(mi, which_mv))
         : mi->mbmi.mv[which_mv].as_mv;
diff --git a/vp9/decoder/vp9_decodemv.c b/vp9/decoder/vp9_decodemv.c
index a3e2ad39d2e607ee6e7eca74fde05b815f7c5d73..cc2346553d12aa1257bbb7c8216d500115ce702c 100644
--- a/vp9/decoder/vp9_decodemv.c
+++ b/vp9/decoder/vp9_decodemv.c
@@ -164,7 +164,7 @@ static void read_intra_frame_mode_info(VP9D_COMP *pbi, MODE_INFO *m,
   mbmi->ref_frame[0] = INTRA_FRAME;
   mbmi->ref_frame[1] = NONE;
 
-  if (bsize >= BLOCK_SIZE_SB8X8) {
+  if (bsize >= BLOCK_8X8) {
     const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
     const MB_PREDICTION_MODE L = xd->left_available ?
                                  left_block_mode(m, 0) : DC_PRED;
@@ -386,7 +386,7 @@ static void read_intra_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
   mbmi->ref_frame[0] = INTRA_FRAME;
   mbmi->ref_frame[1] = NONE;
 
-  if (bsize >= BLOCK_SIZE_SB8X8) {
+  if (bsize >= BLOCK_8X8) {
     const int size_group = size_group_lookup[bsize];
     mbmi->mode = read_intra_mode(r, cm->fc.y_mode_prob[size_group]);
     cm->counts.y_mode[size_group][mbmi->mode]++;
@@ -459,13 +459,13 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
 
   if (vp9_segfeature_active(&xd->seg, mbmi->segment_id, SEG_LVL_SKIP))
     mbmi->mode = ZEROMV;
-  else if (bsize >= BLOCK_SIZE_SB8X8)
+  else if (bsize >= BLOCK_8X8)
     mbmi->mode = read_inter_mode(cm, r, inter_mode_ctx);
 
   mbmi->uv_mode = DC_PRED;
 
   // nearest, nearby
-  if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
+  if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
     vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref0], &nearest, &nearby);
     best_mv.as_int = mbmi->ref_mvs[ref0][0].as_int;
   }
@@ -479,14 +479,14 @@ static void read_inter_block_mode_info(VP9D_COMP *pbi, MODE_INFO *mi,
                      ref1, mbmi->ref_mvs[ref1], cm->ref_frame_sign_bias,
                      mi_row, mi_col);
 
-    if (bsize < BLOCK_SIZE_SB8X8 || mbmi->mode != ZEROMV) {
+    if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
       vp9_find_best_ref_mvs(xd, mbmi->ref_mvs[ref1], &nearest_second,
                             &nearby_second);
       best_mv_second.as_int = mbmi->ref_mvs[ref1][0].as_int;
     }
   }
 
-  if (bsize < BLOCK_SIZE_SB8X8) {
+  if (bsize < BLOCK_8X8) {
     const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize];  // 1 or 2
     const int num_4x4_h = num_4x4_blocks_high_lookup[bsize];  // 1 or 2
     int idx, idy;
diff --git a/vp9/encoder/vp9_bitstream.c b/vp9/encoder/vp9_bitstream.c
index 98ef42074e5eac5f12187800303412c1d5547905..9d60046044e8a6e78b1e03b356f577262dff8885 100644
--- a/vp9/encoder/vp9_bitstream.c
+++ b/vp9/encoder/vp9_bitstream.c
@@ -434,7 +434,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
     vp9_write(bc, rf != INTRA_FRAME,
               vp9_get_pred_prob_intra_inter(pc, xd));
 
-  if (bsize >= BLOCK_SIZE_SB8X8 && pc->tx_mode == TX_MODE_SELECT &&
+  if (bsize >= BLOCK_8X8 && pc->tx_mode == TX_MODE_SELECT &&
      !(rf != INTRA_FRAME &&
        (skip_coeff || vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)))) {
     write_selected_tx_size(cpi, mi->txfm_size, bsize, bc);
@@ -445,7 +445,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
     active_section = 6;
 #endif
 
-    if (bsize >= BLOCK_SIZE_SB8X8) {
+    if (bsize >= BLOCK_8X8) {
       write_intra_mode(bc, mode, pc->fc.y_mode_prob[size_group_lookup[bsize]]);
     } else {
       int idx, idy;
@@ -470,7 +470,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
 
     // If segment skip is not enabled code the mode.
     if (!vp9_segfeature_active(seg, segment_id, SEG_LVL_SKIP)) {
-      if (bsize >= BLOCK_SIZE_SB8X8) {
+      if (bsize >= BLOCK_8X8) {
         write_sb_mv_ref(bc, mode, mv_ref_p);
         ++pc->counts.inter_mode[mi->mb_mode_context[rf]]
                                [inter_mode_offset(mode)];
@@ -486,7 +486,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc) {
       assert(mi->interp_filter == cpi->common.mcomp_filter_type);
     }
 
-    if (bsize < BLOCK_SIZE_SB8X8) {
+    if (bsize < BLOCK_8X8) {
       int j;
       MB_PREDICTION_MODE blockmode;
      int_mv blockmv;
@@ -544,10 +544,10 @@ static void write_mb_modes_kf(const VP9_COMP *cpi, MODE_INFO *m,
 
   write_skip_coeff(cpi, segment_id, m, bc);
 
-  if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8 && c->tx_mode == TX_MODE_SELECT)
+  if (m->mbmi.sb_type >= BLOCK_8X8 && c->tx_mode == TX_MODE_SELECT)
     write_selected_tx_size(cpi, m->mbmi.txfm_size, m->mbmi.sb_type, bc);
 
-  if (m->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+  if (m->mbmi.sb_type >= BLOCK_8X8) {
     const MB_PREDICTION_MODE A = above_block_mode(m, 0, mis);
     const MB_PREDICTION_MODE L = xd->left_available ?
                                  left_block_mode(m, 0) : DC_PRED;
@@ -580,7 +580,7 @@ static void write_modes_b(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
   VP9_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->mb.e_mbd;
 
-  if (m->mbmi.sb_type < BLOCK_SIZE_SB8X8)
+  if (m->mbmi.sb_type < BLOCK_8X8)
     if (xd->ab_index > 0)
       return;
   xd->mode_info_context = m;
@@ -621,11 +621,11 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
 
   partition = partition_lookup[bsl][m->mbmi.sb_type];
 
-  if (bsize < BLOCK_SIZE_SB8X8)
+  if (bsize < BLOCK_8X8)
     if (xd->ab_index > 0)
       return;
 
-  if (bsize >= BLOCK_SIZE_SB8X8) {
+  if (bsize >= BLOCK_8X8) {
     int pl;
     const int idx = check_bsize_coverage(cm, mi_row, mi_col, bsize);
     set_partition_seg_context(cm, xd, mi_row, mi_col);
@@ -672,8 +672,8 @@ static void write_modes_sb(VP9_COMP *cpi, MODE_INFO *m, vp9_writer *bc,
   }
 
   // update partition context
-  if (bsize >= BLOCK_SIZE_SB8X8 &&
-      (bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
+  if (bsize >= BLOCK_8X8 &&
+      (bsize == BLOCK_8X8 || partition != PARTITION_SPLIT)) {
     set_partition_seg_context(cm, xd, mi_row, mi_col);
     update_partition_context(xd, subsize, bsize);
   }
diff --git a/vp9/encoder/vp9_encodeframe.c b/vp9/encoder/vp9_encodeframe.c
index f4490aed936d1790a6c85dc991d31eb83c01c3d4..4b74bc8360f0989feee2f5f22930e9642f3ee499 100644
--- a/vp9/encoder/vp9_encodeframe.c
+++ b/vp9/encoder/vp9_encodeframe.c
@@ -412,7 +412,7 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
     // Note how often each mode chosen as best
     cpi->mode_chosen_counts[mb_mode_index]++;
     if (is_inter_block(mbmi)
-        && (mbmi->sb_type < BLOCK_SIZE_SB8X8 || mbmi->mode == NEWMV)) {
+        && (mbmi->sb_type < BLOCK_8X8 || mbmi->mode == NEWMV)) {
       int_mv best_mv, best_second_mv;
       const MV_REFERENCE_FRAME rf1 = mbmi->ref_frame[0];
       const MV_REFERENCE_FRAME rf2 = mbmi->ref_frame[1];
@@ -427,7 +427,7 @@ static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
       vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
     }
 
-    if (bsize > BLOCK_SIZE_SB8X8 && mbmi->mode == NEWMV) {
+    if (bsize > BLOCK_8X8 && mbmi->mode == NEWMV) {
       int i, j;
       for (j = 0; j < mi_height; ++j)
         for (i = 0; i < mi_width; ++i)
@@ -572,7 +572,7 @@ static void pick_sb_modes(VP9_COMP *cpi, int mi_row, int mi_col,
 
   x->rd_search = 1;
 
-  if (bsize < BLOCK_SIZE_SB8X8) {
+  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (xd->ab_index != 0)
@@ -769,7 +769,7 @@ static void encode_b(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
   if (sub_index != -1)
     *(get_sb_index(xd, bsize)) = sub_index;
 
-  if (bsize < BLOCK_SIZE_SB8X8) {
+  if (bsize < BLOCK_8X8) {
     // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
     // there is nothing to be done.
     if (xd->ab_index > 0)
@@ -792,7 +792,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
   VP9_COMMON * const cm = &cpi->common;
   MACROBLOCK * const x = &cpi->mb;
   MACROBLOCKD * const xd = &x->e_mbd;
-  BLOCK_SIZE_TYPE c1 = BLOCK_SIZE_SB8X8;
+  BLOCK_SIZE_TYPE c1 = BLOCK_8X8;
   const int bsl = b_width_log2(bsize), bs = (1 << bsl) / 4;
   int UNINITIALIZED_IS_SAFE(pl);
   PARTITION_TYPE partition;
@@ -803,7 +803,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
     return;
 
   c1 = BLOCK_4X4;
-  if (bsize >= BLOCK_SIZE_SB8X8) {
+  if (bsize >= BLOCK_8X8) {
     set_partition_seg_context(cm, xd, mi_row, mi_col);
     pl = partition_plane_context(xd, bsize);
     c1 = *(get_sb_partitioning(x, bsize));
@@ -812,7 +812,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
 
   switch (partition) {
     case PARTITION_NONE:
-      if (output_enabled && bsize >= BLOCK_SIZE_SB8X8)
+      if (output_enabled && bsize >= BLOCK_8X8)
         cpi->partition_count[pl][PARTITION_NONE]++;
       encode_b(cpi, tp, mi_row, mi_col, output_enabled, c1, -1);
       break;
@@ -847,7 +847,7 @@ static void encode_sb(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row, int mi_col,
       break;
   }
 
-  if (partition != PARTITION_SPLIT || bsize == BLOCK_SIZE_SB8X8) {
+  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) {
     set_partition_seg_context(cm, xd, mi_row, mi_col);
     update_partition_context(xd, c1, bsize);
   }
@@ -1093,7 +1093,7 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
   int pixels_wide = 64, pixels_high = 64;
 
   vp9_zero(vt);
-  set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+  set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
 
   if (xd->mb_to_right_edge < 0)
     pixels_wide += (xd->mb_to_right_edge >> 3);
@@ -1121,12 +1121,12 @@ static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
     setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
                      &xd->scale_factor[1]);
     xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
-    xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB64X64;
+    xd->mode_info_context->mbmi.sb_type = BLOCK_64X64;
     vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
                           &nearest_mv, &near_mv);
     xd->mode_info_context->mbmi.mv[0] = nearest_mv;
-    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);
 
     d = xd->plane[0].dst.buf;
     dp = xd->plane[0].dst.stride;
   }
@@ -1228,7 +1228,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 
   subsize = get_subsize(bsize, partition);
 
-  if (bsize < BLOCK_SIZE_SB8X8) {
+  if (bsize < BLOCK_8X8) {
     // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
     // there is nothing to be done.
     if (xd->ab_index != 0) {
@@ -1247,7 +1247,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 
     if (cpi->sf.adjust_partitioning_from_last_frame) {
       // Check if any of the sub blocks are further split.
-      if (partition == PARTITION_SPLIT && subsize > BLOCK_SIZE_SB8X8) {
+      if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
        sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
        splits_below = 1;
        for (i = 0; i < 4; i++) {
@@ -1287,7 +1287,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
       pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
                     subsize, get_block_context(x, subsize), INT64_MAX);
       if (last_part_rate != INT_MAX &&
-          bsize >= BLOCK_SIZE_SB8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
+          bsize >= BLOCK_8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
         int rt = 0;
         int64_t dt = 0;
         update_state(cpi, get_block_context(x, subsize), subsize, 0);
@@ -1310,7 +1310,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
       pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
                     subsize, get_block_context(x, subsize), INT64_MAX);
       if (last_part_rate != INT_MAX &&
-          bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
+          bsize >= BLOCK_8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
         int rt = 0;
         int64_t dt = 0;
         update_state(cpi, get_block_context(x, subsize), subsize, 0);
@@ -1363,7 +1363,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
   last_part_rate += x->partition_cost[pl][partition];
 
   if (cpi->sf.adjust_partitioning_from_last_frame
-      && partition != PARTITION_SPLIT && bsize > BLOCK_SIZE_SB8X8
+      && partition != PARTITION_SPLIT && bsize > BLOCK_8X8
      && (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows)
      && (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) {
     BLOCK_SIZE_TYPE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
@@ -1426,7 +1426,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
   if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
       < RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
     m->mbmi.sb_type = bsize;
-    if (bsize >= BLOCK_SIZE_SB8X8)
+    if (bsize >= BLOCK_8X8)
       *(get_sb_partitioning(x, bsize)) = subsize;
     chosen_rate = last_part_rate;
     chosen_dist = last_part_dist;
@@ -1434,7 +1434,7 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
   // If none was better set the partitioning to that...
   if (RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)
       > RDCOST(x->rdmult, x->rddiv, none_rate, none_dist)) {
-    if (bsize >= BLOCK_SIZE_SB8X8)
+    if (bsize >= BLOCK_8X8)
       *(get_sb_partitioning(x, bsize)) = bsize;
     chosen_rate = none_rate;
     chosen_dist = none_dist;
@@ -1444,11 +1444,11 @@ static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
 
   // We must have chosen a partitioning and encoding or we'll fail later on.
   // No other opportunities for success.
-  if ( bsize == BLOCK_SIZE_SB64X64)
+  if ( bsize == BLOCK_64X64)
     assert(chosen_rate < INT_MAX && chosen_dist < INT_MAX);
 
   if (do_recon)
-    encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
+    encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize);
 
   *rate = chosen_rate;
   *dist = chosen_dist;
@@ -1526,7 +1526,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
 
   (void) *tp_orig;
 
-  if (bsize < BLOCK_SIZE_SB8X8) {
+  if (bsize < BLOCK_8X8) {
     // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
     // there is nothing to be done.
     if (xd->ab_index != 0) {
@@ -1542,7 +1542,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
   // PARTITION_SPLIT
   if (!cpi->sf.auto_min_max_partition_size ||
       bsize >= cpi->sf.min_partition_size) {
-    if (bsize > BLOCK_SIZE_SB8X8) {
+    if (bsize > BLOCK_8X8) {
       int r4 = 0;
       int64_t d4 = 0, sum_rd = 0;
       subsize = get_subsize(bsize, PARTITION_SPLIT);
@@ -1608,7 +1608,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
       block_context = x->sb8x8_context[xd->sb_index][xd->mb_index];
     } else if (bsize == BLOCK_32X32) {
       block_context = x->mb_context[xd->sb_index];
-    } else if (bsize == BLOCK_SIZE_SB64X64) {
+    } else if (bsize == BLOCK_64X64) {
       block_context = x->sb32_context;
     }
 
@@ -1695,26 +1695,26 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
     int64_t d;
     pick_sb_modes(cpi, mi_row, mi_col, &r, &d, bsize,
                   get_block_context(x, bsize), best_rd);
-    if (r != INT_MAX && bsize >= BLOCK_SIZE_SB8X8) {
+    if (r != INT_MAX && bsize >= BLOCK_8X8) {
       set_partition_seg_context(cm, xd, mi_row, mi_col);
       pl = partition_plane_context(xd, bsize);
       r += x->partition_cost[pl][PARTITION_NONE];
     }
 
     if (r != INT_MAX &&
-        (bsize == BLOCK_SIZE_SB8X8 ||
+        (bsize == BLOCK_8X8 ||
         RDCOST(x->rdmult, x->rddiv, r, d) <
         RDCOST(x->rdmult, x->rddiv, srate, sdist))) {
       best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r, d));
       srate = r;
       sdist = d;
       larger_is_better = 1;
-      if (bsize >= BLOCK_SIZE_SB8X8)
+      if (bsize >= BLOCK_8X8)
         *(get_sb_partitioning(x, bsize)) = bsize;
     }
   }
 
-  if (bsize == BLOCK_SIZE_SB8X8) {
+  if (bsize == BLOCK_8X8) {
     int r4 = 0;
     int64_t d4 = 0, sum_rd = 0;
     subsize = get_subsize(bsize, PARTITION_SPLIT);
@@ -1760,7 +1760,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
   if (!cpi->sf.use_square_partition_only &&
      (!cpi->sf.less_rectangular_check ||!larger_is_better)) {
     // PARTITION_HORZ
-    if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
+    if (bsize >= BLOCK_8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
      int r2, r = 0;
      int64_t d2, d = 0, h_rd;
      subsize = get_subsize(bsize, PARTITION_HORZ);
@@ -1799,7 +1799,7 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
   }
 
   // PARTITION_VERT
-  if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
+  if (bsize >= BLOCK_8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
     int r2;
     int64_t d2, v_rd;
     subsize = get_subsize(bsize, PARTITION_VERT);
@@ -1845,9 +1845,9 @@ static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
   restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
 
   if (srate < INT_MAX && sdist < INT_MAX && do_recon)
-    encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
+    encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_64X64, bsize);
 
-  if (bsize == BLOCK_SIZE_SB64X64) {
+  if (bsize == BLOCK_64X64) {
     assert(tp_orig < *tp);
     assert(srate < INT_MAX);
     assert(sdist < INT_MAX);
@@ -1861,7 +1861,7 @@ static void rd_pick_reference_frame(VP9_COMP *cpi, int mi_row, int mi_col) {
   VP9_COMMON * const cm = &cpi->common;
   MACROBLOCK * const x = &cpi->mb;
   MACROBLOCKD * const xd = &x->e_mbd;
-  int bsl = b_width_log2(BLOCK_SIZE_SB64X64), bs = 1 << bsl;
+  int bsl = b_width_log2(BLOCK_64X64), bs = 1 << bsl;
   int ms = bs / 2;
   ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
   PARTITION_CONTEXT sl[8], sa[8];
@@ -1869,7 +1869,7 @@ static void rd_pick_reference_frame(VP9_COMP *cpi, int mi_row, int mi_col) {
   int r;
   int64_t d;
 
-  save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);
+  save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
 
   // Default is non mask (all reference frames allowed.
   cpi->ref_frame_mask = 0;
@@ -1878,17 +1878,17 @@ static void rd_pick_reference_frame(VP9_COMP *cpi, int mi_row, int mi_col) {
   if ((mi_row + (ms >> 1) < cm->mi_rows) &&
       (mi_col + (ms >> 1) < cm->mi_cols)) {
     cpi->set_ref_frame_mask = 1;
-    pick_sb_modes(cpi, mi_row, mi_col, &r, &d, BLOCK_SIZE_SB64X64,
-                  get_block_context(x, BLOCK_SIZE_SB64X64), INT64_MAX);
+    pick_sb_modes(cpi, mi_row, mi_col, &r, &d, BLOCK_64X64,
+                  get_block_context(x, BLOCK_64X64), INT64_MAX);
     set_partition_seg_context(cm, xd, mi_row, mi_col);
-    pl = partition_plane_context(xd, BLOCK_SIZE_SB64X64);
+    pl = partition_plane_context(xd, BLOCK_64X64);
     r += x->partition_cost[pl][PARTITION_NONE];
 
-    *(get_sb_partitioning(x, BLOCK_SIZE_SB64X64)) = BLOCK_SIZE_SB64X64;
+    *(get_sb_partitioning(x, BLOCK_64X64)) = BLOCK_64X64;
     cpi->set_ref_frame_mask = 0;
   }
 
-  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);
+  restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_64X64);
 }
 
 static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
@@ -1923,13 +1923,13 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
       MODE_INFO *p = cm->prev_mi + idx_str;
 
       if (cpi->sf.use_one_partition_size_always) {
-        set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
+        set_offsets(cpi, mi_row, mi_col, BLOCK_64X64);
         set_partitioning(cpi, m, cpi->sf.always_this_block_size);
-        rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+        rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
                          &dummy_rate, &dummy_dist, 1);
       } else if (cpi->sf.partition_by_variance) {
         choose_partitioning(cpi, cm->mi, mi_row, mi_col);
-        rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+        rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
                          &dummy_rate, &dummy_dist, 1);
       } else {
         if ((cpi->common.current_video_frame
@@ -1944,11 +1944,11 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
                                     &cpi->sf.min_partition_size,
                                     &cpi->sf.max_partition_size);
           }
-          rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+          rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
                             &dummy_rate, &dummy_dist, 1, INT64_MAX);
         } else {
           copy_partitioning(cpi, m, p);
-          rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+          rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_64X64,
                            &dummy_rate, &dummy_dist, 1);
         }
       }
@@ -1959,7 +1959,7 @@ static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
                                 &cpi->sf.max_partition_size);
         }
 
-        rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
+        rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_64X64,
                           &dummy_rate, &dummy_dist, 1, INT64_MAX);
       }
     }
@@ -2276,10 +2276,8 @@ static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
 
   for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
     mi = mi_ptr;
-    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8) {
-      reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col,
-                              BLOCK_SIZE_SB64X64);
-    }
+    for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8)
+      reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col, BLOCK_64X64);
   }
 }
 
@@ -2498,7 +2496,7 @@ static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
   const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
 
   ++cpi->y_uv_mode_count[m][uvm];
-  if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
+  if (xd->mode_info_context->mbmi.sb_type >= BLOCK_8X8) {
     const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
     const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
     const int bsl = MIN(bwl, bhl);
@@ -2579,7 +2577,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
         cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
       else
         cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
-    } else if (mbmi->sb_type < BLOCK_SIZE_SB8X8) {
+    } else if (mbmi->sb_type < BLOCK_8X8) {
       cpi->zbin_mode_boost = SPLIT_MV_ZBIN_BOOST;
     } else {
       cpi->zbin_mode_boost = MV_ZBIN_BOOST;
@@ -2593,10 +2591,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
   }
 
   if (mbmi->ref_frame[0] == INTRA_FRAME) {
-    vp9_encode_intra_block_y(
-        cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
-    vp9_encode_intra_block_uv(
-        cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+    vp9_encode_intra_block_y(cm, x, MAX(bsize, BLOCK_8X8));
+    vp9_encode_intra_block_uv(cm, x, MAX(bsize, BLOCK_8X8));
     if (output_enabled)
       sum_intra_stats(cpi, x);
   } else {
@@ -2616,18 +2612,14 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
                      &xd->scale_factor[1]);
 
-    vp9_build_inter_predictors_sb(
-        xd, mi_row, mi_col,
-        bsize < BLOCK_SIZE_SB8X8 ? BLOCK_SIZE_SB8X8 : bsize);
+    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, MAX(bsize, BLOCK_8X8));
   }
 
-  if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME) {
-    vp9_tokenize_sb(cpi, t, !output_enabled,
-                    (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+  if (mbmi->ref_frame[0] == INTRA_FRAME) {
+    vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
   } else if (!x->skip) {
-    vp9_encode_sb(cm, x, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
-    vp9_tokenize_sb(cpi, t, !output_enabled,
-                    (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+    vp9_encode_sb(cm, x, MAX(bsize, BLOCK_8X8));
+    vp9_tokenize_sb(cpi, t, !output_enabled, MAX(bsize, BLOCK_8X8));
   } else {
     int mb_skip_context = xd->left_available ? (mi - 1)->mbmi.mb_skip_coeff : 0;
     mb_skip_context += (mi - mis)->mbmi.mb_skip_coeff;
@@ -2635,8 +2627,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
     mbmi->mb_skip_coeff = 1;
     if (output_enabled)
       cm->counts.mbskip[mb_skip_context][1]++;
-    vp9_reset_sb_tokens_context(
-        xd, (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
+    vp9_reset_sb_tokens_context(xd, MAX(bsize, BLOCK_8X8));
   }
 
   // copy skip flag on all mb_mode_info contexts in this SB
@@ -2645,7 +2636,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
 
   if (output_enabled) {
     if (cm->tx_mode == TX_MODE_SELECT &&
-        mbmi->sb_type >= BLOCK_SIZE_SB8X8 &&
+        mbmi->sb_type >= BLOCK_8X8 &&
         !(is_inter_block(mbmi) &&
          (mbmi->mb_skip_coeff ||
           vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)))) {
diff --git a/vp9/encoder/vp9_rdopt.c b/vp9/encoder/vp9_rdopt.c
index f9c8f6e1478fc134ba8418411a09041a0c4ae9ab..9f0c75b2d0fb1b46650691fdaf5b124ddb711efb 100644
--- a/vp9/encoder/vp9_rdopt.c
+++ b/vp9/encoder/vp9_rdopt.c
@@ -1190,9 +1190,9 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
   struct macroblockd_plane *pd = &xd->plane[0];
   const int src_stride = p->src.stride;
   const int dst_stride = pd->dst.stride;
-  uint8_t *src_init = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, ib,
+  uint8_t *src_init = raster_block_offset_uint8(xd, BLOCK_8X8, 0, ib,
                                                 p->src.buf, src_stride);
-  uint8_t *dst_init = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, ib,
+  uint8_t *dst_init = raster_block_offset_uint8(xd, BLOCK_8X8, 0, ib,
                                                 pd->dst.buf, dst_stride);
 
   int16_t *src_diff, *coeff;
@@ -1235,7 +1235,7 @@ static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
 
       block = ib + idy * 2 + idx;
       xd->mode_info_context->bmi[block].as_mode = mode;
-      src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, block,
+      src_diff = raster_block_offset_int16(xd, BLOCK_8X8, 0, block,
                                            p->src_diff);
       coeff = BLOCK_OFFSET(x->plane[0].coeff, block, 16);
       vp9_predict_intra_block(xd, block, 1,
@@ -1489,7 +1489,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
   int this_rate_tokenonly, this_rate, s;
   int64_t this_distortion;
 
-  MB_PREDICTION_MODE last_mode = bsize <= BLOCK_SIZE_SB8X8 ?
+  MB_PREDICTION_MODE last_mode = bsize <= BLOCK_8X8 ?
       TM_PRED : cpi->sf.last_chroma_intra_mode;
 
   for (mode = DC_PRED; mode <= last_mode; mode++) {
@@ -1541,15 +1541,13 @@ static void choose_intra_uv_mode(VP9_COMP *cpi, BLOCK_SIZE_TYPE bsize,
   // appropriate speed flag is set.
   if (cpi->sf.use_uv_intra_rd_estimate) {
     rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
-                   (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 :
-                                                bsize);
+                   bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
   // Else do a proper rd search for each possible transform size that may
   // be considered in the main rd loop.
   } else {
     rd_pick_intra_sbuv_mode(cpi, x,
                             rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
-                            (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8
-                                                       : bsize);
+                            bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
   }
   *mode_uv = x->e_mbd.mode_info_context->mbmi.uv_mode;
 }
@@ -1674,16 +1672,16 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
   const int height = plane_block_height(bsize, pd);
   int idx, idy;
   const int src_stride = x->plane[0].src.stride;
-  uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+  uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
                                                  x->plane[0].src.buf,
                                                  src_stride);
-  int16_t* src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, i,
+  int16_t* src_diff = raster_block_offset_int16(xd, BLOCK_8X8, 0, i,
                                                 x->plane[0].src_diff);
   int16_t* coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, i);
-  uint8_t* const pre = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+  uint8_t* const pre = raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
                                                  pd->pre[0].buf,
                                                  pd->pre[0].stride);
-  uint8_t* const dst = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+  uint8_t* const dst = raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
                                                  pd->dst.buf,
                                                  pd->dst.stride);
   int64_t thisdistortion = 0, thissse = 0;
@@ -1697,7 +1695,7 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
 
   if (mi->mbmi.ref_frame[1] > 0) {
     uint8_t* const second_pre =
-        raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
+        raster_block_offset_uint8(xd, BLOCK_8X8, 0, i,
                                   pd->pre[1].buf, pd->pre[1].stride);
     vp9_build_inter_predictor(second_pre, pd->pre[1].stride,
                               dst, pd->dst.stride,
@@ -1715,7 +1713,7 @@ static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
       int64_t ssz, rd, rd1, rd2;
 
       k += (idy * 2 + idx);
-      src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, k,
+      src_diff = raster_block_offset_int16(xd, BLOCK_8X8, 0, k,
                                            x->plane[0].src_diff);
       coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, k);
       x->fwd_txm4x4(src_diff, coeff, 16);
@@ -1780,17 +1778,17 @@ static INLINE int mv_check_bounds(MACROBLOCK *x, int_mv *mv) {
 
 static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
   MB_MODE_INFO *mbmi = &x->e_mbd.mode_info_context->mbmi;
   x->plane[0].src.buf =
-      raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+      raster_block_offset_uint8(&x->e_mbd, BLOCK_8X8, 0, i,
                                 x->plane[0].src.buf,
                                 x->plane[0].src.stride);
   assert(((intptr_t)x->e_mbd.plane[0].pre[0].buf & 0x7) == 0);
   x->e_mbd.plane[0].pre[0].buf =
-      raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+      raster_block_offset_uint8(&x->e_mbd, BLOCK_8X8, 0, i,
                                 x->e_mbd.plane[0].pre[0].buf,
                                 x->e_mbd.plane[0].pre[0].stride);
   if (mbmi->ref_frame[1])
     x->e_mbd.plane[0].pre[1].buf =
-        raster_block_offset_uint8(&x->e_mbd, BLOCK_SIZE_SB8X8, 0, i,
+        raster_block_offset_uint8(&x->e_mbd, BLOCK_8X8, 0, i,
                                   x->e_mbd.plane[0].pre[1].buf,
                                   x->e_mbd.plane[0].pre[1].stride);
 }
@@ -2217,7 +2215,7 @@ static int64_t rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
   *returntotrate = bsi->r;
   *returndistortion = bsi->d;
   *returnyrate = bsi->segment_yrate;
-  *skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_SIZE_SB8X8);
+  *skippable = vp9_sby_is_skippable(&x->e_mbd, BLOCK_8X8);
   *psse = bsi->sse;
   mbmi->mode = bsi->modes[3];
 
@@ -3132,7 +3130,7 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
   x->skip_encode = 0;
   ctx->skip = 0;
   xd->mode_info_context->mbmi.ref_frame[0] = INTRA_FRAME;
-  if (bsize >= BLOCK_SIZE_SB8X8) {
+  if (bsize >= BLOCK_8X8) {
     if (rd_pick_intra_sby_mode(cpi, x, &rate_y, &rate_y_tokenonly,
                                &dist_y, &y_skip, bsize, tx_cache,
                                best_rd) >= best_rd) {
@@ -3149,7 +3147,7 @@ void vp9_rd_pick_intra_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
       return;
     }
     rd_pick_intra_sbuv_mode(cpi, x, &rate_uv, &rate_uv_tokenonly,
-                            &dist_uv, &uv_skip, BLOCK_SIZE_SB8X8);
+                            &dist_uv, &uv_skip, BLOCK_8X8);
   }
 
   if (y_skip && uv_skip) {
@@ -3355,7 +3353,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
     if (x->fast_ms > 2 && ref_frame != x->subblock_ref)
       continue;
 
-    if (cpi->sf.use_avoid_tested_higherror && bsize >= BLOCK_SIZE_SB8X8) {
+    if (cpi->sf.use_avoid_tested_higherror && bsize >= BLOCK_8X8) {
       if (!(ref_frame_mask & (1 << ref_frame))) {
         continue;
       }
@@ -3413,10 +3411,10 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
       mbmi->interp_filter = cm->mcomp_filter_type;
       vp9_setup_interp_filters(xd, mbmi->interp_filter, &cpi->common);
 
-      if (bsize >= BLOCK_SIZE_SB8X8 &&
+      if (bsize >= BLOCK_8X8 &&
          (this_mode == I4X4_PRED || this_mode == SPLITMV))
         continue;
-      if (bsize < BLOCK_SIZE_SB8X8 &&
+      if (bsize < BLOCK_8X8 &&
          !(this_mode == I4X4_PRED || this_mode == SPLITMV))
         continue;
 
@@ -3706,11 +3704,10 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
           // If even the 'Y' rd value of split is higher than best so far
           // then dont bother looking at UV
           vp9_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col,
-                                          BLOCK_SIZE_SB8X8);
-          vp9_subtract_sbuv(x, BLOCK_SIZE_SB8X8);
+                                          BLOCK_8X8);
+          vp9_subtract_sbuv(x, BLOCK_8X8);
           super_block_uvrd_for_txfm(cm, x, &rate_uv, &distortion_uv,
-                                    &uv_skippable, &uv_sse,
-                                    BLOCK_SIZE_SB8X8, TX_4X4);
+                                    &uv_skippable, &uv_sse, BLOCK_8X8, TX_4X4);
           rate2 += rate_uv;
           distortion2 += distortion_uv;
           skippable = skippable && uv_skippable;
@@ -3756,7 +3753,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
         const int mb_skip_allowed = !vp9_segfeature_active(seg, segment_id,
                                                            SEG_LVL_SKIP);
 
-        if (skippable && bsize >= BLOCK_SIZE_SB8X8) {
+        if (skippable && bsize >= BLOCK_8X8) {
           // Back out the coefficient coding costs
           rate2 -= (rate_y + rate_uv);
           // for best yrd calculation
@@ -3985,8 +3982,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
                               &rate_uv_tokenonly[uv_tx_size],
                               &dist_uv[uv_tx_size],
                               &skip_uv[uv_tx_size],
-                              (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8
-                                                         : bsize);
+                              bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize);
     }
   }
 
@@ -4021,7 +4017,7 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
     }
   }
 
-  if (best_rd == INT64_MAX && bsize < BLOCK_SIZE_SB8X8) {
+  if (best_rd == INT64_MAX && bsize < BLOCK_8X8) {
     *returnrate = INT_MAX;
     *returndistortion = INT_MAX;
     return best_rd;
@@ -4070,13 +4066,13 @@ int64_t vp9_rd_pick_inter_mode_sb(VP9_COMP *cpi, MACROBLOCK *x,
   *mbmi = best_mbmode;
   x->skip |= best_skip2;
   if (best_mbmode.ref_frame[0] == INTRA_FRAME &&
-      best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
+      best_mbmode.sb_type < BLOCK_8X8) {
     for (i = 0; i < 4; i++)
       xd->mode_info_context->bmi[i].as_mode = best_bmodes[i].as_mode;
   }
 
   if (best_mbmode.ref_frame[0] != INTRA_FRAME &&
-      best_mbmode.sb_type < BLOCK_SIZE_SB8X8) {
+      best_mbmode.sb_type < BLOCK_8X8) {
     for (i = 0; i < 4; i++)
       xd->mode_info_context->bmi[i].as_mv[0].as_int =
          best_bmodes[i].as_mv[0].as_int;
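
Editor's note (not part of the patch): the rename is behavior-preserving because the new enumerators keep the exact ordering of the old aliases, so every ordinal comparison (`bsize >= BLOCK_8X8`, `sb_type < BLOCK_8X8`) and the `MAX(bsize, BLOCK_8X8)` clamp that replaces the ternary `(bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize` in encode_superblock() evaluate identically. The standalone C sketch below checks that equivalence for every block size; the `MAX` macro here is a local stand-in (an assumption for illustration), since the libvpx tree supplies its own definition.

/*
 * Standalone sketch -- not from the patch. It mirrors the renamed enum from
 * vp9_enums.h and verifies that MAX(bsize, BLOCK_8X8) matches the old
 * ternary spelling for every block size. MAX is a local stand-in
 * (assumption); libvpx provides its own definition.
 */
#include <assert.h>

typedef enum BLOCK_SIZE_TYPE {
  BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, BLOCK_8X8,
  BLOCK_8X16, BLOCK_16X8, BLOCK_16X16, BLOCK_16X32,
  BLOCK_32X16, BLOCK_32X32, BLOCK_32X64, BLOCK_64X32,
  BLOCK_64X64, BLOCK_SIZE_TYPES
} BLOCK_SIZE_TYPE;

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void) {
  int bsize;
  for (bsize = BLOCK_4X4; bsize < BLOCK_SIZE_TYPES; ++bsize) {
    /* Old spelling: (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize */
    const int clamped = bsize < BLOCK_8X8 ? BLOCK_8X8 : bsize;
    assert(MAX(bsize, BLOCK_8X8) == clamped);  /* identical for all sizes */
  }
  return 0;
}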