Commit 52323267 authored by Scott LaVarnway

VP9: Eliminate MB_MODE_INFO

Change-Id: Ifa607dd2bb366ce09fa16dfcad3cc45a2440c185
parent c0307e6c
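
In short, this change flattens the old two-level mode-info layout into a single struct, so every x->mbmi.field access in the tree becomes x->field. A minimal before/after sketch, reconstructed from the vp9_blockd.h hunks below; the stub typedefs and the OLD_/NEW_ names are ours, standing in for the real definitions:

/* Stand-in typedefs so the sketch compiles in isolation; the real
 * definitions live in vp9_enums.h, vp9_mv.h, and vp9_blockd.h. */
typedef int BLOCK_SIZE;
typedef int PREDICTION_MODE;
typedef union { struct { short row, col; } as_mv; int as_int; } int_mv;
typedef struct { int_mv as_mv[2]; } b_mode_info;

/* Before: per-8x8 data split across two structs. */
typedef struct {
  BLOCK_SIZE sb_type;
  PREDICTION_MODE mode;
  /* tx_size, skip, segment_id, ref_frame[2], ... (see the hunks) */
  int_mv mv[2];
} OLD_MB_MODE_INFO;
typedef struct {
  OLD_MB_MODE_INFO mbmi;
  b_mode_info bmi[4];
} OLD_MODE_INFO;

/* After: one struct, one level of access. */
typedef struct {
  BLOCK_SIZE sb_type;
  PREDICTION_MODE mode;
  /* tx_size, skip, segment_id, ref_frame[2], ... (see the hunks) */
  int_mv mv[2];
  b_mode_info bmi[4];
} NEW_MODE_INFO;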
......@@ -13,7 +13,7 @@
PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *left_mi, int b) {
if (b == 0 || b == 2) {
if (!left_mi || is_inter_block(&left_mi->mbmi))
if (!left_mi || is_inter_block(left_mi))
return DC_PRED;
return get_y_mode(left_mi, b + 1);
......@@ -26,7 +26,7 @@ PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
PREDICTION_MODE vp9_above_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *above_mi, int b) {
if (b == 0 || b == 1) {
if (!above_mi || is_inter_block(&above_mi->mbmi))
if (!above_mi || is_inter_block(above_mi))
return DC_PRED;
return get_y_mode(above_mi, b + 2);
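
The b indices here address the four 4x4 sub-blocks of an 8x8 MODE_INFO in raster order, which is why the neighbor lookups land on b + 1 and b + 2. A short sketch of the geometry (the diagram is ours; the layout follows from the code above):

/* 4x4 sub-block layout inside one 8x8 MODE_INFO (raster order):
 *     b=0  b=1
 *     b=2  b=3
 * For b in the left column (0, 2), the left neighbor is the right
 * column of the MI to the left, i.e. its b + 1; for b in the top
 * row (0, 1), the above neighbor is the bottom row of the MI
 * above, i.e. its b + 2. */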
......@@ -40,12 +40,12 @@ void vp9_foreach_transformed_block_in_plane(
const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
foreach_transformed_block_visitor visit, void *arg) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
const MB_MODE_INFO* mbmi = &xd->mi[0]->mbmi;
const MODE_INFO* mi = xd->mi[0];
// block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
// 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
// transform size varies per plane, look it up in a common way.
const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd)
: mbmi->tx_size;
const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd)
: mi->tx_size;
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
......
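
To make the "*_b" convention above concrete: the values are log2 of the number of 4x4 units a block covers, so an NxN block yields 2 * log2(N / 4). A self-contained check (this snippet is ours, not part of the tree):

#include <assert.h>

/* size_b(N) = log2 of the count of 4x4 units in an NxN block. */
static int size_b(int n) {
  int l = 0, s = n / 4;
  while (s > 1) { s >>= 1; ++l; }  /* l = log2(N / 4) */
  return 2 * l;                    /* (N/4)*(N/4) units total */
}

int main(void) {
  assert(size_b(4) == 0 && size_b(8) == 2 && size_b(16) == 4);
  assert(size_b(32) == 6 && size_b(64) == 8);
  return 0;
}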
......@@ -64,7 +64,7 @@ typedef struct {
typedef int8_t MV_REFERENCE_FRAME;
// This structure now relates to 8x8 block regions.
typedef struct {
typedef struct MODE_INFO {
// Common for both INTER and INTRA blocks
BLOCK_SIZE sb_type;
PREDICTION_MODE mode;
......@@ -82,24 +82,21 @@ typedef struct {
// TODO(slavarnway): Delete and use bmi[3].as_mv[] instead.
int_mv mv[2];
} MB_MODE_INFO;
typedef struct MODE_INFO {
MB_MODE_INFO mbmi;
b_mode_info bmi[4];
} MODE_INFO;
static INLINE PREDICTION_MODE get_y_mode(const MODE_INFO *mi, int block) {
return mi->mbmi.sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode
: mi->mbmi.mode;
return mi->sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode
: mi->mode;
}
static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) {
return mbmi->ref_frame[0] > INTRA_FRAME;
static INLINE int is_inter_block(const MODE_INFO *mi) {
return mi->ref_frame[0] > INTRA_FRAME;
}
static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
return mbmi->ref_frame[1] > INTRA_FRAME;
static INLINE int has_second_ref(const MODE_INFO *mi) {
return mi->ref_frame[1] > INTRA_FRAME;
}
PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
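
With the struct flattened, the inline predicates take a MODE_INFO directly and call sites drop the &...->mbmi indirection. A hypothetical caller showing the new pattern (num_ref_mvs is our illustration, assuming vp9_blockd.h is included; it is not part of the tree):

static int num_ref_mvs(const MODE_INFO *mi) {
  if (!is_inter_block(mi)) return 0;  /* intra: no motion vectors */
  return has_second_ref(mi) ? 2 : 1;  /* compound vs. single reference */
}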
......@@ -160,8 +157,6 @@ typedef struct macroblockd {
MODE_INFO **mi;
MODE_INFO *left_mi;
MODE_INFO *above_mi;
MB_MODE_INFO *left_mbmi;
MB_MODE_INFO *above_mbmi;
int up_available;
int left_available;
......@@ -212,19 +207,19 @@ extern const TX_TYPE intra_mode_to_tx_type_lookup[INTRA_MODES];
static INLINE TX_TYPE get_tx_type(PLANE_TYPE plane_type,
const MACROBLOCKD *xd) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const MODE_INFO *const mi = xd->mi[0];
if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mbmi))
if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mi))
return DCT_DCT;
return intra_mode_to_tx_type_lookup[mbmi->mode];
return intra_mode_to_tx_type_lookup[mi->mode];
}
static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
const MACROBLOCKD *xd, int ib) {
const MODE_INFO *const mi = xd->mi[0];
if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(&mi->mbmi))
if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mi))
return DCT_DCT;
return intra_mode_to_tx_type_lookup[get_y_mode(mi, ib)];
......@@ -242,9 +237,9 @@ static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize,
}
}
static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi,
static INLINE TX_SIZE get_uv_tx_size(const MODE_INFO *mi,
const struct macroblockd_plane *pd) {
return get_uv_tx_size_impl(mbmi->tx_size, mbmi->sb_type, pd->subsampling_x,
return get_uv_tx_size_impl(mi->tx_size, mi->sb_type, pd->subsampling_x,
pd->subsampling_y);
}
......
......@@ -35,7 +35,7 @@ static void print_mi_data(VP9_COMMON *cm, FILE *file, const char *descriptor,
fprintf(file, "%c ", prefix);
for (mi_col = 0; mi_col < cols; mi_col++) {
fprintf(file, "%2d ",
*((int*) ((char *) (&mi[0]->mbmi) +
*((int*) ((char *) (mi[0]) +
member_offset)));
mi++;
}
......@@ -53,18 +53,18 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) {
int rows = cm->mi_rows;
int cols = cm->mi_cols;
print_mi_data(cm, mvs, "Partitions:", offsetof(MB_MODE_INFO, sb_type));
print_mi_data(cm, mvs, "Modes:", offsetof(MB_MODE_INFO, mode));
print_mi_data(cm, mvs, "Ref frame:", offsetof(MB_MODE_INFO, ref_frame[0]));
print_mi_data(cm, mvs, "Transform:", offsetof(MB_MODE_INFO, tx_size));
print_mi_data(cm, mvs, "UV Modes:", offsetof(MB_MODE_INFO, uv_mode));
print_mi_data(cm, mvs, "Partitions:", offsetof(MODE_INFO, sb_type));
print_mi_data(cm, mvs, "Modes:", offsetof(MODE_INFO, mode));
print_mi_data(cm, mvs, "Ref frame:", offsetof(MODE_INFO, ref_frame[0]));
print_mi_data(cm, mvs, "Transform:", offsetof(MODE_INFO, tx_size));
print_mi_data(cm, mvs, "UV Modes:", offsetof(MODE_INFO, uv_mode));
// output skip information.
log_frame_info(cm, "Skips:", mvs);
for (mi_row = 0; mi_row < rows; mi_row++) {
fprintf(mvs, "S ");
for (mi_col = 0; mi_col < cols; mi_col++) {
fprintf(mvs, "%2d ", mi[0]->mbmi.skip);
fprintf(mvs, "%2d ", mi[0]->skip);
mi++;
}
fprintf(mvs, "\n");
......@@ -78,8 +78,8 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) {
for (mi_row = 0; mi_row < rows; mi_row++) {
fprintf(mvs, "V ");
for (mi_col = 0; mi_col < cols; mi_col++) {
fprintf(mvs, "%4d:%4d ", mi[0]->mbmi.mv[0].as_mv.row,
mi[0]->mbmi.mv[0].as_mv.col);
fprintf(mvs, "%4d:%4d ", mi[0]->mv[0].as_mv.row,
mi[0]->mv[0].as_mv.col);
mi++;
}
fprintf(mvs, "\n");
......
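
The printer above reads an arbitrary member as an int through offsetof plus char* arithmetic, which is why the call sites only had to retarget offsetof from MB_MODE_INFO to MODE_INFO. A standalone miniature of the same trick (our example; like the debug code, it assumes the member is int-sized at that offset):

#include <stddef.h>
#include <stdio.h>

struct S { int a; int b; };

static void print_member(const struct S *s, size_t off) {
  printf("%2d ", *(const int *)((const char *)s + off));
}

int main(void) {
  struct S s = { 1, 7 };
  print_member(&s, offsetof(struct S, b));  /* prints " 7 " */
  return 0;
}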
......@@ -232,9 +232,9 @@ static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) {
}
static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
const MB_MODE_INFO *mbmi) {
return lfi_n->lvl[mbmi->segment_id][mbmi->ref_frame[0]]
[mode_lf_lut[mbmi->mode]];
const MODE_INFO *mi) {
return lfi_n->lvl[mi->segment_id][mi->ref_frame[0]]
[mode_lf_lut[mi->mode]];
}
void vp9_loop_filter_init(VP9_COMMON *cm) {
......@@ -709,11 +709,10 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
const MODE_INFO *mi, const int shift_y,
const int shift_uv,
LOOP_FILTER_MASK *lfm) {
const MB_MODE_INFO *mbmi = &mi->mbmi;
const BLOCK_SIZE block_size = mbmi->sb_type;
const TX_SIZE tx_size_y = mbmi->tx_size;
const BLOCK_SIZE block_size = mi->sb_type;
const TX_SIZE tx_size_y = mi->tx_size;
const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
const int filter_level = get_filter_level(lfi_n, mbmi);
const int filter_level = get_filter_level(lfi_n, mi);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
uint64_t *const above_y = &lfm->above_y[tx_size_y];
uint64_t *const int_4x4_y = &lfm->int_4x4_y;
......@@ -754,7 +753,7 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
// If the block has no coefficients and is not intra, we skip applying
// the loop filter on block edges.
if (mbmi->skip && is_inter_block(mbmi))
if (mi->skip && is_inter_block(mi))
return;
// Here we are adding a mask for the transform size. The transform
......@@ -788,10 +787,9 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
static void build_y_mask(const loop_filter_info_n *const lfi_n,
const MODE_INFO *mi, const int shift_y,
LOOP_FILTER_MASK *lfm) {
const MB_MODE_INFO *mbmi = &mi->mbmi;
const BLOCK_SIZE block_size = mbmi->sb_type;
const TX_SIZE tx_size_y = mbmi->tx_size;
const int filter_level = get_filter_level(lfi_n, mbmi);
const BLOCK_SIZE block_size = mi->sb_type;
const TX_SIZE tx_size_y = mi->tx_size;
const int filter_level = get_filter_level(lfi_n, mi);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
uint64_t *const above_y = &lfm->above_y[tx_size_y];
uint64_t *const int_4x4_y = &lfm->int_4x4_y;
......@@ -812,7 +810,7 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
*above_y |= above_prediction_mask[block_size] << shift_y;
*left_y |= left_prediction_mask[block_size] << shift_y;
if (mbmi->skip && is_inter_block(mbmi))
if (mi->skip && is_inter_block(mi))
return;
*above_y |= (size_mask[block_size] &
......@@ -980,7 +978,7 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
// TODO(jimbankoski): Try moving most of the following code into decode
// loop and storing lfm in the mbmi structure so that we don't have to go
// through the recursive loop structure multiple times.
switch (mip[0]->mbmi.sb_type) {
switch (mip[0]->sb_type) {
case BLOCK_64X64:
build_masks(lfi_n, mip[0] , 0, 0, lfm);
break;
......@@ -1006,7 +1004,7 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
const int mi_32_row_offset = ((idx_32 >> 1) << 2);
if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
continue;
switch (mip[0]->mbmi.sb_type) {
switch (mip[0]->sb_type) {
case BLOCK_32X32:
build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
break;
......@@ -1036,7 +1034,7 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
continue;
switch (mip[0]->mbmi.sb_type) {
switch (mip[0]->sb_type) {
case BLOCK_16X16:
build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
break;
......@@ -1186,8 +1184,8 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm,
// Determine the vertical edges that need filtering
for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
const MODE_INFO *mi = mi_8x8[c];
const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type;
const int skip_this = mi[0].mbmi.skip && is_inter_block(&mi[0].mbmi);
const BLOCK_SIZE sb_type = mi[0].sb_type;
const int skip_this = mi[0].skip && is_inter_block(mi);
// left edge of current unit is block/partition edge -> no skip
const int block_edge_left = (num_4x4_blocks_wide_lookup[sb_type] > 1) ?
!(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) : 1;
......@@ -1196,13 +1194,13 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm,
const int block_edge_above = (num_4x4_blocks_high_lookup[sb_type] > 1) ?
!(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1;
const int skip_this_r = skip_this && !block_edge_above;
const TX_SIZE tx_size = get_uv_tx_size(&mi[0].mbmi, plane);
const TX_SIZE tx_size = get_uv_tx_size(mi, plane);
const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
// Filter level can vary per MI
if (!(lfl[(r << 3) + (c >> ss_x)] =
get_filter_level(&cm->lf_info, &mi[0].mbmi)))
get_filter_level(&cm->lf_info, mi)))
continue;
// Build masks based on the transform size of each block
......@@ -1640,12 +1638,12 @@ static const uint8_t first_block_in_16x16[8][8] = {
// This function sets up the bit masks for a block represented
// by mi_row, mi_col in a 64x64 region.
// TODO(SJL): This function only works for yv12.
void vp9_build_mask(VP9_COMMON *cm, const MB_MODE_INFO *mbmi, int mi_row,
void vp9_build_mask(VP9_COMMON *cm, const MODE_INFO *mi, int mi_row,
int mi_col, int bw, int bh) {
const BLOCK_SIZE block_size = mbmi->sb_type;
const TX_SIZE tx_size_y = mbmi->tx_size;
const BLOCK_SIZE block_size = mi->sb_type;
const TX_SIZE tx_size_y = mi->tx_size;
const loop_filter_info_n *const lfi_n = &cm->lf_info;
const int filter_level = get_filter_level(lfi_n, mbmi);
const int filter_level = get_filter_level(lfi_n, mi);
const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
LOOP_FILTER_MASK *const lfm = get_lfm(&cm->lf, mi_row, mi_col);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
......@@ -1693,7 +1691,7 @@ void vp9_build_mask(VP9_COMMON *cm, const MB_MODE_INFO *mbmi, int mi_row,
// If the block has no coefficients and is not intra, we skip applying
// the loop filter on block edges.
if (mbmi->skip && is_inter_block(mbmi))
if (mi->skip && is_inter_block(mi))
return;
// Add a mask for the transform size. The transform size mask is set to
......
......@@ -134,7 +134,7 @@ static INLINE LOOP_FILTER_MASK *get_lfm(const struct loopfilter *lf,
return &lf->lfm[(mi_col >> 3) + ((mi_row >> 3) * lf->lfm_stride)];
}
void vp9_build_mask(struct VP9Common *cm, const MB_MODE_INFO *mbmi, int mi_row,
void vp9_build_mask(struct VP9Common *cm, const MODE_INFO *mi, int mi_row,
int mi_col, int bw, int bh);
void vp9_adjust_mask(struct VP9Common *const cm, const int mi_row,
const int mi_col, LOOP_FILTER_MASK *lfm);
......
......@@ -203,12 +203,12 @@ static void mfqe_block(BLOCK_SIZE bs, const uint8_t *y, const uint8_t *u,
static int mfqe_decision(MODE_INFO *mi, BLOCK_SIZE cur_bs) {
// Check the motion in the current block (for an inter frame),
// or the motion in the correlated block in the last frame (for a keyframe).
const int mv_len_square = mi->mbmi.mv[0].as_mv.row *
mi->mbmi.mv[0].as_mv.row +
mi->mbmi.mv[0].as_mv.col *
mi->mbmi.mv[0].as_mv.col;
const int mv_len_square = mi->mv[0].as_mv.row *
mi->mv[0].as_mv.row +
mi->mv[0].as_mv.col *
mi->mv[0].as_mv.col;
const int mv_threshold = 100;
return mi->mbmi.mode >= NEARESTMV && // Not an intra block
return mi->mode >= NEARESTMV && // Not an intra block
cur_bs >= BLOCK_16X16 &&
mv_len_square <= mv_threshold;
}
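
For scale: the threshold of 100 on the squared MV length admits motion of up to 10 in eighth-pel units, i.e. 1.25 pixels. A minimal restatement with two sample vectors (the sample values are ours):

#include <assert.h>

static int motion_is_small(int row, int col) {
  const int mv_threshold = 100;  /* same constant as above */
  return row * row + col * col <= mv_threshold;
}

int main(void) {
  assert(motion_is_small(6, 8));   /* 36 + 64 = 100: passes */
  assert(!motion_is_small(8, 8));  /* 128 > 100: rejected   */
  return 0;
}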
......@@ -220,7 +220,7 @@ static void mfqe_partition(VP9_COMMON *cm, MODE_INFO *mi, BLOCK_SIZE bs,
uint8_t *yd, uint8_t *ud, uint8_t *vd,
int yd_stride, int uvd_stride) {
int mi_offset, y_offset, uv_offset;
const BLOCK_SIZE cur_bs = mi->mbmi.sb_type;
const BLOCK_SIZE cur_bs = mi->sb_type;
const int qdiff = cm->base_qindex - cm->postproc_state.last_base_qindex;
const int bsl = b_width_log2_lookup[bs];
PARTITION_TYPE partition = partition_lookup[bsl][cur_bs];
......
......@@ -20,7 +20,7 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
uint8_t *mode_context) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
const POSITION *const mv_ref_search = mv_ref_blocks[mi->sb_type];
int different_ref_found = 0;
int context_counter = 0;
const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ?
......@@ -38,15 +38,14 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
xd->mi_stride];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
// Keep counts for entropy encoding.
context_counter += mode_2_counter[candidate->mode];
context_counter += mode_2_counter[candidate_mi->mode];
different_ref_found = 1;
if (candidate->ref_frame[0] == ref_frame)
if (candidate_mi->ref_frame[0] == ref_frame)
ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, block),
refmv_count, mv_ref_list, Done);
else if (candidate->ref_frame[1] == ref_frame)
else if (candidate_mi->ref_frame[1] == ref_frame)
ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 1, mv_ref->col, block),
refmv_count, mv_ref_list, Done);
}
......@@ -58,14 +57,14 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
for (; i < MVREF_NEIGHBOURS; ++i) {
const POSITION *const mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MB_MODE_INFO *const candidate =
&xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]->mbmi;
const MODE_INFO *const candidate_mi =
xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
different_ref_found = 1;
if (candidate->ref_frame[0] == ref_frame)
ADD_MV_REF_LIST(candidate->mv[0], refmv_count, mv_ref_list, Done);
else if (candidate->ref_frame[1] == ref_frame)
ADD_MV_REF_LIST(candidate->mv[1], refmv_count, mv_ref_list, Done);
if (candidate_mi->ref_frame[0] == ref_frame)
ADD_MV_REF_LIST(candidate_mi->mv[0], refmv_count, mv_ref_list, Done);
else if (candidate_mi->ref_frame[1] == ref_frame)
ADD_MV_REF_LIST(candidate_mi->mv[1], refmv_count, mv_ref_list, Done);
}
}
......@@ -85,11 +84,11 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
const POSITION *mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MB_MODE_INFO *const candidate =
&xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]->mbmi;
const MODE_INFO *const candidate_mi =
xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
// If the candidate is INTRA we don't want to consider its mv.
IF_DIFF_REF_FRAME_ADD_MV(candidate, ref_frame, ref_sign_bias,
IF_DIFF_REF_FRAME_ADD_MV(candidate_mi, ref_frame, ref_sign_bias,
refmv_count, mv_ref_list, Done);
}
}
......@@ -163,7 +162,7 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
assert(MAX_MV_REF_CANDIDATES == 2);
find_mv_refs_idx(cm, xd, mi, mi->mbmi.ref_frame[ref], mv_list, block,
find_mv_refs_idx(cm, xd, mi, mi->ref_frame[ref], mv_list, block,
mi_row, mi_col, mode_context);
near_mv->as_int = 0;
......
......@@ -136,19 +136,19 @@ static INLINE void clamp_mv_ref(MV *mv, const MACROBLOCKD *xd) {
// on whether the block_size < 8x8 and we have check_sub_blocks set.
static INLINE int_mv get_sub_block_mv(const MODE_INFO *candidate, int which_mv,
int search_col, int block_idx) {
return block_idx >= 0 && candidate->mbmi.sb_type < BLOCK_8X8
return block_idx >= 0 && candidate->sb_type < BLOCK_8X8
? candidate->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]]
.as_mv[which_mv]
: candidate->mbmi.mv[which_mv];
: candidate->mv[which_mv];
}
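
Stripped of the search-column remapping, the rule above is: sub-8x8 partitions carry one MV per 4x4 unit in bmi[], while 8x8 and larger blocks share mi->mv[]. A simplified variant (block_mv is our name; the real function additionally remaps the index through idx_n_column_to_subblock):

static int_mv block_mv(const MODE_INFO *mi, int which_mv, int b) {
  return mi->sb_type < BLOCK_8X8 ? mi->bmi[b].as_mv[which_mv]
                                 : mi->mv[which_mv];
}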
// Performs mv sign inversion if indicated by the reference frame combination.
static INLINE int_mv scale_mv(const MB_MODE_INFO *mbmi, int ref,
static INLINE int_mv scale_mv(const MODE_INFO *mi, int ref,
const MV_REFERENCE_FRAME this_ref_frame,
const int *ref_sign_bias) {
int_mv mv = mbmi->mv[ref];
if (ref_sign_bias[mbmi->ref_frame[ref]] != ref_sign_bias[this_ref_frame]) {
int_mv mv = mi->mv[ref];
if (ref_sign_bias[mi->ref_frame[ref]] != ref_sign_bias[this_ref_frame]) {
mv.as_mv.row *= -1;
mv.as_mv.col *= -1;
}
......
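
Isolated from the struct change, the sign-bias rule in scale_mv() is simply: negate the candidate MV when its reference frame and the target reference frame point in opposite temporal directions. A self-contained restatement (ours):

static void maybe_invert_mv(short *row, short *col,
                            int candidate_bias, int this_bias) {
  if (candidate_bias != this_bias) {  /* opposite temporal sides */
    *row = (short)-*row;
    *col = (short)-*col;
  }
}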
......@@ -406,22 +406,17 @@ static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
// Are edges available for intra prediction?
xd->up_available = (mi_row != 0);
xd->left_available = (mi_col > tile->mi_col_start);
// TODO(slavarnway): eliminate up/left available ???
if (xd->up_available) {
xd->above_mi = xd->mi[-xd->mi_stride];
// above_mi may be NULL in VP9 encoder's first pass.
xd->above_mbmi = xd->above_mi ? &xd->above_mi->mbmi : NULL;
} else {
xd->above_mi = NULL;
xd->above_mbmi = NULL;
}
if (xd->left_available) {
xd->left_mi = xd->mi[-1];
// left_mi may be NULL in VP9 encoder's first pass.
xd->left_mbmi = xd->left_mi ? &xd->left_mi->mbmi : NULL;
} else {
xd->left_mi = NULL;
xd->left_mbmi = NULL;
}
}
......
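
With the cached left_mbmi/above_mbmi pointers gone, readers use xd->left_mi and xd->above_mi directly and must keep the NULL checks, since either pointer can be NULL at frame and tile borders and in the encoder's first pass. The pattern the pred_common hunks below converge on, as one hypothetical helper (neighbor_skip_sum is our name, assuming vp9_blockd.h is included):

static int neighbor_skip_sum(const MACROBLOCKD *xd) {
  const int above = xd->above_mi ? xd->above_mi->skip : 0;
  const int left  = xd->left_mi  ? xd->left_mi->skip  : 0;
  return above + left;
}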
......@@ -42,8 +42,8 @@ static INLINE int vp9_get_pred_context_seg_id(const MACROBLOCKD *xd) {
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int above_sip = (above_mi != NULL) ?
above_mi->mbmi.seg_id_predicted : 0;
const int left_sip = (left_mi != NULL) ? left_mi->mbmi.seg_id_predicted : 0;
above_mi->seg_id_predicted : 0;
const int left_sip = (left_mi != NULL) ? left_mi->seg_id_predicted : 0;
return above_sip + left_sip;
}
......@@ -56,8 +56,8 @@ static INLINE vpx_prob vp9_get_pred_prob_seg_id(const struct segmentation *seg,
static INLINE int vp9_get_skip_context(const MACROBLOCKD *xd) {
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int above_skip = (above_mi != NULL) ? above_mi->mbmi.skip : 0;
const int left_skip = (left_mi != NULL) ? left_mi->mbmi.skip : 0;
const int above_skip = (above_mi != NULL) ? above_mi->skip : 0;
const int left_skip = (left_mi != NULL) ? left_mi->skip : 0;
return above_skip + left_skip;
}
......@@ -110,15 +110,15 @@ static INLINE vpx_prob vp9_get_pred_prob_single_ref_p2(const VP9_COMMON *cm,
// left of the entries corresponding to real blocks.
// The prediction flags in these dummy entries are initialized to 0.
static INLINE int get_tx_size_context(const MACROBLOCKD *xd) {
const int max_tx_size = max_txsize_lookup[xd->mi[0]->mbmi.sb_type];
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const int max_tx_size = max_txsize_lookup[xd->mi[0]->sb_type];
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
int above_ctx = (has_above && !above_mbmi->skip) ? (int)above_mbmi->tx_size
: max_tx_size;
int left_ctx = (has_left && !left_mbmi->skip) ? (int)left_mbmi->tx_size
: max_tx_size;
int above_ctx = (has_above && !above_mi->skip) ? (int)above_mi->tx_size
: max_tx_size;
int left_ctx = (has_left && !left_mi->skip) ? (int)left_mi->tx_size
: max_tx_size;
if (!has_left)
left_ctx = above_ctx;
......
......@@ -159,8 +159,8 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
int mi_x, int mi_y) {
struct macroblockd_plane *const pd = &xd->plane[plane];
const MODE_INFO *mi = xd->mi[0];
const int is_compound = has_second_ref(&mi->mbmi);
const InterpKernel *kernel = vp9_filter_kernels[mi->mbmi.interp_filter];
const int is_compound = has_second_ref(mi);
const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
int ref;
for (ref = 0; ref < 1 + is_compound; ++ref) {
......@@ -168,9 +168,9 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
struct buf_2d *const pre_buf = &pd->pre[ref];
struct buf_2d *const dst_buf = &pd->dst;
uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
const MV mv = mi->mbmi.sb_type < BLOCK_8X8
const MV mv = mi->sb_type < BLOCK_8X8
? average_split_mvs(pd, mi, ref, block)
: mi->mbmi.mv[ref].as_mv;
: mi->mv[ref].as_mv;
// TODO(jkoleszar): This clamping is done in the incorrect place for the
// scaling case. It needs to be done on the scaled MV, not the pre-scaling
......@@ -191,8 +191,8 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
const int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
const int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
#if CONFIG_BETTER_HW_COMPATIBILITY
assert(xd->mi[0]->mbmi.sb_type != BLOCK_4X8 &&
xd->mi[0]->mbmi.sb_type != BLOCK_8X4);
assert(xd->mi[0]->sb_type != BLOCK_4X8 &&
xd->mi[0]->sb_type != BLOCK_8X4);
assert(mv_q4.row == mv.row * (1 << (1 - pd->subsampling_y)) &&
mv_q4.col == mv.col * (1 << (1 - pd->subsampling_x)));
#endif
......@@ -250,7 +250,7 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
const int bw = 4 * num_4x4_w;
const int bh = 4 * num_4x4_h;
if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
if (xd->mi[0]->sb_type < BLOCK_8X8) {
int i = 0, x, y;
assert(bsize == BLOCK_8X8);
for (y = 0; y < num_4x4_h; ++y)
......
......@@ -42,7 +42,7 @@ static INLINE const scan_order *get_scan(const MACROBLOCKD *xd, TX_SIZE tx_size,
PLANE_TYPE type, int block_idx) {
const MODE_INFO *const mi = xd->mi[0];
if (is_inter_block(&mi->mbmi) || type != PLANE_TYPE_Y || xd->lossless) {
if (is_inter_block(mi) || type != PLANE_TYPE_Y || xd->lossless) {
return &vp9_default_scan_orders[tx_size];
} else {
const PREDICTION_MODE mode = get_y_mode(mi, block_idx);
......
......@@ -365,16 +365,16 @@ static void inverse_transform_block_intra(MACROBLOCKD* xd, int plane,
static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd,
vpx_reader *r,
MB_MODE_INFO *const mbmi,
MODE_INFO *const mi,
int plane,
int row, int col,
TX_SIZE tx_size) {
struct macroblockd_plane *const pd = &xd->plane[plane];
PREDICTION_MODE mode = (plane == 0) ? mbmi->mode : mbmi->uv_mode;
PREDICTION_MODE mode = (plane == 0) ? mi->mode : mi->uv_mode;
uint8_t *dst;
dst = &pd->dst.buf[4 * row * pd->dst.stride + 4 * col];
if (mbmi->sb_type < BLOCK_8X8)
if (mi->sb_type < BLOCK_8X8)
if (plane == 0)
mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
......@@ -382,25 +382,25 @@ static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd,
dst, pd->dst.stride, dst, pd->dst.stride,
col, row, plane);
if (!mbmi->skip) {
if (!mi->skip) {
const TX_TYPE tx_type = (plane || xd->lossless) ?
DCT_DCT : intra_mode_to_tx_type_lookup[mode];
const scan_order *sc = (plane || xd->lossless) ?
&vp9_default_scan_orders[tx_size] : &vp9_scan_orders[tx_size][tx_type];
const int eob = vp9_decode_block_tokens(xd, plane, sc, col, row, tx_size,
r, mbmi->segment_id);
r, mi->segment_id);
inverse_transform_block_intra(xd, plane, tx_type, tx_size,
dst, pd->dst.stride, eob);
}
}
static int reconstruct_inter_block(MACROBLOCKD *const xd, vpx_reader *r,
MB_MODE_INFO *const mbmi, int plane,
MODE_INFO *const mi, int plane,
int row, int col, TX_SIZE tx_size) {
struct macroblockd_plane *const pd = &xd->plane[plane];
const scan_order *sc = &vp9_default_scan_orders[tx_size];
const int eob = vp9_decode_block_tokens(xd, plane, sc, col, row, tx_size, r,
mbmi->segment_id);
mi->segment_id);
inverse_transform_block_inter(xd, plane, tx_size,
&pd->dst.buf[4 * row * pd->dst.stride + 4 * col],
......@@ -588,8 +588,8 @@ static void dec_build_inter_predictors(VP9Decoder *const pbi, MACROBLOCKD *xd,
int x_start = (-xd->mb_to_left_edge >> (3 + pd->subsampling_x));
int y_start = (-xd->mb_to_top_edge >> (3 + pd->subsampling_y));
#if CONFIG_BETTER_HW_COMPATIBILITY
assert(xd->mi[0]->mbmi.sb_type != BLOCK_4X8 &&
xd->mi[0]->mbmi.sb_type != BLOCK_8X4);
assert(xd->mi[0]->sb_type != BLOCK_4X8 &&
xd->mi[0]->sb_type != BLOCK_8X4);
assert(mv_q4.row == mv->row * (1 << (1 - pd->subsampling_y)) &&
mv_q4.col == mv->col * (1 << (1 - pd->subsampling_x)));
#endif
......@@ -716,13 +716,13 @@ static void dec_build_inter_predictors_sb(VP9Decoder *const pbi,
const int mi_x = mi_col * MI_SIZE;
const int mi_y = mi_row * MI_SIZE;
const MODE_INFO *mi = xd->mi[0];
const InterpKernel *kernel = vp9_filter_kernels[mi->mbmi.interp_filter];
const BLOCK_SIZE sb_type = mi->mbmi.sb_type;
const int is_compound = has_second_ref(&mi->mbmi);
const InterpKernel *kernel = vp9_filter_kernels[mi->interp_filter];
const BLOCK_SIZE sb_type = mi->sb_type;
const int is_compound = has_second_ref(mi);
int ref;
for (ref = 0; ref < 1 + is_compound; ++ref) {
const MV_REFERENCE_FRAME frame = mi->mbmi.ref_frame[ref];
const MV_REFERENCE_FRAME frame = mi->ref_frame[ref];
RefBuffer *ref_buf = &pbi->common.frame_refs[frame - LAST_FRAME];
xd->block_refs[ref] = ref_buf;
......@@ -762,7 +762,7 @@ static void dec_build_inter_predictors_sb(VP9Decoder *const pbi,
}
}
} else {
const MV mv = mi->mbmi.mv[ref].as_mv;
const MV mv = mi->mv[ref].as_mv;
dec_build_inter_predictors(pbi, xd, plane, n4w_x4, n4h_x4,
0, 0, n4w_x4, n4h_x4, mi_x, mi_y, kernel,
sf, pre_buf, dst_buf, &mv, ref_frame_buf,
......@@ -772,11 +772,11 @@ static void dec_build_inter_predictors_sb(VP9Decoder *const pbi,
}
}
static INLINE TX_SIZE dec_get_uv_tx_size(const MB_MODE_INFO *mbmi,
static INLINE TX_SIZE dec_get_uv_tx_size(const MODE_INFO *mi,
int n4_wl, int n4_hl) {
// get minimum log2 num4x4s dimension
const int x = VPXMIN(n4_wl, n4_hl);
return VPXMIN(mbmi->tx_size, x);
return VPXMIN(mi->tx_size, x);
}
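
Concretely: for a chroma plane block 8 pixels wide and 16 high, n4_wl and n4_hl are 1 and 2 (log2 of the 4x4 count per side), so even a 32x32 luma transform is capped at 8x8 for UV. A self-contained check (VPXMIN is redefined locally; the TX_* values are the usual vp9_enums.h ones):

#include <assert.h>

#define VPXMIN(a, b) ((a) < (b) ? (a) : (b))
enum { TX_4X4, TX_8X8, TX_16X16, TX_32X32 };

int main(void) {
  const int n4_wl = 1, n4_hl = 2;  /* 8x16-pixel chroma block */
  const int y_tx = TX_32X32;
  assert(VPXMIN(y_tx, VPXMIN(n4_wl, n4_hl)) == TX_8X8);
  return 0;
}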
static INLINE void dec_reset_skip_context(MACROBLOCKD *xd) {
......@@ -799,10 +799,10 @@ static void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh, int bwl,
}
}
static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int bw, int bh, int x_mis, int y_mis,
int bwl, int bhl) {
static MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
BLOCK_SIZE bsize, int mi_row, int mi_col,
int bw, int bh, int x_mis, int y_mis,
int bwl, int bhl) {
const int offset = mi_row * cm->mi_stride + mi_col;
int x, y;
const TileInfo *const tile = &xd->tile;
......@@ -811,7 +811,7 @@ static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
xd->mi[0] = &cm->mi[offset];
// TODO(slavarnway): Generate sb_type based on bwl and bhl, instead of
// passing bsize from decode_partition().