Commit a7e0b1ea authored by Scott LaVarnway, committed by Gerrit Code Review

Merge "VP9: Eliminate MB_MODE_INFO"

parents 3277d481 52323267
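
The hunks below flatten the old two-level layout (a MODE_INFO wrapping an embedded MB_MODE_INFO) into a single MODE_INFO struct, so call sites drop the ->mbmi / &mi->mbmi indirection and helpers such as is_inter_block() take a MODE_INFO pointer directly. For orientation, here is a minimal, compilable sketch of the new shape; the enums are stand-ins (the real MV_REFERENCE_FRAME is an int8_t typedef) and only two of the real fields are shown.

  #include <stdio.h>

  // Stand-in enums for the sketch only.
  typedef enum { INTRA_FRAME, LAST_FRAME } MV_REFERENCE_FRAME;
  typedef enum { BLOCK_4X4, BLOCK_8X8 } BLOCK_SIZE;

  // After this change the fields live directly in MODE_INFO; there is no
  // embedded MB_MODE_INFO any more.
  typedef struct MODE_INFO {
    BLOCK_SIZE sb_type;
    MV_REFERENCE_FRAME ref_frame[2];
  } MODE_INFO;

  // Helpers now take MODE_INFO *; callers that used to write
  // is_inter_block(&mi->mbmi) now pass mi itself.
  static int is_inter_block(const MODE_INFO *mi) {
    return mi->ref_frame[0] > INTRA_FRAME;
  }

  int main(void) {
    MODE_INFO mi = { BLOCK_8X8, { LAST_FRAME, INTRA_FRAME } };
    printf("inter: %d\n", is_inter_block(&mi));  // prints "inter: 1"
    return 0;
  }

The real struct keeps the mode, tx_size, skip, interp_filter, mv[] and bmi[] members seen in the hunks that follow.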
......@@ -13,7 +13,7 @@
PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *left_mi, int b) {
if (b == 0 || b == 2) {
if (!left_mi || is_inter_block(&left_mi->mbmi))
if (!left_mi || is_inter_block(left_mi))
return DC_PRED;
return get_y_mode(left_mi, b + 1);
......@@ -26,7 +26,7 @@ PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
PREDICTION_MODE vp9_above_block_mode(const MODE_INFO *cur_mi,
const MODE_INFO *above_mi, int b) {
if (b == 0 || b == 1) {
if (!above_mi || is_inter_block(&above_mi->mbmi))
if (!above_mi || is_inter_block(above_mi))
return DC_PRED;
return get_y_mode(above_mi, b + 2);
......@@ -40,12 +40,12 @@ void vp9_foreach_transformed_block_in_plane(
const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
foreach_transformed_block_visitor visit, void *arg) {
const struct macroblockd_plane *const pd = &xd->plane[plane];
const MB_MODE_INFO* mbmi = &xd->mi[0]->mbmi;
const MODE_INFO* mi = xd->mi[0];
// block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
// 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
// transform size varies per plane, look it up in a common way.
const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd)
: mbmi->tx_size;
const TX_SIZE tx_size = plane ? get_uv_tx_size(mi, pd)
: mi->tx_size;
const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize];
const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize];
......
......@@ -64,7 +64,7 @@ typedef struct {
typedef int8_t MV_REFERENCE_FRAME;
// This structure now relates to 8x8 block regions.
typedef struct {
typedef struct MODE_INFO {
// Common for both INTER and INTRA blocks
BLOCK_SIZE sb_type;
PREDICTION_MODE mode;
......@@ -82,24 +82,21 @@ typedef struct {
// TODO(slavarnway): Delete and use bmi[3].as_mv[] instead.
int_mv mv[2];
} MB_MODE_INFO;
typedef struct MODE_INFO {
MB_MODE_INFO mbmi;
b_mode_info bmi[4];
} MODE_INFO;
static INLINE PREDICTION_MODE get_y_mode(const MODE_INFO *mi, int block) {
return mi->mbmi.sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode
: mi->mbmi.mode;
return mi->sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode
: mi->mode;
}
static INLINE int is_inter_block(const MB_MODE_INFO *mbmi) {
return mbmi->ref_frame[0] > INTRA_FRAME;
static INLINE int is_inter_block(const MODE_INFO *mi) {
return mi->ref_frame[0] > INTRA_FRAME;
}
static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
return mbmi->ref_frame[1] > INTRA_FRAME;
static INLINE int has_second_ref(const MODE_INFO *mi) {
return mi->ref_frame[1] > INTRA_FRAME;
}
PREDICTION_MODE vp9_left_block_mode(const MODE_INFO *cur_mi,
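
get_y_mode() above keeps the pre-existing split between sub-8x8 and larger blocks: below BLOCK_8X8 each 4x4 sub-block carries its own mode in bmi[], otherwise the single block-level mode applies. A self-contained sketch of that lookup, with a simplified b_mode_info (the real one also carries the per-sub-block motion vectors referred to by the bmi[3].as_mv[] TODO above):

  #include <assert.h>

  typedef enum { BLOCK_4X4, BLOCK_8X8 } BLOCK_SIZE;
  typedef enum { DC_PRED, V_PRED, H_PRED } PREDICTION_MODE;
  typedef struct { PREDICTION_MODE as_mode; } b_mode_info;

  typedef struct MODE_INFO {
    BLOCK_SIZE sb_type;
    PREDICTION_MODE mode;  // block-level mode, used at 8x8 and above
    b_mode_info bmi[4];    // per-4x4 modes, used below 8x8
  } MODE_INFO;

  static PREDICTION_MODE get_y_mode(const MODE_INFO *mi, int block) {
    return mi->sb_type < BLOCK_8X8 ? mi->bmi[block].as_mode : mi->mode;
  }

  int main(void) {
    MODE_INFO mi = { BLOCK_4X4, DC_PRED,
                     { { V_PRED }, { H_PRED }, { DC_PRED }, { V_PRED } } };
    assert(get_y_mode(&mi, 1) == H_PRED);   // sub-8x8: per-sub-block mode
    mi.sb_type = BLOCK_8X8;
    assert(get_y_mode(&mi, 1) == DC_PRED);  // 8x8 and up: single block mode
    return 0;
  }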
......@@ -160,8 +157,6 @@ typedef struct macroblockd {
MODE_INFO **mi;
MODE_INFO *left_mi;
MODE_INFO *above_mi;
MB_MODE_INFO *left_mbmi;
MB_MODE_INFO *above_mbmi;
int up_available;
int left_available;
......@@ -212,19 +207,19 @@ extern const TX_TYPE intra_mode_to_tx_type_lookup[INTRA_MODES];
static INLINE TX_TYPE get_tx_type(PLANE_TYPE plane_type,
const MACROBLOCKD *xd) {
const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const MODE_INFO *const mi = xd->mi[0];
if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mbmi))
if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mi))
return DCT_DCT;
return intra_mode_to_tx_type_lookup[mbmi->mode];
return intra_mode_to_tx_type_lookup[mi->mode];
}
static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
const MACROBLOCKD *xd, int ib) {
const MODE_INFO *const mi = xd->mi[0];
if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(&mi->mbmi))
if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mi))
return DCT_DCT;
return intra_mode_to_tx_type_lookup[get_y_mode(mi, ib)];
......@@ -242,9 +237,9 @@ static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize,
}
}
static INLINE TX_SIZE get_uv_tx_size(const MB_MODE_INFO *mbmi,
static INLINE TX_SIZE get_uv_tx_size(const MODE_INFO *mi,
const struct macroblockd_plane *pd) {
return get_uv_tx_size_impl(mbmi->tx_size, mbmi->sb_type, pd->subsampling_x,
return get_uv_tx_size_impl(mi->tx_size, mi->sb_type, pd->subsampling_x,
pd->subsampling_y);
}
......
......@@ -35,7 +35,7 @@ static void print_mi_data(VP9_COMMON *cm, FILE *file, const char *descriptor,
fprintf(file, "%c ", prefix);
for (mi_col = 0; mi_col < cols; mi_col++) {
fprintf(file, "%2d ",
*((int*) ((char *) (&mi[0]->mbmi) +
*((int*) ((char *) (mi[0]) +
member_offset)));
mi++;
}
......@@ -53,18 +53,18 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) {
int rows = cm->mi_rows;
int cols = cm->mi_cols;
print_mi_data(cm, mvs, "Partitions:", offsetof(MB_MODE_INFO, sb_type));
print_mi_data(cm, mvs, "Modes:", offsetof(MB_MODE_INFO, mode));
print_mi_data(cm, mvs, "Ref frame:", offsetof(MB_MODE_INFO, ref_frame[0]));
print_mi_data(cm, mvs, "Transform:", offsetof(MB_MODE_INFO, tx_size));
print_mi_data(cm, mvs, "UV Modes:", offsetof(MB_MODE_INFO, uv_mode));
print_mi_data(cm, mvs, "Partitions:", offsetof(MODE_INFO, sb_type));
print_mi_data(cm, mvs, "Modes:", offsetof(MODE_INFO, mode));
print_mi_data(cm, mvs, "Ref frame:", offsetof(MODE_INFO, ref_frame[0]));
print_mi_data(cm, mvs, "Transform:", offsetof(MODE_INFO, tx_size));
print_mi_data(cm, mvs, "UV Modes:", offsetof(MODE_INFO, uv_mode));
// output skip infomation.
log_frame_info(cm, "Skips:", mvs);
for (mi_row = 0; mi_row < rows; mi_row++) {
fprintf(mvs, "S ");
for (mi_col = 0; mi_col < cols; mi_col++) {
fprintf(mvs, "%2d ", mi[0]->mbmi.skip);
fprintf(mvs, "%2d ", mi[0]->skip);
mi++;
}
fprintf(mvs, "\n");
......@@ -78,8 +78,8 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) {
for (mi_row = 0; mi_row < rows; mi_row++) {
fprintf(mvs, "V ");
for (mi_col = 0; mi_col < cols; mi_col++) {
fprintf(mvs, "%4d:%4d ", mi[0]->mbmi.mv[0].as_mv.row,
mi[0]->mbmi.mv[0].as_mv.col);
fprintf(mvs, "%4d:%4d ", mi[0]->mv[0].as_mv.row,
mi[0]->mv[0].as_mv.col);
mi++;
}
fprintf(mvs, "\n");
......
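
print_mi_data() above takes a byte offset into MODE_INFO (now computed with offsetof(MODE_INFO, ...) rather than offsetof(MB_MODE_INFO, ...)) and reads whichever member the caller asked for through a cast, so one routine can dump partitions, modes, reference frames, transform sizes and UV modes. A stand-alone sketch of that offset-based dump; the struct and helper names here are toy stand-ins, and the members are plain ints so the int cast is well defined:

  #include <stddef.h>
  #include <stdio.h>

  typedef struct {
    int sb_type;
    int mode;
  } ToyModeInfo;

  // Print one chosen member for every element, the way print_mi_data()
  // walks a row of mode-info entries.
  static void print_member(const ToyModeInfo *mi, int count,
                           size_t member_offset) {
    int i;
    for (i = 0; i < count; ++i)
      printf("%2d ", *(const int *)((const char *)&mi[i] + member_offset));
    printf("\n");
  }

  int main(void) {
    ToyModeInfo row[3] = { { 1, 0 }, { 2, 1 }, { 3, 2 } };
    print_member(row, 3, offsetof(ToyModeInfo, sb_type));  // " 1  2  3"
    print_member(row, 3, offsetof(ToyModeInfo, mode));     // " 0  1  2"
    return 0;
  }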
......@@ -232,9 +232,9 @@ static void update_sharpness(loop_filter_info_n *lfi, int sharpness_lvl) {
}
static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
const MB_MODE_INFO *mbmi) {
return lfi_n->lvl[mbmi->segment_id][mbmi->ref_frame[0]]
[mode_lf_lut[mbmi->mode]];
const MODE_INFO *mi) {
return lfi_n->lvl[mi->segment_id][mi->ref_frame[0]]
[mode_lf_lut[mi->mode]];
}
void vp9_loop_filter_init(VP9_COMMON *cm) {
......@@ -709,11 +709,10 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
const MODE_INFO *mi, const int shift_y,
const int shift_uv,
LOOP_FILTER_MASK *lfm) {
const MB_MODE_INFO *mbmi = &mi->mbmi;
const BLOCK_SIZE block_size = mbmi->sb_type;
const TX_SIZE tx_size_y = mbmi->tx_size;
const BLOCK_SIZE block_size = mi->sb_type;
const TX_SIZE tx_size_y = mi->tx_size;
const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
const int filter_level = get_filter_level(lfi_n, mbmi);
const int filter_level = get_filter_level(lfi_n, mi);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
uint64_t *const above_y = &lfm->above_y[tx_size_y];
uint64_t *const int_4x4_y = &lfm->int_4x4_y;
......@@ -754,7 +753,7 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
// If the block has no coefficients and is not intra we skip applying
// the loop filter on block edges.
if (mbmi->skip && is_inter_block(mbmi))
if (mi->skip && is_inter_block(mi))
return;
// Here we are adding a mask for the transform size. The transform
......@@ -788,10 +787,9 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
static void build_y_mask(const loop_filter_info_n *const lfi_n,
const MODE_INFO *mi, const int shift_y,
LOOP_FILTER_MASK *lfm) {
const MB_MODE_INFO *mbmi = &mi->mbmi;
const BLOCK_SIZE block_size = mbmi->sb_type;
const TX_SIZE tx_size_y = mbmi->tx_size;
const int filter_level = get_filter_level(lfi_n, mbmi);
const BLOCK_SIZE block_size = mi->sb_type;
const TX_SIZE tx_size_y = mi->tx_size;
const int filter_level = get_filter_level(lfi_n, mi);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
uint64_t *const above_y = &lfm->above_y[tx_size_y];
uint64_t *const int_4x4_y = &lfm->int_4x4_y;
......@@ -812,7 +810,7 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
*above_y |= above_prediction_mask[block_size] << shift_y;
*left_y |= left_prediction_mask[block_size] << shift_y;
if (mbmi->skip && is_inter_block(mbmi))
if (mi->skip && is_inter_block(mi))
return;
*above_y |= (size_mask[block_size] &
......@@ -980,7 +978,7 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
// TODO(jimbankoski): Try moving most of the following code into decode
// loop and storing lfm in the mbmi structure so that we don't have to go
// through the recursive loop structure multiple times.
switch (mip[0]->mbmi.sb_type) {
switch (mip[0]->sb_type) {
case BLOCK_64X64:
build_masks(lfi_n, mip[0] , 0, 0, lfm);
break;
......@@ -1006,7 +1004,7 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
const int mi_32_row_offset = ((idx_32 >> 1) << 2);
if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
continue;
switch (mip[0]->mbmi.sb_type) {
switch (mip[0]->sb_type) {
case BLOCK_32X32:
build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
break;
......@@ -1036,7 +1034,7 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
continue;
switch (mip[0]->mbmi.sb_type) {
switch (mip[0]->sb_type) {
case BLOCK_16X16:
build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
break;
......@@ -1186,8 +1184,8 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm,
// Determine the vertical edges that need filtering
for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
const MODE_INFO *mi = mi_8x8[c];
const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type;
const int skip_this = mi[0].mbmi.skip && is_inter_block(&mi[0].mbmi);
const BLOCK_SIZE sb_type = mi[0].sb_type;
const int skip_this = mi[0].skip && is_inter_block(mi);
// left edge of current unit is block/partition edge -> no skip
const int block_edge_left = (num_4x4_blocks_wide_lookup[sb_type] > 1) ?
!(c & (num_8x8_blocks_wide_lookup[sb_type] - 1)) : 1;
......@@ -1196,13 +1194,13 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm,
const int block_edge_above = (num_4x4_blocks_high_lookup[sb_type] > 1) ?
!(r & (num_8x8_blocks_high_lookup[sb_type] - 1)) : 1;
const int skip_this_r = skip_this && !block_edge_above;
const TX_SIZE tx_size = get_uv_tx_size(&mi[0].mbmi, plane);
const TX_SIZE tx_size = get_uv_tx_size(mi, plane);
const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
// Filter level can vary per MI
if (!(lfl[(r << 3) + (c >> ss_x)] =
get_filter_level(&cm->lf_info, &mi[0].mbmi)))
get_filter_level(&cm->lf_info, mi)))
continue;
// Build masks based on the transform size of each block
......@@ -1640,12 +1638,12 @@ static const uint8_t first_block_in_16x16[8][8] = {
// This function sets up the bit masks for a block represented
// by mi_row, mi_col in a 64x64 region.
// TODO(SJL): This function only works for yv12.
void vp9_build_mask(VP9_COMMON *cm, const MB_MODE_INFO *mbmi, int mi_row,
void vp9_build_mask(VP9_COMMON *cm, const MODE_INFO *mi, int mi_row,
int mi_col, int bw, int bh) {
const BLOCK_SIZE block_size = mbmi->sb_type;
const TX_SIZE tx_size_y = mbmi->tx_size;
const BLOCK_SIZE block_size = mi->sb_type;
const TX_SIZE tx_size_y = mi->tx_size;
const loop_filter_info_n *const lfi_n = &cm->lf_info;
const int filter_level = get_filter_level(lfi_n, mbmi);
const int filter_level = get_filter_level(lfi_n, mi);
const TX_SIZE tx_size_uv = get_uv_tx_size_impl(tx_size_y, block_size, 1, 1);
LOOP_FILTER_MASK *const lfm = get_lfm(&cm->lf, mi_row, mi_col);
uint64_t *const left_y = &lfm->left_y[tx_size_y];
......@@ -1693,7 +1691,7 @@ void vp9_build_mask(VP9_COMMON *cm, const MB_MODE_INFO *mbmi, int mi_row,
// If the block has no coefficients and is not intra we skip applying
// the loop filter on block edges.
if (mbmi->skip && is_inter_block(mbmi))
if (mi->skip && is_inter_block(mi))
return;
// Add a mask for the transform size. The transform size mask is set to
......
......@@ -134,7 +134,7 @@ static INLINE LOOP_FILTER_MASK *get_lfm(const struct loopfilter *lf,
return &lf->lfm[(mi_col >> 3) + ((mi_row >> 3) * lf->lfm_stride)];
}
void vp9_build_mask(struct VP9Common *cm, const MB_MODE_INFO *mbmi, int mi_row,
void vp9_build_mask(struct VP9Common *cm, const MODE_INFO *mi, int mi_row,
int mi_col, int bw, int bh);
void vp9_adjust_mask(struct VP9Common *const cm, const int mi_row,
const int mi_col, LOOP_FILTER_MASK *lfm);
......
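
get_lfm() in the hunk above selects the per-superblock loop-filter mask: mi_row/mi_col count 8x8-pixel MODE_INFO units, so the >> 3 maps them onto 64x64 superblocks, and lfm_stride is the number of mask entries per superblock row. A tiny sketch of that index arithmetic, with a made-up stride and an illustrative function name:

  #include <stdio.h>

  static int lfm_index(int mi_row, int mi_col, int lfm_stride) {
    return (mi_col >> 3) + (mi_row >> 3) * lfm_stride;
  }

  int main(void) {
    const int lfm_stride = 4;  // hypothetical: a frame four superblocks wide
    printf("%d\n", lfm_index(0, 0, lfm_stride));   // 0: top-left superblock
    printf("%d\n", lfm_index(7, 7, lfm_stride));   // 0: still the same 64x64 block
    printf("%d\n", lfm_index(8, 10, lfm_stride));  // 5: superblock row 1, column 1
    return 0;
  }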
......@@ -203,12 +203,12 @@ static void mfqe_block(BLOCK_SIZE bs, const uint8_t *y, const uint8_t *u,
static int mfqe_decision(MODE_INFO *mi, BLOCK_SIZE cur_bs) {
// Check the motion in current block(for inter frame),
// or check the motion in the correlated block in last frame (for keyframe).
const int mv_len_square = mi->mbmi.mv[0].as_mv.row *
mi->mbmi.mv[0].as_mv.row +
mi->mbmi.mv[0].as_mv.col *
mi->mbmi.mv[0].as_mv.col;
const int mv_len_square = mi->mv[0].as_mv.row *
mi->mv[0].as_mv.row +
mi->mv[0].as_mv.col *
mi->mv[0].as_mv.col;
const int mv_threshold = 100;
return mi->mbmi.mode >= NEARESTMV && // Not an intra block
return mi->mode >= NEARESTMV && // Not an intra block
cur_bs >= BLOCK_16X16 &&
mv_len_square <= mv_threshold;
}
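
mfqe_decision() above gates the MFQE post-processing filter on how static a block is: an inter mode (NEARESTMV or later), a block of at least 16x16, and a squared motion-vector length no larger than the fixed threshold of 100. A stand-alone sketch of just the motion test, with a toy MV type and an illustrative function name:

  #include <stdio.h>

  typedef struct { int row, col; } MV;  // toy stand-in for the real MV type

  // Same test as the mv_len_square / mv_threshold check above.
  static int mfqe_motion_ok(MV mv) {
    const int mv_len_square = mv.row * mv.row + mv.col * mv.col;
    const int mv_threshold = 100;
    return mv_len_square <= mv_threshold;
  }

  int main(void) {
    MV still = { 3, 4 };   // 9 + 16 = 25   -> little motion, MFQE allowed
    MV moving = { 8, 9 };  // 64 + 81 = 145 -> too much motion, skip MFQE
    printf("%d %d\n", mfqe_motion_ok(still), mfqe_motion_ok(moving));  // "1 0"
    return 0;
  }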
......@@ -220,7 +220,7 @@ static void mfqe_partition(VP9_COMMON *cm, MODE_INFO *mi, BLOCK_SIZE bs,
uint8_t *yd, uint8_t *ud, uint8_t *vd,
int yd_stride, int uvd_stride) {
int mi_offset, y_offset, uv_offset;
const BLOCK_SIZE cur_bs = mi->mbmi.sb_type;
const BLOCK_SIZE cur_bs = mi->sb_type;
const int qdiff = cm->base_qindex - cm->postproc_state.last_base_qindex;
const int bsl = b_width_log2_lookup[bs];
PARTITION_TYPE partition = partition_lookup[bsl][cur_bs];
......
......@@ -20,7 +20,7 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
uint8_t *mode_context) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
const POSITION *const mv_ref_search = mv_ref_blocks[mi->sb_type];
int different_ref_found = 0;
int context_counter = 0;
const MV_REF *const prev_frame_mvs = cm->use_prev_frame_mvs ?
......@@ -38,15 +38,14 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
xd->mi_stride];
const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
// Keep counts for entropy encoding.
context_counter += mode_2_counter[candidate->mode];
context_counter += mode_2_counter[candidate_mi->mode];
different_ref_found = 1;
if (candidate->ref_frame[0] == ref_frame)
if (candidate_mi->ref_frame[0] == ref_frame)
ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 0, mv_ref->col, block),
refmv_count, mv_ref_list, Done);
else if (candidate->ref_frame[1] == ref_frame)
else if (candidate_mi->ref_frame[1] == ref_frame)
ADD_MV_REF_LIST(get_sub_block_mv(candidate_mi, 1, mv_ref->col, block),
refmv_count, mv_ref_list, Done);
}
......@@ -58,14 +57,14 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
for (; i < MVREF_NEIGHBOURS; ++i) {
const POSITION *const mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MB_MODE_INFO *const candidate =
&xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]->mbmi;
const MODE_INFO *const candidate_mi =
xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
different_ref_found = 1;
if (candidate->ref_frame[0] == ref_frame)
ADD_MV_REF_LIST(candidate->mv[0], refmv_count, mv_ref_list, Done);
else if (candidate->ref_frame[1] == ref_frame)
ADD_MV_REF_LIST(candidate->mv[1], refmv_count, mv_ref_list, Done);
if (candidate_mi->ref_frame[0] == ref_frame)
ADD_MV_REF_LIST(candidate_mi->mv[0], refmv_count, mv_ref_list, Done);
else if (candidate_mi->ref_frame[1] == ref_frame)
ADD_MV_REF_LIST(candidate_mi->mv[1], refmv_count, mv_ref_list, Done);
}
}
......@@ -85,11 +84,11 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
for (i = 0; i < MVREF_NEIGHBOURS; ++i) {
const POSITION *mv_ref = &mv_ref_search[i];
if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
const MB_MODE_INFO *const candidate =
&xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride]->mbmi;
const MODE_INFO *const candidate_mi =
xd->mi[mv_ref->col + mv_ref->row * xd->mi_stride];
// If the candidate is INTRA we don't want to consider its mv.
IF_DIFF_REF_FRAME_ADD_MV(candidate, ref_frame, ref_sign_bias,
IF_DIFF_REF_FRAME_ADD_MV(candidate_mi, ref_frame, ref_sign_bias,
refmv_count, mv_ref_list, Done);
}
}
......@@ -163,7 +162,7 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
assert(MAX_MV_REF_CANDIDATES == 2);
find_mv_refs_idx(cm, xd, mi, mi->mbmi.ref_frame[ref], mv_list, block,
find_mv_refs_idx(cm, xd, mi, mi->ref_frame[ref], mv_list, block,
mi_row, mi_col, mode_context);
near_mv->as_int = 0;
......
......@@ -136,19 +136,19 @@ static INLINE void clamp_mv_ref(MV *mv, const MACROBLOCKD *xd) {
// on whether the block_size < 8x8 and we have check_sub_blocks set.
static INLINE int_mv get_sub_block_mv(const MODE_INFO *candidate, int which_mv,
int search_col, int block_idx) {
return block_idx >= 0 && candidate->mbmi.sb_type < BLOCK_8X8
return block_idx >= 0 && candidate->sb_type < BLOCK_8X8
? candidate->bmi[idx_n_column_to_subblock[block_idx][search_col == 0]]
.as_mv[which_mv]
: candidate->mbmi.mv[which_mv];
: candidate->mv[which_mv];
}
// Performs mv sign inversion if indicated by the reference frame combination.
static INLINE int_mv scale_mv(const MB_MODE_INFO *mbmi, int ref,
static INLINE int_mv scale_mv(const MODE_INFO *mi, int ref,
const MV_REFERENCE_FRAME this_ref_frame,
const int *ref_sign_bias) {
int_mv mv = mbmi->mv[ref];
if (ref_sign_bias[mbmi->ref_frame[ref]] != ref_sign_bias[this_ref_frame]) {
int_mv mv = mi->mv[ref];
if (ref_sign_bias[mi->ref_frame[ref]] != ref_sign_bias[this_ref_frame]) {
mv.as_mv.row *= -1;
mv.as_mv.col *= -1;
}
......
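
scale_mv() in the hunk above (its tail is elided here) negates a borrowed motion vector when the candidate's reference frame and the target reference frame have different sign biases, which the codec uses, roughly, to mark references on opposite temporal sides of the current frame. A self-contained sketch of that flip, with a toy MV type and an illustrative function name:

  #include <assert.h>

  typedef struct { int row, col; } MV;  // toy MV type

  // Mirrors the sign-bias test in scale_mv() above.
  static MV flip_for_sign_bias(MV mv, int candidate_ref_bias, int this_ref_bias) {
    if (candidate_ref_bias != this_ref_bias) {
      mv.row = -mv.row;
      mv.col = -mv.col;
    }
    return mv;
  }

  int main(void) {
    const MV mv = { 4, -6 };
    MV flipped = flip_for_sign_bias(mv, 1, 0);  // biases differ: negate
    assert(flipped.row == -4 && flipped.col == 6);
    MV kept = flip_for_sign_bias(mv, 0, 0);     // same bias: unchanged
    assert(kept.row == 4 && kept.col == -6);
    return 0;
  }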
......@@ -406,22 +406,17 @@ static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
// Are edges available for intra prediction?
xd->up_available = (mi_row != 0);
xd->left_available = (mi_col > tile->mi_col_start);
// TODO(slavarnway): eliminate up/left available ???
if (xd->up_available) {
xd->above_mi = xd->mi[-xd->mi_stride];
// above_mi may be NULL in VP9 encoder's first pass.
xd->above_mbmi = xd->above_mi ? &xd->above_mi->mbmi : NULL;
} else {
xd->above_mi = NULL;
xd->above_mbmi = NULL;
}
if (xd->left_available) {
xd->left_mi = xd->mi[-1];
// left_mi may be NULL in VP9 encoder's first pass.
xd->left_mbmi = xd->left_mi ? &xd->left_mi->mbmi : NULL;
} else {
xd->left_mi = NULL;
xd->left_mbmi = NULL;
}
}
......
......@@ -19,12 +19,12 @@ int vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
// The mode info data structure has a one element border above and to the
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const int left_type = xd->left_available && is_inter_block(left_mbmi) ?
left_mbmi->interp_filter : SWITCHABLE_FILTERS;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const int above_type = xd->up_available && is_inter_block(above_mbmi) ?
above_mbmi->interp_filter : SWITCHABLE_FILTERS;
const MODE_INFO *const left_mi = xd->left_mi;
const int left_type = xd->left_available && is_inter_block(left_mi) ?
left_mi->interp_filter : SWITCHABLE_FILTERS;
const MODE_INFO *const above_mi = xd->above_mi;
const int above_type = xd->up_available && is_inter_block(above_mi) ?
above_mi->interp_filter : SWITCHABLE_FILTERS;
if (left_type == above_type)
return left_type;
......@@ -44,18 +44,18 @@ int vp9_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
// 2 - intra/--, --/intra
// 3 - intra/intra
int vp9_get_intra_inter_context(const MACROBLOCKD *xd) {
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
if (has_above && has_left) { // both edges available
const int above_intra = !is_inter_block(above_mbmi);
const int left_intra = !is_inter_block(left_mbmi);
const int above_intra = !is_inter_block(above_mi);
const int left_intra = !is_inter_block(left_mi);
return left_intra && above_intra ? 3
: left_intra || above_intra;
} else if (has_above || has_left) { // one edge available
return 2 * !is_inter_block(has_above ? above_mbmi : left_mbmi);
return 2 * !is_inter_block(has_above ? above_mi : left_mi);
} else {
return 0;
}
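
vp9_get_intra_inter_context() above packs the neighbours' intra/inter status into the four context values listed in the comment: 0 when the available edges are inter (or nothing is available), 1 when exactly one of two edges is intra, 2 when a lone available edge is intra, and 3 when both edges are intra. A stand-alone sketch of that mapping, with the neighbour lookups replaced by plain flags and an illustrative function name:

  #include <assert.h>

  static int intra_inter_ctx(int has_above, int has_left,
                             int above_intra, int left_intra) {
    if (has_above && has_left)       // both edges available
      return left_intra && above_intra ? 3 : (left_intra || above_intra);
    else if (has_above || has_left)  // one edge available
      return 2 * (has_above ? above_intra : left_intra);
    else                             // no edges available
      return 0;
  }

  int main(void) {
    assert(intra_inter_ctx(1, 1, 0, 0) == 0);  // inter/inter
    assert(intra_inter_ctx(1, 1, 1, 0) == 1);  // intra/inter or inter/intra
    assert(intra_inter_ctx(1, 0, 1, 0) == 2);  // lone edge, intra
    assert(intra_inter_ctx(1, 1, 1, 1) == 3);  // intra/intra
    assert(intra_inter_ctx(0, 0, 0, 0) == 0);  // nothing available
    return 0;
  }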
......@@ -64,8 +64,8 @@ int vp9_get_intra_inter_context(const MACROBLOCKD *xd) {
int vp9_get_reference_mode_context(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int ctx;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
// Note:
......@@ -73,26 +73,26 @@ int vp9_get_reference_mode_context(const VP9_COMMON *cm,
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
if (has_above && has_left) { // both edges available
if (!has_second_ref(above_mbmi) && !has_second_ref(left_mbmi))
if (!has_second_ref(above_mi) && !has_second_ref(left_mi))
// neither edge uses comp pred (0/1)
ctx = (above_mbmi->ref_frame[0] == cm->comp_fixed_ref) ^
(left_mbmi->ref_frame[0] == cm->comp_fixed_ref);
else if (!has_second_ref(above_mbmi))
ctx = (above_mi->ref_frame[0] == cm->comp_fixed_ref) ^
(left_mi->ref_frame[0] == cm->comp_fixed_ref);
else if (!has_second_ref(above_mi))
// one of two edges uses comp pred (2/3)
ctx = 2 + (above_mbmi->ref_frame[0] == cm->comp_fixed_ref ||
!is_inter_block(above_mbmi));
else if (!has_second_ref(left_mbmi))
ctx = 2 + (above_mi->ref_frame[0] == cm->comp_fixed_ref ||
!is_inter_block(above_mi));
else if (!has_second_ref(left_mi))
// one of two edges uses comp pred (2/3)
ctx = 2 + (left_mbmi->ref_frame[0] == cm->comp_fixed_ref ||
!is_inter_block(left_mbmi));
ctx = 2 + (left_mi->ref_frame[0] == cm->comp_fixed_ref ||
!is_inter_block(left_mi));
else // both edges use comp pred (4)
ctx = 4;
} else if (has_above || has_left) { // one edge available
const MB_MODE_INFO *edge_mbmi = has_above ? above_mbmi : left_mbmi;
const MODE_INFO *edge_mi = has_above ? above_mi : left_mi;
if (!has_second_ref(edge_mbmi))
if (!has_second_ref(edge_mi))
// edge does not use comp pred (0/1)
ctx = edge_mbmi->ref_frame[0] == cm->comp_fixed_ref;
ctx = edge_mi->ref_frame[0] == cm->comp_fixed_ref;
else
// edge uses comp pred (3)
ctx = 3;
......@@ -107,8 +107,8 @@ int vp9_get_reference_mode_context(const VP9_COMMON *cm,
int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int above_in_image = xd->up_available;
const int left_in_image = xd->left_available;
......@@ -120,26 +120,26 @@ int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
const int var_ref_idx = !fix_ref_idx;
if (above_in_image && left_in_image) { // both edges available
const int above_intra = !is_inter_block(above_mbmi);
const int left_intra = !is_inter_block(left_mbmi);
const int above_intra = !is_inter_block(above_mi);
const int left_intra = !is_inter_block(left_mi);
if (above_intra && left_intra) { // intra/intra (2)
pred_context = 2;
} else if (above_intra || left_intra) { // intra/inter
const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
const MODE_INFO *edge_mi = above_intra ? left_mi : above_mi;
if (!has_second_ref(edge_mbmi)) // single pred (1/3)
pred_context = 1 + 2 * (edge_mbmi->ref_frame[0] != cm->comp_var_ref[1]);
if (!has_second_ref(edge_mi)) // single pred (1/3)
pred_context = 1 + 2 * (edge_mi->ref_frame[0] != cm->comp_var_ref[1]);
else // comp pred (1/3)
pred_context = 1 + 2 * (edge_mbmi->ref_frame[var_ref_idx]
pred_context = 1 + 2 * (edge_mi->ref_frame[var_ref_idx]
!= cm->comp_var_ref[1]);
} else { // inter/inter
const int l_sg = !has_second_ref(left_mbmi);
const int a_sg = !has_second_ref(above_mbmi);
const MV_REFERENCE_FRAME vrfa = a_sg ? above_mbmi->ref_frame[0]
: above_mbmi->ref_frame[var_ref_idx];
const MV_REFERENCE_FRAME vrfl = l_sg ? left_mbmi->ref_frame[0]
: left_mbmi->ref_frame[var_ref_idx];
const int l_sg = !has_second_ref(left_mi);
const int a_sg = !has_second_ref(above_mi);
const MV_REFERENCE_FRAME vrfa = a_sg ? above_mi->ref_frame[0]
: above_mi->ref_frame[var_ref_idx];
const MV_REFERENCE_FRAME vrfl = l_sg ? left_mi->ref_frame[0]
: left_mi->ref_frame[var_ref_idx];
if (vrfa == vrfl && cm->comp_var_ref[1] == vrfa) {
pred_context = 0;
......@@ -167,16 +167,16 @@ int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
}
}
} else if (above_in_image || left_in_image) { // one edge available
const MB_MODE_INFO *edge_mbmi = above_in_image ? above_mbmi : left_mbmi;
const MODE_INFO *edge_mi = above_in_image ? above_mi : left_mi;
if (!is_inter_block(edge_mbmi)) {
if (!is_inter_block(edge_mi)) {
pred_context = 2;
} else {
if (has_second_ref(edge_mbmi))
pred_context = 4 * (edge_mbmi->ref_frame[var_ref_idx]
if (has_second_ref(edge_mi))
pred_context = 4 * (edge_mi->ref_frame[var_ref_idx]
!= cm->comp_var_ref[1]);
else
pred_context = 3 * (edge_mbmi->ref_frame[0] != cm->comp_var_ref[1]);
pred_context = 3 * (edge_mi->ref_frame[0] != cm->comp_var_ref[1]);
}
} else { // no edges available (2)
pred_context = 2;
......@@ -188,8 +188,8 @@ int vp9_get_pred_context_comp_ref_p(const VP9_COMMON *cm,
int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
int pred_context;
const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
const MODE_INFO *const above_mi = xd->above_mi;
const MODE_INFO *const left_mi = xd->left_mi;
const int has_above = xd->up_available;
const int has_left = xd->left_available;
// Note:
......@@ -197,25 +197,25 @@ int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
// left of the entries correpsonding to real macroblocks.
// The prediction flags in these dummy entries are initialised to 0.
if (has_above && has_left) { // both edges available
const int above_intra = !is_inter_block(above_mbmi);
const int left_intra = !is_inter_block(left_mbmi);
const int above_intra = !is_inter_block(above_mi);
const int left_intra = !is_inter_block(left_mi);
if (above_intra && left_intra) { // intra/intra
pred_context = 2;
} else if (above_intra || left_intra) { // intra/inter or inter/intra
const MB_MODE_INFO *edge_mbmi = above_intra ? left_mbmi : above_mbmi;
if (!has_second_ref(edge_mbmi))
pred_context = 4 * (edge_mbmi->ref_frame[0] == LAST_FRAME);
const MODE_INFO *edge_mi = above_intra ? left_mi : above_mi;
if (!has_second_ref(edge_mi))
pred_context = 4 * (edge_mi->ref_frame[0] == LAST_FRAME);
else
pred_context = 1 + (edge_mbmi->ref_frame[0] == LAST_FRAME ||
edge_mbmi->ref_frame[1] == LAST_FRAME);
pred_context = 1 + (edge_mi->ref_frame[0] == LAST_FRAME ||
edge_mi->ref_frame[1] == LAST_FRAME);
} else { // inter/inter
const int above_has_second = has_second_ref(above_mbmi);
const int left_has_second = has_second_ref(left_mbmi);
const MV_REFERENCE_FRAME above0 = above_mbmi->ref_frame[0];
const MV_REFERENCE_FRAME above1 = above_mbmi->ref_frame[1];
const MV_REFERENCE_FRAME left0 = left_mbmi->ref_frame[0];
const MV_REFERENCE_FRAME left1 = left_mbmi->ref_frame[1];
const int above_has_second = has_second_ref(above_mi);
const int left_has_second = has_second_ref(left_mi);
const MV_REFERENCE_FRAME above0 = above_mi->ref_frame[0];
const MV_REFERENCE_FRAME above1 = above_mi->ref_frame[1];
const MV_REFERENCE_FRAME left0 = left_mi->ref_frame[0];
const MV_REFERENCE_FRAME left1 = left_mi->ref_frame[1];
if (above_has_second && left_has_second) {
pred_context = 1 + (above0 == LAST_FRAME || above1 == LAST_FRAME ||
......@@ -234,15 +234,15 @@ int vp9_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
}
}