Commit d1577427 authored by Scott LaVarnway, committed by Gerrit Code Review

Merge "VP9: Move ref_mvs[][] and mode_context[] from MB_MODE_INFO"

parents a42df86c c06d56cc
......@@ -81,12 +81,6 @@ typedef struct {
// TODO(slavarnway): Delete and use bmi[3].as_mv[] instead.
int_mv mv[2];
#if CONFIG_VP9_ENCODER
// TODO(slavarnway): Move to encoder
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
uint8_t mode_context[MAX_REF_FRAMES];
#endif
} MB_MODE_INFO;
typedef struct MODE_INFO {
......
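The two fields removed above were only ever filled in by the encoder (they sat behind CONFIG_VP9_ENCODER), yet they lived in MB_MODE_INFO, which is stored once per 8x8 mode-info unit for the whole frame. Moving them into a separate, encoder-only MB_MODE_INFO_EXT array (introduced in vp9_block.h below) shrinks every shared mode-info entry. A minimal sketch of the per-entry saving, using simplified stand-in types rather than the real headers (MAX_REF_FRAMES is 4 and MAX_MV_REF_CANDIDATES is 2 in VP9):

  #include <stdio.h>
  #include <stdint.h>

  /* Simplified stand-ins; the real types live in vp9_blockd.h / vp9_mv.h. */
  #define MAX_REF_FRAMES 4
  #define MAX_MV_REF_CANDIDATES 2
  typedef struct { int16_t row, col; } MV;
  typedef union { uint32_t as_int; MV as_mv; } int_mv;

  /* The payload that used to sit (in encoder builds) in every MB_MODE_INFO. */
  typedef struct {
    int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
    uint8_t mode_context[MAX_REF_FRAMES];
  } moved_fields;

  int main(void) {
    /* Rough per-8x8-block saving in the shared mode-info array. */
    printf("bytes moved out of each MB_MODE_INFO: %zu\n", sizeof(moved_fields));
    return 0;
  }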
......@@ -242,6 +242,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
const MACROBLOCKD *const xd = &x->e_mbd;
const struct segmentation *const seg = &cm->seg;
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
const PREDICTION_MODE mode = mbmi->mode;
const int segment_id = mbmi->segment_id;
const BLOCK_SIZE bsize = mbmi->sb_type;
......@@ -288,7 +289,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
}
write_intra_mode(w, mbmi->uv_mode, cm->fc->uv_mode_prob[mode]);
} else {
const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
const vp9_prob *const inter_probs = cm->fc->inter_mode_probs[mode_ctx];
write_ref_frames(cm, xd, w);
......@@ -321,7 +322,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
if (b_mode == NEWMV) {
for (ref = 0; ref < 1 + is_compound; ++ref)
vp9_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
&mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
nmvc, allow_hp);
}
}
......@@ -330,7 +331,7 @@ static void pack_inter_mode_mvs(VP9_COMP *cpi, const MODE_INFO *mi,
if (mode == NEWMV) {
for (ref = 0; ref < 1 + is_compound; ++ref)
vp9_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
&mbmi->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
&mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
allow_hp);
}
}
......@@ -384,6 +385,9 @@ static void write_modes_b(VP9_COMP *cpi, const TileInfo *const tile,
xd->mi = cm->mi_grid_visible + (mi_row * cm->mi_stride + mi_col);
m = xd->mi[0];
cpi->td.mb.mbmi_ext = cpi->td.mb.mbmi_ext_base +
(mi_row * cm->mi_cols + mi_col);
set_mi_row_col(xd, tile,
mi_row, num_8x8_blocks_high_lookup[m->mbmi.sb_type],
mi_col, num_8x8_blocks_wide_lookup[m->mbmi.sb_type],
......
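Bitstream packing runs as a separate pass over the already-encoded frame, so the ref_mvs and mode_context chosen during mode decision have to survive until write_modes_b revisits each block; that is why write_modes_b now repositions x->mbmi_ext from the frame-sized mbmi_ext_base array before each block is packed. What pack_inter_mode_mvs actually pulls from the record is small: the mode context selects which probability row codes the inter mode, and ref_mvs[ref][0] is the predictor that NEWMV vectors are coded against. A minimal sketch with stand-in types (the constants reflect VP9's seven inter-mode contexts and three tree probabilities, but the structs are illustrative only):

  #include <stdint.h>

  #define INTER_MODE_CONTEXTS 7      /* inter-mode contexts in VP9 */
  #define MAX_REF_FRAMES 4
  #define MAX_MV_REF_CANDIDATES 2

  typedef struct { int16_t row, col; } MV;
  typedef struct {                   /* stand-in for MB_MODE_INFO_EXT */
    MV ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
    uint8_t mode_context[MAX_REF_FRAMES];
  } ext_info;

  typedef struct {
    const uint8_t *inter_mode_probs; /* row of the inter-mode probability table */
    MV newmv_predictor;              /* reference MV that NEWMV is coded against */
  } writer_inputs;

  /* The two things pack_inter_mode_mvs now reads from the ext record rather
     than from MB_MODE_INFO, for one reference frame of the block. */
  static writer_inputs select_writer_inputs(
      const uint8_t inter_mode_probs[INTER_MODE_CONTEXTS][3],
      const ext_info *ext, int ref_frame) {
    writer_inputs w;
    w.inter_mode_probs = inter_mode_probs[ext->mode_context[ref_frame]];
    w.newmv_predictor = ext->ref_mvs[ref_frame][0];
    return w;
  }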
......@@ -47,11 +47,18 @@ struct macroblock_plane {
typedef unsigned int vp9_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
[COEFF_CONTEXTS][ENTROPY_TOKENS];
typedef struct {
int_mv ref_mvs[MAX_REF_FRAMES][MAX_MV_REF_CANDIDATES];
uint8_t mode_context[MAX_REF_FRAMES];
} MB_MODE_INFO_EXT;
typedef struct macroblock MACROBLOCK;
struct macroblock {
struct macroblock_plane plane[MAX_MB_PLANE];
MACROBLOCKD e_mbd;
MB_MODE_INFO_EXT *mbmi_ext;
MB_MODE_INFO_EXT *mbmi_ext_base;
int skip_block;
int select_tx_size;
int skip_recode;
......
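MACROBLOCK gains two pointers: mbmi_ext_base, the start of the frame-wide array owned by the compressor (allocated in vp9_encoder.c below), and mbmi_ext, a cursor to the record of the block currently being worked on. A minimal sketch of that base-plus-cursor pattern with stand-in types; note the ext array is packed and indexed with cm->mi_cols, whereas the mode-info grid itself is indexed with cm->mi_stride:

  #include <stdint.h>

  /* Stand-in types; the real definitions are in vp9_block.h. */
  typedef struct { uint8_t mode_context[4]; } ext_rec;

  typedef struct {
    ext_rec *mbmi_ext;       /* record for the block currently being coded */
    ext_rec *mbmi_ext_base;  /* frame-wide array, one record per mi unit */
  } mb_sketch;

  /* Reposition the cursor, mirroring set_mode_info_offsets / write_modes_b. */
  static void point_at_block(mb_sketch *x, int mi_cols, int mi_row, int mi_col) {
    x->mbmi_ext = x->mbmi_ext_base + (long)mi_row * mi_cols + mi_col;
  }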
......@@ -12,6 +12,7 @@
#define VP9_ENCODER_VP9_CONTEXT_TREE_H_
#include "vp9/common/vp9_blockd.h"
#include "vp9/encoder/vp9_block.h"
struct VP9_COMP;
struct VP9Common;
......@@ -20,6 +21,7 @@ struct ThreadData;
// Structure to hold snapshot of coding context during the mode picking process
typedef struct {
MODE_INFO mic;
MB_MODE_INFO_EXT mbmi_ext;
uint8_t *zcoeff_blk;
tran_low_t *coeff[MAX_MB_PLANE][3];
tran_low_t *qcoeff[MAX_MB_PLANE][3];
......
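PICK_MODE_CONTEXT now snapshots the ext record alongside mic, so a mode decision cached in the partition tree can be replayed later: update_state, update_state_rt and fill_mode_info_sb copy it back into x->mbmi_ext when that decision is committed. A minimal sketch of the save/restore pattern with stand-in types:

  /* Stand-ins for PICK_MODE_CONTEXT and struct macroblock. */
  typedef struct { unsigned char mode_context[4]; } ext_rec;
  typedef struct { ext_rec mbmi_ext; /* ...mic, coeff buffers... */ } pick_ctx;
  typedef struct { ext_rec *mbmi_ext; } mb;

  /* After a mode search for this block, keep a copy in the context node. */
  static void snapshot(pick_ctx *ctx, const mb *x) { ctx->mbmi_ext = *x->mbmi_ext; }

  /* When that search result is committed, copy it back to the frame array. */
  static void restore(mb *x, const pick_ctx *ctx) { *x->mbmi_ext = ctx->mbmi_ext; }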
......@@ -170,12 +170,14 @@ static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, MACROBLOCK *x,
// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_mode_info_offsets(VP9_COMMON *const cm,
MACROBLOCK *const x,
MACROBLOCKD *const xd,
int mi_row,
int mi_col) {
const int idx_str = xd->mi_stride * mi_row + mi_col;
xd->mi = cm->mi_grid_visible + idx_str;
xd->mi[0] = cm->mi + idx_str;
x->mbmi_ext = x->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
}
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
......@@ -190,7 +192,8 @@ static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
set_skip_context(xd, mi_row, mi_col);
set_mode_info_offsets(cm, xd, mi_row, mi_col);
set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
mbmi = &xd->mi[0]->mbmi;
......@@ -249,11 +252,12 @@ static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
}
static void set_block_size(VP9_COMP * const cpi,
MACROBLOCK *const x,
MACROBLOCKD *const xd,
int mi_row, int mi_col,
BLOCK_SIZE bsize) {
if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
set_mode_info_offsets(&cpi->common, xd, mi_row, mi_col);
set_mode_info_offsets(&cpi->common, x, xd, mi_row, mi_col);
xd->mi[0]->mbmi.sb_type = bsize;
}
}
......@@ -384,6 +388,7 @@ static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
}
static int set_vt_partitioning(VP9_COMP *cpi,
MACROBLOCK *const x,
MACROBLOCKD *const xd,
void *data,
BLOCK_SIZE bsize,
......@@ -414,7 +419,7 @@ static int set_vt_partitioning(VP9_COMP *cpi,
if (mi_col + block_width / 2 < cm->mi_cols &&
mi_row + block_height / 2 < cm->mi_rows &&
vt.part_variances->none.variance < threshold) {
set_block_size(cpi, xd, mi_row, mi_col, bsize);
set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
return 1;
}
return 0;
......@@ -432,7 +437,7 @@ static int set_vt_partitioning(VP9_COMP *cpi,
if (mi_col + block_width / 2 < cm->mi_cols &&
mi_row + block_height / 2 < cm->mi_rows &&
vt.part_variances->none.variance < threshold) {
set_block_size(cpi, xd, mi_row, mi_col, bsize);
set_block_size(cpi, x, xd, mi_row, mi_col, bsize);
return 1;
}
......@@ -444,8 +449,8 @@ static int set_vt_partitioning(VP9_COMP *cpi,
if (vt.part_variances->vert[0].variance < threshold &&
vt.part_variances->vert[1].variance < threshold &&
get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
set_block_size(cpi, xd, mi_row, mi_col, subsize);
set_block_size(cpi, xd, mi_row, mi_col + block_width / 2, subsize);
set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
set_block_size(cpi, x, xd, mi_row, mi_col + block_width / 2, subsize);
return 1;
}
}
......@@ -457,8 +462,8 @@ static int set_vt_partitioning(VP9_COMP *cpi,
if (vt.part_variances->horz[0].variance < threshold &&
vt.part_variances->horz[1].variance < threshold &&
get_plane_block_size(subsize, &xd->plane[1]) < BLOCK_INVALID) {
set_block_size(cpi, xd, mi_row, mi_col, subsize);
set_block_size(cpi, xd, mi_row + block_height / 2, mi_col, subsize);
set_block_size(cpi, x, xd, mi_row, mi_col, subsize);
set_block_size(cpi, x, xd, mi_row + block_height / 2, mi_col, subsize);
return 1;
}
}
......@@ -770,7 +775,7 @@ static int choose_partitioning(VP9_COMP *cpi,
const int block_height = num_8x8_blocks_high_lookup[BLOCK_64X64];
if (mi_col + block_width / 2 < cm->mi_cols &&
mi_row + block_height / 2 < cm->mi_rows) {
set_block_size(cpi, xd, mi_row, mi_col, BLOCK_64X64);
set_block_size(cpi, x, xd, mi_row, mi_col, BLOCK_64X64);
return 0;
}
}
......@@ -903,13 +908,13 @@ static int choose_partitioning(VP9_COMP *cpi,
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold.
if ( mi_col + 8 > cm->mi_cols || mi_row + 8 > cm->mi_rows ||
!set_vt_partitioning(cpi, xd, &vt, BLOCK_64X64, mi_row, mi_col,
!set_vt_partitioning(cpi, x, xd, &vt, BLOCK_64X64, mi_row, mi_col,
thresholds[0], BLOCK_16X16, force_split[0])) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
const int i2 = i << 2;
if (!set_vt_partitioning(cpi, xd, &vt.split[i], BLOCK_32X32,
if (!set_vt_partitioning(cpi, x, xd, &vt.split[i], BLOCK_32X32,
(mi_row + y32_idx), (mi_col + x32_idx),
thresholds[1], BLOCK_16X16,
force_split[i + 1])) {
......@@ -922,7 +927,7 @@ static int choose_partitioning(VP9_COMP *cpi,
v16x16 *vtemp = (!is_key_frame &&
variance4x4downsample[i2 + j] == 1) ?
&vt2[i2 + j] : &vt.split[i].split[j];
if (!set_vt_partitioning(cpi, xd, vtemp, BLOCK_16X16,
if (!set_vt_partitioning(cpi, x, xd, vtemp, BLOCK_16X16,
mi_row + y32_idx + y16_idx,
mi_col + x32_idx + x16_idx,
thresholds[2],
......@@ -932,18 +937,18 @@ static int choose_partitioning(VP9_COMP *cpi,
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
if (use_4x4_partition) {
if (!set_vt_partitioning(cpi, xd, &vtemp->split[k],
if (!set_vt_partitioning(cpi, x, xd, &vtemp->split[k],
BLOCK_8X8,
mi_row + y32_idx + y16_idx + y8_idx,
mi_col + x32_idx + x16_idx + x8_idx,
thresholds[3], BLOCK_8X8, 0)) {
set_block_size(cpi, xd,
set_block_size(cpi, x, xd,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx),
BLOCK_4X4);
}
} else {
set_block_size(cpi, xd,
set_block_size(cpi, x, xd,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx),
BLOCK_8X8);
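The changes to the variance-partition helpers above are plumbing: set_block_size and set_vt_partitioning do not use the ext record themselves, but they call down into set_mode_info_offsets, which now also repositions x->mbmi_ext, so MACROBLOCK *x is threaded through every call site in choose_partitioning. A compact call-chain sketch with hypothetical stand-in types:

  /* x is carried along only so the innermost call can reposition
     x->mbmi_ext alongside xd->mi. */
  typedef struct { int mi_cols; } common_s;
  typedef struct { int unused; } xd_s;
  typedef struct { int *mbmi_ext; int *mbmi_ext_base; } x_s;

  static void set_mode_info_offsets_s(common_s *cm, x_s *x, xd_s *xd,
                                      int mi_row, int mi_col) {
    (void)xd;  /* the real helper also repositions xd->mi here */
    x->mbmi_ext = x->mbmi_ext_base + (long)mi_row * cm->mi_cols + mi_col;
  }

  static void set_block_size_s(common_s *cm, x_s *x, xd_s *xd,
                               int mi_row, int mi_col) {
    set_mode_info_offsets_s(cm, x, xd, mi_row, mi_col);
    /* ...then the real set_block_size stamps sb_type on the mode info. */
  }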
......@@ -988,6 +993,7 @@ static void update_state(VP9_COMP *cpi, ThreadData *td,
assert(mi->mbmi.sb_type == bsize);
*mi_addr = *mi;
*x->mbmi_ext = ctx->mbmi_ext;
// If segmentation in use
if (seg->enabled) {
......@@ -1289,6 +1295,7 @@ static void update_stats(VP9_COMMON *cm, ThreadData *td) {
const MACROBLOCKD *const xd = &x->e_mbd;
const MODE_INFO *const mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
const BLOCK_SIZE bsize = mbmi->sb_type;
if (!frame_is_intra_only(cm)) {
......@@ -1321,7 +1328,7 @@ static void update_stats(VP9_COMMON *cm, ThreadData *td) {
}
if (inter_block &&
!segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
const int mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
const int mode_ctx = mbmi_ext->mode_context[mbmi->ref_frame[0]];
if (bsize >= BLOCK_8X8) {
const PREDICTION_MODE mode = mbmi->mode;
++counts->inter_mode[mode_ctx][INTER_OFFSET(mode)];
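update_stats now reads the inter-mode context from the ext record when accumulating frame-level counts. A minimal sketch of the counter update with illustrative stand-ins (in the real enum the four inter modes follow the intra modes, which is what INTER_OFFSET compensates for):

  enum { NEARESTMV, NEARMV, ZEROMV, NEWMV };    /* relative order only */
  #define INTER_OFFSET(mode) ((mode) - NEARESTMV)
  #define INTER_MODE_CONTEXTS 7
  #define INTER_MODES 4

  /* mode_ctx comes from mbmi_ext->mode_context[ref_frame[0]] and selects
     which per-context histogram is incremented. */
  static void count_inter_mode(
      unsigned int counts[INTER_MODE_CONTEXTS][INTER_MODES],
      int mode_ctx, int mode) {
    ++counts[mode_ctx][INTER_OFFSET(mode)];
  }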
......@@ -1682,6 +1689,7 @@ static void update_state_rt(VP9_COMP *cpi, ThreadData *td,
const int y_mis = MIN(bh, cm->mi_rows - mi_row);
*(xd->mi[0]) = ctx->mic;
*(x->mbmi_ext) = ctx->mbmi_ext;
if (seg->enabled && cpi->oxcf.aq_mode) {
// For in frame complexity AQ or variance AQ, copy segment_id from
......@@ -2960,28 +2968,33 @@ static void fill_mode_info_sb(VP9_COMMON *cm, MACROBLOCK *x,
switch (partition) {
case PARTITION_NONE:
set_mode_info_offsets(cm, xd, mi_row, mi_col);
set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
*(xd->mi[0]) = pc_tree->none.mic;
*(x->mbmi_ext) = pc_tree->none.mbmi_ext;
duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, bsize);
break;
case PARTITION_VERT:
set_mode_info_offsets(cm, xd, mi_row, mi_col);
set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
*(xd->mi[0]) = pc_tree->vertical[0].mic;
*(x->mbmi_ext) = pc_tree->vertical[0].mbmi_ext;
duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
if (mi_col + hbs < cm->mi_cols) {
set_mode_info_offsets(cm, xd, mi_row, mi_col + hbs);
set_mode_info_offsets(cm, x, xd, mi_row, mi_col + hbs);
*(xd->mi[0]) = pc_tree->vertical[1].mic;
*(x->mbmi_ext) = pc_tree->vertical[1].mbmi_ext;
duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col + hbs, subsize);
}
break;
case PARTITION_HORZ:
set_mode_info_offsets(cm, xd, mi_row, mi_col);
set_mode_info_offsets(cm, x, xd, mi_row, mi_col);
*(xd->mi[0]) = pc_tree->horizontal[0].mic;
*(x->mbmi_ext) = pc_tree->horizontal[0].mbmi_ext;
duplicate_mode_info_in_sb(cm, xd, mi_row, mi_col, subsize);
if (mi_row + hbs < cm->mi_rows) {
set_mode_info_offsets(cm, xd, mi_row + hbs, mi_col);
set_mode_info_offsets(cm, x, xd, mi_row + hbs, mi_col);
*(xd->mi[0]) = pc_tree->horizontal[1].mic;
*(x->mbmi_ext) = pc_tree->horizontal[1].mbmi_ext;
duplicate_mode_info_in_sb(cm, xd, mi_row + hbs, mi_col, subsize);
}
break;
......@@ -3082,6 +3095,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col,
&this_rdc, bsize, ctx);
ctx->mic.mbmi = xd->mi[0]->mbmi;
ctx->mbmi_ext = *x->mbmi_ext;
ctx->skip_txfm[0] = x->skip_txfm[0];
ctx->skip = x->skip;
......@@ -3164,6 +3178,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
&pc_tree->horizontal[0]);
pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[0].skip = x->skip;
......@@ -3175,6 +3190,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
&pc_tree->horizontal[1]);
pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[1].skip = x->skip;
......@@ -3207,6 +3223,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, subsize,
&pc_tree->vertical[0]);
pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[0].skip = x->skip;
......@@ -3217,6 +3234,7 @@ static void nonrd_pick_partition(VP9_COMP *cpi, ThreadData *td,
&this_rdc, subsize,
&pc_tree->vertical[1]);
pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[1].skip = x->skip;
......@@ -3308,6 +3326,7 @@ static void nonrd_select_partition(VP9_COMP *cpi,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
subsize, &pc_tree->none);
pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
pc_tree->none.mbmi_ext = *x->mbmi_ext;
pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
pc_tree->none.skip = x->skip;
break;
......@@ -3316,6 +3335,7 @@ static void nonrd_select_partition(VP9_COMP *cpi,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
subsize, &pc_tree->vertical[0]);
pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[0].skip = x->skip;
if (mi_col + hbs < cm->mi_cols) {
......@@ -3323,6 +3343,7 @@ static void nonrd_select_partition(VP9_COMP *cpi,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
&this_rdc, subsize, &pc_tree->vertical[1]);
pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[1].skip = x->skip;
if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
......@@ -3337,6 +3358,7 @@ static void nonrd_select_partition(VP9_COMP *cpi,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, rd_cost,
subsize, &pc_tree->horizontal[0]);
pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[0].skip = x->skip;
if (mi_row + hbs < cm->mi_rows) {
......@@ -3344,6 +3366,7 @@ static void nonrd_select_partition(VP9_COMP *cpi,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
&this_rdc, subsize, &pc_tree->horizontal[1]);
pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[1].skip = x->skip;
if (this_rdc.rate != INT_MAX && this_rdc.dist != INT64_MAX &&
......@@ -3428,6 +3451,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
subsize, &pc_tree->none);
pc_tree->none.mic.mbmi = xd->mi[0]->mbmi;
pc_tree->none.mbmi_ext = *x->mbmi_ext;
pc_tree->none.skip_txfm[0] = x->skip_txfm[0];
pc_tree->none.skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
......@@ -3438,6 +3462,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
subsize, &pc_tree->vertical[0]);
pc_tree->vertical[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[0].mbmi_ext = *x->mbmi_ext;
pc_tree->vertical[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[0].skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
......@@ -3447,6 +3472,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs,
dummy_cost, subsize, &pc_tree->vertical[1]);
pc_tree->vertical[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->vertical[1].mbmi_ext = *x->mbmi_ext;
pc_tree->vertical[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->vertical[1].skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col + hbs,
......@@ -3458,6 +3484,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, dummy_cost,
subsize, &pc_tree->horizontal[0]);
pc_tree->horizontal[0].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[0].mbmi_ext = *x->mbmi_ext;
pc_tree->horizontal[0].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[0].skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row, mi_col, output_enabled,
......@@ -3468,6 +3495,7 @@ static void nonrd_use_partition(VP9_COMP *cpi,
nonrd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col,
dummy_cost, subsize, &pc_tree->horizontal[1]);
pc_tree->horizontal[1].mic.mbmi = xd->mi[0]->mbmi;
pc_tree->horizontal[1].mbmi_ext = *x->mbmi_ext;
pc_tree->horizontal[1].skip_txfm[0] = x->skip_txfm[0];
pc_tree->horizontal[1].skip = x->skip;
encode_b_rt(cpi, td, tile_info, tp, mi_row + hbs, mi_col,
......
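Throughout the nonrd (real-time) partition search above, each call to nonrd_pick_sb_modes is followed by the same snapshot of the winning mode into the corresponding pc_tree node, now including mbmi_ext. A minimal sketch of that recurring pattern as a single helper, using stand-in types (the encoder itself repeats the assignments inline):

  /* Stand-in types for illustration only. */
  typedef struct { int dummy; } mbmi_s;
  typedef struct { mbmi_s mbmi; } mode_info_s;
  typedef struct { int mode_context; } ext_s;
  typedef struct { mode_info_s mic; ext_s mbmi_ext; int skip_txfm0, skip; } node_s;
  typedef struct { mode_info_s **mi; } xd_s;
  typedef struct { xd_s e_mbd; ext_s *mbmi_ext; int skip_txfm[4]; int skip; } x_s;

  static void save_pick_result(node_s *node, const x_s *x) {
    node->mic.mbmi = x->e_mbd.mi[0]->mbmi;  /* chosen mode info */
    node->mbmi_ext = *x->mbmi_ext;          /* its ref_mvs / mode_context */
    node->skip_txfm0 = x->skip_txfm[0];
    node->skip = x->skip;
  }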
......@@ -229,12 +229,13 @@ void vp9_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
}
static void inc_mvs(const MB_MODE_INFO *mbmi, const int_mv mvs[2],
static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
const int_mv mvs[2],
nmv_context_counts *counts) {
int i;
for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
const MV *ref = &mbmi->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
const MV diff = {mvs[i].as_mv.row - ref->row,
mvs[i].as_mv.col - ref->col};
vp9_inc_mv(&diff, counts);
......@@ -245,6 +246,7 @@ void vp9_update_mv_count(ThreadData *td) {
const MACROBLOCKD *xd = &td->mb.e_mbd;
const MODE_INFO *mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
const MB_MODE_INFO_EXT *mbmi_ext = td->mb.mbmi_ext;
if (mbmi->sb_type < BLOCK_8X8) {
const int num_4x4_w = num_4x4_blocks_wide_lookup[mbmi->sb_type];
......@@ -255,12 +257,12 @@ void vp9_update_mv_count(ThreadData *td) {
for (idx = 0; idx < 2; idx += num_4x4_w) {
const int i = idy * 2 + idx;
if (mi->bmi[i].as_mode == NEWMV)
inc_mvs(mbmi, mi->bmi[i].as_mv, &td->counts->mv);
inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv, &td->counts->mv);
}
}
} else {
if (mbmi->mode == NEWMV)
inc_mvs(mbmi, mbmi->mv, &td->counts->mv);
inc_mvs(mbmi, mbmi_ext, mbmi->mv, &td->counts->mv);
}
}
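inc_mvs needs the ext record because motion-vector statistics are gathered on the difference between the coded vector and its predictor, ref_mvs[ref_frame][0], which no longer lives in MB_MODE_INFO. A minimal sketch of the differential that is fed to the MV counters, with stand-in types:

  #include <stdint.h>

  typedef struct { int16_t row, col; } MV;

  /* The quantity counted per reference: coded MV minus its predictor, the
     predictor now coming from MB_MODE_INFO_EXT's ref_mvs[ref][0]. */
  static MV mv_difference(MV coded, MV predictor) {
    MV diff;
    diff.row = (int16_t)(coded.row - predictor.row);
    diff.col = (int16_t)(coded.col - predictor.col);
    return diff;
  }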
......@@ -335,6 +335,9 @@ static void dealloc_compressor_data(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
int i;
vpx_free(cpi->mbmi_ext_base);
cpi->mbmi_ext_base = NULL;
vpx_free(cpi->tile_data);
cpi->tile_data = NULL;
......@@ -670,11 +673,25 @@ static void alloc_util_frame_buffers(VP9_COMP *cpi) {
"Failed to allocate scaled last source buffer");
}
static int alloc_context_buffers_ext(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
int mi_size = cm->mi_cols * cm->mi_rows;
cpi->mbmi_ext_base = vpx_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
if (!cpi->mbmi_ext_base)
return 1;
return 0;
}
void vp9_alloc_compressor_data(VP9_COMP *cpi) {
VP9_COMMON *cm = &cpi->common;
vp9_alloc_context_buffers(cm, cm->width, cm->height);
alloc_context_buffers_ext(cpi);
vpx_free(cpi->tile_tok[0][0]);
{
......@@ -716,6 +733,9 @@ static void update_frame_size(VP9_COMP *cpi) {
vp9_set_mb_mi(cm, cm->width, cm->height);
vp9_init_context_buffers(cm);
init_macroblockd(cm, xd);
cpi->td.mb.mbmi_ext_base = cpi->mbmi_ext_base;
memset(cpi->mbmi_ext_base, 0,
cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base));
set_tile_limits(cpi);
......
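The new array follows the usual per-frame buffer lifecycle: one MB_MODE_INFO_EXT per mi unit is calloc'd in alloc_context_buffers_ext, the thread's MACROBLOCK is re-pointed at it and the array is zeroed when the frame size is updated, and it is released in dealloc_compressor_data. A minimal sketch of that lifecycle, using plain calloc/free as stand-ins for the vpx allocators:

  #include <stdlib.h>
  #include <string.h>

  typedef struct { unsigned char mode_context[4]; } ext_rec;  /* stand-in */

  typedef struct {
    ext_rec *mbmi_ext_base;   /* owned here, like VP9_COMP::mbmi_ext_base */
  } compressor;

  static int alloc_ext(compressor *cpi, int mi_rows, int mi_cols) {
    cpi->mbmi_ext_base =
        calloc((size_t)mi_rows * mi_cols, sizeof(*cpi->mbmi_ext_base));
    return cpi->mbmi_ext_base == NULL;   /* non-zero on failure, as in the patch */
  }

  static void on_frame_size_change(compressor *cpi, int mi_rows, int mi_cols) {
    /* The encoder also re-points td.mb.mbmi_ext_base here before clearing. */
    memset(cpi->mbmi_ext_base, 0,
           (size_t)mi_rows * mi_cols * sizeof(*cpi->mbmi_ext_base));
  }

  static void dealloc_ext(compressor *cpi) {
    free(cpi->mbmi_ext_base);
    cpi->mbmi_ext_base = NULL;
  }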
......@@ -291,6 +291,7 @@ typedef struct IMAGE_STAT {
typedef struct VP9_COMP {
QUANTS quants;
ThreadData td;
MB_MODE_INFO_EXT *mbmi_ext_base;
DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][8]);
DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);
VP9_COMMON common;
......
......@@ -39,7 +39,8 @@ typedef struct {
int in_use;
} PRED_BUFFER;
static int mv_refs_rt(const VP9_COMMON *cm, const MACROBLOCKD *xd,
static int mv_refs_rt(const VP9_COMMON *cm, const MACROBLOCK *x,
const MACROBLOCKD *xd,
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
......@@ -111,7 +112,7 @@ static int mv_refs_rt(const VP9_COMMON *cm, const MACROBLOCKD *xd,
Done:
mi->mbmi.mode_context[ref_frame] = counter_to_context[context_counter];
x->mbmi_ext->mode_context[ref_frame] = counter_to_context[context_counter];
// Clamp vectors
for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
......@@ -131,7 +132,7 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
const int sadpb = x->sadperbit16;
MV mvp_full;
const int ref = mbmi->ref_frame[0];
const MV ref_mv = mbmi->ref_mvs[ref][0].as_mv;
const MV ref_mv = x->mbmi_ext->ref_mvs[ref][0].as_mv;
int dis;
int rate_mode;
const int tmp_col_min = x->mv_col_min;
......@@ -155,7 +156,7 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
assert(x->mv_best_ref_index[ref] <= 2);
if (x->mv_best_ref_index[ref] < 2)
mvp_full = mbmi->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
mvp_full = x->mbmi_ext->ref_mvs[ref][x->mv_best_ref_index[ref]].as_mv;
else
mvp_full = x->pred_mv[ref];
......@@ -178,7 +179,7 @@ static int combined_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
*rate_mv = vp9_mv_bit_cost(&mvp_full, &ref_mv,
x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
rate_mode = cpi->inter_mode_cost[mbmi->mode_context[ref]]
rate_mode = cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref]]
[INTER_OFFSET(NEWMV)];
rv = !(RDCOST(x->rdmult, x->rddiv, (*rate_mv + rate_mode), 0) >
best_rd_sofar);
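combined_motion_search now takes both the motion-vector predictor and the mode context from x->mbmi_ext: the cost of signalling NEWMV under that context is added to the MV rate, and the search bails out early if even a zero-distortion RD cost would already exceed best_rd_sofar. A minimal sketch of that early-out check, with a hypothetical rd_cost() standing in for the encoder's RDCOST macro:

  /* rate_mv: bits for the motion vector; rate_mode: bits for signalling NEWMV
     under this block's mode context (inter_mode_cost[mode_context[ref]]). */
  static int worth_continuing(long long (*rd_cost)(int rate, long long dist),
                              int rate_mv, int rate_mode,
                              long long best_rd_so_far) {
    /* Even with zero distortion, give up if the rate alone already loses. */
    return rd_cost(rate_mv + rate_mode, 0) <= best_rd_so_far;
  }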
......@@ -776,7 +777,6 @@ static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x,
struct buf_2d yv12_mb[][MAX_MB_PLANE],
int *rate, int64_t *dist) {
MACROBLOCKD *xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
const BLOCK_SIZE uv_size = get_plane_block_size(bsize, &xd->plane[1]);
unsigned int var = var_y, sse = sse_y;
......@@ -850,7 +850,7 @@ static void encode_breakout_test(VP9_COMP *cpi, MACROBLOCK *x,
x->skip = 1;
// The cost of skip bit needs to be added.
*rate = cpi->inter_mode_cost[mbmi->mode_context[ref_frame]]
*rate = cpi->inter_mode_cost[x->mbmi_ext->mode_context[ref_frame]]
[INTER_OFFSET(this_mode)];
// More on this part of rate
......@@ -1172,7 +1172,7 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
frame_mv[ZEROMV][ref_frame].as_int = 0;
if ((cpi->ref_frame_flags & flag_list[ref_frame]) && (yv12 != NULL)) {
int_mv *const candidates = mbmi->ref_mvs[ref_frame];
int_mv *const candidates = x->mbmi_ext->ref_mvs[ref_frame];
const struct scale_factors *const sf = &cm->frame_refs[ref_frame - 1].sf;
vp9_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col,
......@@ -1181,9 +1181,9 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
if (cm->use_prev_frame_mvs)
vp9_find_mv_refs(cm, xd, xd->mi[0], ref_frame,
candidates, mi_row, mi_col, NULL, NULL,
xd->mi[0]->mbmi.mode_context);
x->mbmi_ext->mode_context);
else
const_motion[ref_frame] = mv_refs_rt(cm, xd, tile_info,
const_motion[ref_frame] = mv_refs_rt(cm, x, xd, tile_info,
xd->mi[0],
ref_frame, candidates,
mi_row, mi_col);
......@@ -1257,13 +1257,13 @@ void vp9_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,