static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize,
PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
  const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize = bsize;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (bsize >= BLOCK_8X8) {
ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
subsize = get_subsize(bsize, pc_tree->partitioning);
} else {
ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];
  if (output_enabled && bsize != BLOCK_4X4)
    cm->counts.partition[ctx][partition]++;

  switch (partition) {
    case PARTITION_NONE:
      encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->none);
      break;
    case PARTITION_VERT:
      encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->vertical[0]);
      if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
        encode_b(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize,
                 &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
      encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
               &pc_tree->horizontal[0]);
      if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
        encode_b(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize,
                 &pc_tree->horizontal[1]);
      }
      break;
    case PARTITION_SPLIT:
if (bsize == BLOCK_8X8) {
encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
pc_tree->leaf_split[0]);
} else {
encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
pc_tree->split[0]);
encode_sb(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize,
pc_tree->split[1]);
encode_sb(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize,
pc_tree->split[2]);
encode_sb(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
subsize, pc_tree->split[3]);
}
assert("Invalid partition type.");
  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}

// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not, return the largest allowed partition size.
static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
int rows_left, int cols_left,
int *bh, int *bw) {
  if (rows_left <= 0 || cols_left <= 0) {
    return MIN(bsize, BLOCK_8X8);
  } else {
for (; bsize > 0; bsize -= 3) {
*bh = num_8x8_blocks_high_lookup[bsize];
*bw = num_8x8_blocks_wide_lookup[bsize];
if ((*bh <= rows_left) && (*bw <= cols_left)) {
break;
}
}
}
return bsize;
}
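/* Illustrative example (not part of the original source): BLOCK_SIZES is
 * ordered 4X4, 4X8, 8X4, 8X8, 8X16, 16X8, 16X16, ..., so "bsize -= 3" steps
 * from one square size down to the next smaller square. E.g. a request for
 * BLOCK_64X64 with rows_left = 4 and cols_left = 6 (a partial SB64 at the
 * frame border) rejects 64X64 (*bh = *bw = 8) and stops at 32X32
 * (*bh = *bw = 4), which fits.
 */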
static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
    int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
    BLOCK_SIZE bsize, MODE_INFO *mi_8x8) {
int bh = bh_in;
int r, c;
for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
int bw = bw_in;
for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
const int index = r * mis + c;
mi_8x8[index].src_mi = mi + index;
mi_8x8[index].src_mi->mbmi.sb_type = find_partition_size(bsize,
row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
}
}
}
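/* Note (added for clarity): find_partition_size() writes the chosen block
 * dimensions back through &bh and &bw, so once a block is shrunk to fit the
 * border, the walk above continues with the smaller row/column stride.
 */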
// This function attempts to set all mode info entries in a given SB64
// to the same block partition size.
// However, at the bottom and right borders of the image the requested size
// may not be allowed, in which case this code attempts to choose the largest
// allowable partition.
static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
                                   MODE_INFO *mi_8x8, int mi_row, int mi_col,
                                   BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mi_stride;
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
int bh = num_8x8_blocks_high_lookup[bsize];
  int bw = num_8x8_blocks_wide_lookup[bsize];
  int block_row, block_col;
assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
// Apply the requested partition size to the SB64 if it is all "in image"
if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
(row8x8_remaining >= MI_BLOCK_SIZE)) {
for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
int index = block_row * mis + block_col;
mi_8x8[index].src_mi = mi_upper_left + index;
mi_8x8[index].src_mi->mbmi.sb_type = bsize;
}
}
} else {
// Else this is a partial SB64.
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
                                 col8x8_remaining, bsize, mi_8x8);
  }
}
static void copy_partitioning(VP9_COMMON *cm, MODE_INFO *mi_8x8,
MODE_INFO *prev_mi_8x8) {
const int mis = cm->mi_stride;
int block_row, block_col;
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
MODE_INFO *const prev_mi =
prev_mi_8x8[block_row * mis + block_col].src_mi;
const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
if (prev_mi) {
const ptrdiff_t offset = prev_mi - cm->prev_mi;
mi_8x8[block_row * mis + block_col].src_mi = cm->mi + offset;
        mi_8x8[block_row * mis + block_col].src_mi->mbmi.sb_type = sb_type;
      }
    }
  }
}
static void constrain_copy_partitioning(VP9_COMP *const cpi,
                                        const TileInfo *const tile,
                                        MODE_INFO *mi_8x8,
                                        MODE_INFO *prev_mi_8x8,
                                        int mi_row, int mi_col,
BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mi_stride;
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
MODE_INFO *const mi_upper_left = cm->mi + mi_row * mis + mi_col;
const int bh = num_8x8_blocks_high_lookup[bsize];
const int bw = num_8x8_blocks_wide_lookup[bsize];
int block_row, block_col;
assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
  // If the SB64 is all "in image".
if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
(row8x8_remaining >= MI_BLOCK_SIZE)) {
for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
const int index = block_row * mis + block_col;
        MODE_INFO *prev_mi = prev_mi_8x8[index].src_mi;
        const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
// Use previous partition if block size is not larger than bsize.
if (prev_mi && sb_type <= bsize) {
int block_row2, block_col2;
for (block_row2 = 0; block_row2 < bh; ++block_row2) {
for (block_col2 = 0; block_col2 < bw; ++block_col2) {
const int index2 = (block_row + block_row2) * mis +
block_col + block_col2;
if (prev_mi) {
const ptrdiff_t offset = prev_mi - cm->prev_mi;
mi_8x8[index2].src_mi = cm->mi + offset;
mi_8x8[index2].src_mi->mbmi.sb_type = prev_mi->mbmi.sb_type;
}
}
}
} else {
// Otherwise, use fixed partition of size bsize.
mi_8x8[index].src_mi = mi_upper_left + index;
mi_8x8[index].src_mi->mbmi.sb_type = bsize;
}
}
}
} else {
    // Else this is a partial SB64, copy the previous partition.
    copy_partitioning(cm, mi_8x8, prev_mi_8x8);
  }
}

const struct {
int row;
int col;
} coord_lookup[16] = {
// 32x32 index = 0
{0, 0}, {0, 2}, {2, 0}, {2, 2},
// 32x32 index = 1
{0, 4}, {0, 6}, {2, 4}, {2, 6},
// 32x32 index = 2
{4, 0}, {4, 2}, {6, 0}, {6, 2},
// 32x32 index = 3
{4, 4}, {4, 6}, {6, 4}, {6, 6},
};
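/* Note (added for clarity): coord_lookup[4 * q + j] is the (row, col) offset,
 * in 8x8 mode-info units, of the j-th 16x16 block inside the q-th 32x32
 * quadrant of the SB64.
 */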
static void set_source_var_based_partition(VP9_COMP *cpi,
const TileInfo *const tile,
                                           MODE_INFO *mi_8x8,
                                           int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  const int mis = cm->mi_stride;
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
// In-image SB64
if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
(row8x8_remaining >= MI_BLOCK_SIZE)) {
int i, j;
int index;
diff d32[4];
const int offset = (mi_row >> 1) * cm->mb_cols + (mi_col >> 1);
int is_larger_better = 0;
int use32x32 = 0;
unsigned int thr = cpi->source_var_thresh;
vpx_memset(d32, 0, 4 * sizeof(diff));
for (i = 0; i < 4; i++) {
diff *d16[4];
for (j = 0; j < 4; j++) {
int b_mi_row = coord_lookup[i * 4 + j].row;
int b_mi_col = coord_lookup[i * 4 + j].col;
int boffset = b_mi_row / 2 * cm->mb_cols +
b_mi_col / 2;
d16[j] = cpi->source_diff_var + offset + boffset;
index = b_mi_row * mis + b_mi_col;
mi_8x8[index].src_mi = mi_upper_left + index;
mi_8x8[index].src_mi->mbmi.sb_type = BLOCK_16X16;
// TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
// size to further improve quality.
}
is_larger_better = (d16[0]->var < thr) && (d16[1]->var < thr) &&
(d16[2]->var < thr) && (d16[3]->var < thr);
// Use 32x32 partition
if (is_larger_better) {
use32x32 += 1;
for (j = 0; j < 4; j++) {
d32[i].sse += d16[j]->sse;
d32[i].sum += d16[j]->sum;
}
d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
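        // Note (added for clarity): >> 10 divides sum^2 by 1024, the pixel
        // count of a 32x32 block, i.e. var = sse - sum^2 / N.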
index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
mi_8x8[index].src_mi = mi_upper_left + index;
        mi_8x8[index].src_mi->mbmi.sb_type = BLOCK_32X32;
      }
    }

if (use32x32 == 4) {
thr <<= 1;
is_larger_better = (d32[0].var < thr) && (d32[1].var < thr) &&
(d32[2].var < thr) && (d32[3].var < thr);
// Use 64x64 partition
if (is_larger_better) {
mi_8x8[0].src_mi = mi_upper_left;
mi_8x8[0].src_mi->mbmi.sb_type = BLOCK_64X64;
}
}
} else { // partial in-image SB64
int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
                                 row8x8_remaining, col8x8_remaining,
                                 BLOCK_16X16, mi_8x8);
  }
}
static int is_background(const VP9_COMP *cpi, const TileInfo *const tile,
int mi_row, int mi_col) {
// This assumes the input source frames are of the same dimension.
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
const int x = mi_col * MI_SIZE;
const int y = mi_row * MI_SIZE;
const int src_stride = cpi->Source->y_stride;
const uint8_t *const src = &cpi->Source->y_buffer[y * src_stride + x];
const int pre_stride = cpi->Last_Source->y_stride;
const uint8_t *const pre = &cpi->Last_Source->y_buffer[y * pre_stride + x];
int this_sad = 0;
int threshold = 0;
if (row8x8_remaining >= MI_BLOCK_SIZE &&
col8x8_remaining >= MI_BLOCK_SIZE) {
this_sad = cpi->fn_ptr[BLOCK_64X64].sdf(src, src_stride, pre, pre_stride);
threshold = (1 << 12);
} else {
int r, c;
for (r = 0; r < row8x8_remaining; r += 2)
for (c = 0; c < col8x8_remaining; c += 2)
this_sad += cpi->fn_ptr[BLOCK_16X16].sdf(src, src_stride,
pre, pre_stride);
threshold = (row8x8_remaining * col8x8_remaining) << 6;
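    // Note (added for clarity): the threshold scales with the visible area;
    // a full SB64 would give 64 << 6 == 1 << 12, matching the whole-block
    // threshold above.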
  }

  return this_sad < threshold;
}
static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO *prev_mi_8x8,
                         const int motion_thresh) {
const int mis = cm->mi_stride;
int block_row, block_col;
if (cm->prev_mi) {
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
const MODE_INFO *prev_mi =
prev_mi_8x8[block_row * mis + block_col].src_mi;
        if (prev_mi) {
          if (abs(prev_mi->mbmi.mv[0].as_mv.row) > motion_thresh ||
              abs(prev_mi->mbmi.mv[0].as_mv.col) > motion_thresh)
return 1;
}
}
}
}
return 0;
}
static void update_state_rt(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
int mi_row, int mi_col, int bsize) {
VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
const struct segmentation *const seg = &cm->seg;
*(xd->mi[0].src_mi) = ctx->mic;
xd->mi[0].src_mi = &xd->mi[0];
if (seg->enabled && cpi->oxcf.aq_mode) {
    // For in-frame complexity AQ or variance AQ, copy segment_id from the
    // segmentation map.
    if (cpi->oxcf.aq_mode == COMPLEXITY_AQ ||
        cpi->oxcf.aq_mode == VARIANCE_AQ) {
const uint8_t *const map = seg->update_map ? cpi->segmentation_map
: cm->last_frame_seg_map;
mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
} else {
// Setting segmentation map for cyclic_refresh
vp9_cyclic_refresh_update_segment(cpi, mbmi, mi_row, mi_col, bsize, 1);
}
vp9_init_plane_quantizers(cpi, x);
}
  vp9_update_mv_count(cm, xd);

  if (cm->interp_filter == SWITCHABLE) {
    const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
    ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter];
  }

  x->skip_txfm[0] = mbmi->segment_id ? 0 : ctx->skip_txfm[0];
}
static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx) {
set_offsets(cpi, tile, mi_row, mi_col, bsize);
update_state_rt(cpi, ctx, mi_row, mi_col, bsize);
#if CONFIG_VP9_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity > 0 && output_enabled) {
vp9_denoiser_denoise(&cpi->denoiser, &cpi->mb, mi_row, mi_col,
MAX(BLOCK_8X8, bsize), ctx);
}
#endif
encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize, ctx);
(*tp)->token = EOSB_TOKEN;
(*tp)++;
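  // Note (added for clarity): EOSB_TOKEN terminates this superblock's token
  // run for the bitstream packetizer.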
}
static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize,
PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int bsl = b_width_log2_lookup[bsize], hbs = (1 << bsl) / 4;
int ctx;
PARTITION_TYPE partition;
BLOCK_SIZE subsize;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (bsize >= BLOCK_8X8) {
    const int idx_str = xd->mi_stride * mi_row + mi_col;
    MODE_INFO *mi_8x8 = cm->mi[idx_str].src_mi;
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = mi_8x8[0].mbmi.sb_type;
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];
  if (output_enabled && bsize != BLOCK_4X4)
    cm->counts.partition[ctx][partition]++;
switch (partition) {
case PARTITION_NONE:
encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
                  &pc_tree->none);
      break;
    case PARTITION_VERT:
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
&pc_tree->vertical[0]);
if (mi_col + hbs < cm->mi_cols && bsize > BLOCK_8X8) {
encode_b_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
                    subsize, &pc_tree->vertical[1]);
      }
      break;
    case PARTITION_HORZ:
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
&pc_tree->horizontal[0]);
if (mi_row + hbs < cm->mi_rows && bsize > BLOCK_8X8) {
encode_b_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
subsize, &pc_tree->horizontal[1]);
}
break;
case PARTITION_SPLIT:
subsize = get_subsize(bsize, PARTITION_SPLIT);
encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize,
pc_tree->split[0]);
      encode_sb_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
                   subsize, pc_tree->split[1]);
      encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
                   subsize, pc_tree->split[2]);
      encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
                   subsize, pc_tree->split[3]);
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
static void rd_use_partition(VP9_COMP *cpi,
                             const TileInfo *const tile,
                             MODE_INFO *mi_8x8,
                             TOKENEXTRA **tp, int mi_row, int mi_col,
                             BLOCK_SIZE bsize, int *rate, int64_t *dist,
                             int do_recon, PC_TREE *pc_tree) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int mis = cm->mi_stride;
const int bsl = b_width_log2_lookup[bsize];
const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
PARTITION_TYPE partition = PARTITION_NONE;
BLOCK_SIZE subsize;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  const int bss = (1 << bsl) / 4;
  int i, pl;
  int splits_below = 0;
  int last_part_rate = INT_MAX;
  int64_t last_part_dist = INT64_MAX;
  int64_t last_part_rd = INT64_MAX;
  int none_rate = INT_MAX;
  int64_t none_dist = INT64_MAX;
  int64_t none_rd = INT64_MAX;
  int chosen_rate = INT_MAX;
  int64_t chosen_dist = INT64_MAX;
  int64_t chosen_rd = INT64_MAX;
  BLOCK_SIZE sub_subsize = BLOCK_4X4;
BLOCK_SIZE bs_type = mi_8x8[0].src_mi->mbmi.sb_type;
int do_partition_search = 1;
PICK_MODE_CONTEXT *ctx = &pc_tree->none;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
assert(num_4x4_blocks_wide_lookup[bsize] ==
num_4x4_blocks_high_lookup[bsize]);
  partition = partition_lookup[bsl][bs_type];
  subsize = get_subsize(bsize, partition);

  pc_tree->partitioning = partition;
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) {
set_offsets(cpi, tile, mi_row, mi_col, bsize);
x->mb_energy = vp9_block_energy(cpi, x, bsize);
}
if (do_partition_search &&
cpi->sf.partition_search_type == SEARCH_PARTITION &&
cpi->sf.adjust_partitioning_from_last_frame) {
// Check if any of the sub blocks are further split.
if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
splits_below = 1;
for (i = 0; i < 4; i++) {
int jj = i >> 1, ii = i & 0x01;
MODE_INFO *this_mi = mi_8x8[jj * bss * mis + ii * bss].src_mi;
if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
splits_below = 0;
}
}
}
    // If the partition is not none, try none unless each of the 4 splits is
    // split even further.
if (partition != PARTITION_NONE && !splits_below &&
mi_row + (mi_step >> 1) < cm->mi_rows &&
mi_col + (mi_step >> 1) < cm->mi_cols) {
pc_tree->partitioning = PARTITION_NONE;
rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize,
ctx, INT64_MAX);
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (none_rate < INT_MAX) {
none_rate += cpi->partition_cost[pl][PARTITION_NONE];
none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist);
}
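      // Note (added for clarity): RDCOST folds rate and distortion into a
      // single Lagrangian cost J = lambda * rate + dist, with x->rdmult and
      // x->rddiv providing the fixed-point scaling of the two terms.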
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      mi_8x8[0].src_mi->mbmi.sb_type = bs_type;
      pc_tree->partitioning = partition;
    }
  }

  switch (partition) {
    case PARTITION_NONE:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, bsize, ctx, INT64_MAX);
      break;
    case PARTITION_HORZ:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize, &pc_tree->horizontal[0],
                       INT64_MAX);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
        int rt = 0;
        int64_t dt = 0;
        PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
        update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
        rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &rt, &dt,
                         subsize, &pc_tree->horizontal[1], INT64_MAX);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_VERT:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize, &pc_tree->vertical[0],
                       INT64_MAX);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
        int rt = 0;
        int64_t dt = 0;
        PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
        update_state(cpi, ctx, mi_row, mi_col, subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize, ctx);
        rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &rt, &dt,
                         subsize, &pc_tree->vertical[bsize > BLOCK_8X8],
                         INT64_MAX);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_SPLIT:
if (bsize == BLOCK_8X8) {
rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                         &last_part_dist, subsize, pc_tree->leaf_split[0],
                         INT64_MAX);
        break;
      }
      last_part_rate = 0;
      last_part_dist = 0;
      for (i = 0; i < 4; i++) {
int x_idx = (i & 1) * (mi_step >> 1);
        int y_idx = (i >> 1) * (mi_step >> 1);
        int jj = i >> 1, ii = i & 0x01;
        int rt;
        int64_t dt;
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp,
                         mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
                         i != 3, pc_tree->split[i]);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    default:
      assert(0);
      break;
  }

pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (last_part_rate < INT_MAX) {
last_part_rate += cpi->partition_cost[pl][partition];
last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist);
}
if (do_partition_search
&& cpi->sf.adjust_partitioning_from_last_frame
&& cpi->sf.partition_search_type == SEARCH_PARTITION
&& partition != PARTITION_SPLIT && bsize > BLOCK_8X8
&& (mi_row + mi_step < cm->mi_rows ||
mi_row + (mi_step >> 1) == cm->mi_rows)
&& (mi_col + mi_step < cm->mi_cols ||
mi_col + (mi_step >> 1) == cm->mi_cols)) {
BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
chosen_rate = 0;
chosen_dist = 0;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
pc_tree->partitioning = PARTITION_SPLIT;
// Split partition.
for (i = 0; i < 4; i++) {
int x_idx = (i & 1) * (mi_step >> 1);
int y_idx = (i >> 1) * (mi_step >> 1);
int rt = 0;
int64_t dt = 0;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
pc_tree->split[i]->partitioning = PARTITION_NONE;
rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &rt, &dt,
                       split_subsize, &pc_tree->split[i]->none,
                       INT64_MAX);
      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      if (rt == INT_MAX || dt == INT64_MAX) {
        chosen_rate = INT_MAX;
        chosen_dist = INT64_MAX;
        break;
      }
      chosen_rate += rt;
      chosen_dist += dt;
      if (i != 3)
        encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0,
                  split_subsize, pc_tree->split[i]);
      pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
                                   split_subsize);
      chosen_rate += cpi->partition_cost[pl][PARTITION_NONE];
    }
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (chosen_rate < INT_MAX) {
chosen_rate += cpi->partition_cost[pl][PARTITION_SPLIT];
      chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist);
    }
  }

  // If last_part is better, set the partitioning to that.
if (last_part_rd < chosen_rd) {
if (bsize >= BLOCK_8X8)
pc_tree->partitioning = partition;
chosen_rate = last_part_rate;
    chosen_dist = last_part_dist;
    chosen_rd = last_part_rd;
  }

  // If none was better, set the partitioning to that.
  if (none_rd < chosen_rd) {
if (bsize >= BLOCK_8X8)
pc_tree->partitioning = PARTITION_NONE;
chosen_rate = none_rate;
chosen_dist = none_dist;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// We must have chosen a partitioning and encoding or we'll fail later on.
// No other opportunities for success.
if (bsize == BLOCK_64X64)
assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX);
if (do_recon) {
int output_enabled = (bsize == BLOCK_64X64);
    // Check the projected output rate for this SB against its target
    // and, if necessary, apply a Q delta using segmentation to get
    // closer to the target.
if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
vp9_select_in_frame_q_segment(cpi, mi_row, mi_col,
                                    output_enabled, chosen_rate);
    }

if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
chosen_rate, chosen_dist);
encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize,
              pc_tree);
  }

  *rate = chosen_rate;
  *dist = chosen_dist;
}
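/* Summary note (added for clarity): rd_use_partition() rate-distortion tests
 * three candidates for the block: the partitioning inherited from mi_8x8
 * (last_part), an unsplit PARTITION_NONE alternative, and a forced
 * PARTITION_SPLIT, then keeps whichever has the lowest RD cost.
 */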
static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
BLOCK_16X16
};
static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
  BLOCK_64X64
};
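/* Note (added for clarity): both tables are indexed by an observed
 * BLOCK_SIZE and relax it by roughly one square size in each direction,
 * e.g. min_partition_size[BLOCK_32X32] == BLOCK_16X16 and
 * max_partition_size[BLOCK_16X16] == BLOCK_32X32.
 */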
// Look at all the mode_info entries for blocks that are part of this
// partition and find the min and max values for sb_type.
// At the moment this is designed to work on a 64x64 SB but could be
// adjusted to use a size parameter.
//
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one sb64.
static void get_sb_partition_size_range(MACROBLOCKD *xd, MODE_INFO *mi_8x8,
                                        BLOCK_SIZE *min_block_size,
                                        BLOCK_SIZE *max_block_size,
int bs_hist[BLOCK_SIZES]) {
int sb_width_in_blocks = MI_BLOCK_SIZE;
int sb_height_in_blocks = MI_BLOCK_SIZE;
int i, j;
int index = 0;
// Check the sb_type for each block that belongs to this region.
for (i = 0; i < sb_height_in_blocks; ++i) {
for (j = 0; j < sb_width_in_blocks; ++j) {
      MODE_INFO *mi = mi_8x8[index + j].src_mi;
      BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
bs_hist[sb_type]++;
*min_block_size = MIN(*min_block_size, sb_type);
      *max_block_size = MAX(*max_block_size, sb_type);
    }
    index += xd->mi_stride;
  }
}
// Next square block size less than or equal to the current block size.
static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
BLOCK_64X64
};
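/* Note (added for clarity): this maps any BLOCK_SIZE to the largest square
 * size that fits inside it, e.g. BLOCK_16X32 -> BLOCK_16X16.
 */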
// Look at neighboring blocks and set a min and max partition size based on
// what they chose.
static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
                                    int mi_row, int mi_col,
                                    BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
MODE_INFO *mi = xd->mi[0].src_mi;
const int left_in_image = xd->left_available && mi[-1].src_mi;
const int above_in_image = xd->up_available && mi[-xd->mi_stride].src_mi;
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
BLOCK_SIZE min_size = BLOCK_4X4;
BLOCK_SIZE max_size = BLOCK_64X64;
  int bh, bw;
  int i = 0;
int bs_hist[BLOCK_SIZES] = {0};
// Trap case where we do not have a prediction.
if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
min_size = BLOCK_64X64;
max_size = BLOCK_4X4;
// NOTE: each call to get_sb_partition_size_range() uses the previous
// passed in values for min and max as a starting point.
    // Find the min and max partition sizes used in the previous frame at
    // this location.
if (cm->frame_type != KEY_FRAME) {
MODE_INFO *prev_mi =
cm->prev_mip + cm->mi_stride + 1 + mi_row * xd->mi_stride + mi_col;
get_sb_partition_size_range(xd, prev_mi, &min_size, &max_size, bs_hist);
}
// Find the min and max partition sizes used in the left SB64
if (left_in_image) {
MODE_INFO *left_sb64_mi = mi[-MI_BLOCK_SIZE].src_mi;
get_sb_partition_size_range(xd, left_sb64_mi, &min_size, &max_size,
bs_hist);
}
// Find the min and max partition sizes used in the above SB64.
if (above_in_image) {
MODE_INFO *above_sb64_mi = mi[-xd->mi_stride * MI_BLOCK_SIZE].src_mi;
get_sb_partition_size_range(xd, above_sb64_mi, &min_size, &max_size,
                                  bs_hist);
    }

    // Adjust the observed min and max.
if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
min_size = min_partition_size[min_size];
max_size = max_partition_size[max_size];
} else if (cpi->sf.auto_min_max_partition_size ==
CONSTRAIN_NEIGHBORING_MIN_MAX) {
      // Adjust the search range based on the histogram of the observed
      // partition sizes from the left, above, and previous co-located blocks.
int sum = 0;
int first_moment = 0;
int second_moment = 0;
int var_unnormalized = 0;
for (i = 0; i < BLOCK_SIZES; i++) {
sum += bs_hist[i];
first_moment += bs_hist[i] * i;
second_moment += bs_hist[i] * i * i;
}
// if variance is small enough,
// adjust the range around its mean size, which gives a tighter range
var_unnormalized = second_moment - first_moment * first_moment / sum;
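      // Note (added for clarity): this equals n * variance of the size
      // indices (second_moment - n * mean^2), so the test below accepts
      // whenever that variance is at most 4.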
if (var_unnormalized <= 4 * sum) {
int mean = first_moment / sum;
min_size = min_partition_size[mean];
max_size = max_partition_size[mean];
} else {
min_size = min_partition_size[min_size];
max_size = max_partition_size[max_size];
}
    }
  }

  // Check border cases where max and min from neighbors may not be legal.
max_size = find_partition_size(max_size,
row8x8_remaining, col8x8_remaining,
&bh, &bw);
min_size = MIN(min_size, max_size);
// When use_square_partition_only is true, make sure at least one square
// partition is allowed by selecting the next smaller square size as
// *min_block_size.
if (cpi->sf.use_square_partition_only &&
next_square_size[max_size] < min_size) {
    min_size = next_square_size[max_size];
  }

*min_block_size = min_size;
  *max_block_size = max_size;
}
static void auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
int mi_row, int mi_col,
BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
MODE_INFO *mi_8x8 = xd->mi;
const int left_in_image = xd->left_available && mi_8x8[-1].src_mi;
  const int above_in_image = xd->up_available &&
                             mi_8x8[-xd->mi_stride].src_mi;
int row8x8_remaining = tile->mi_row_end - mi_row;
int col8x8_remaining = tile->mi_col_end - mi_col;
int bh, bw;
BLOCK_SIZE min_size = BLOCK_32X32;
BLOCK_SIZE max_size = BLOCK_8X8;
int bsl = mi_width_log2_lookup[BLOCK_64X64];
const int search_range_ctrl = (((mi_row + mi_col) >> bsl) +
get_chessboard_index(cm->current_video_frame)) & 0x1;
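  // Note (added for clarity): search_range_ctrl alternates, SB64-by-SB64 and
  // frame-by-frame in a checkerboard pattern, between neighbor-derived bounds
  // and the default [BLOCK_8X8, BLOCK_32X32] range below.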
// Trap case where we do not have a prediction.
if (search_range_ctrl &&
(left_in_image || above_in_image || cm->frame_type != KEY_FRAME)) {
    int block;
    MODE_INFO *mi;
    BLOCK_SIZE sb_type;
// Find the min and max partition sizes used in the left SB64.
if (left_in_image) {
      MODE_INFO *cur_mi;
      mi = mi_8x8[-1].src_mi;
      for (block = 0; block < MI_BLOCK_SIZE; ++block) {
        cur_mi = mi[block * xd->mi_stride].src_mi;
sb_type = cur_mi ? cur_mi->mbmi.sb_type : 0;
min_size = MIN(min_size, sb_type);
max_size = MAX(max_size, sb_type);
}
}
// Find the min and max partition sizes used in the above SB64.
if (above_in_image) {
mi = mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE].src_mi;
for (block = 0; block < MI_BLOCK_SIZE; ++block) {
sb_type = mi[block].src_mi ? mi[block].src_mi->mbmi.sb_type : 0;
min_size = MIN(min_size, sb_type);
max_size = MAX(max_size, sb_type);
}
}
min_size = min_partition_size[min_size];
max_size = find_partition_size(max_size, row8x8_remaining, col8x8_remaining,
&bh, &bw);
min_size = MIN(min_size, max_size);
min_size = MAX(min_size, BLOCK_8X8);
max_size = MIN(max_size, BLOCK_32X32);
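    // Note (added for clarity): the RT path always clamps the search window
    // to [BLOCK_8X8, BLOCK_32X32], matching the defaults in the else branch.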
} else {
min_size = BLOCK_8X8;
max_size = BLOCK_32X32;
}
*min_block_size = min_size;
*max_block_size = max_size;
}