static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile,
                      TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (bsize >= BLOCK_8X8) {
ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
subsize = *get_sb_partitioning(x, bsize);
} else {
ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];

  switch (partition) {
    case PARTITION_NONE:
      if (output_enabled && bsize >= BLOCK_8X8)
cm->counts.partition[ctx][PARTITION_NONE]++;
encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
break;
case PARTITION_VERT:
if (output_enabled)
cm->counts.partition[ctx][PARTITION_VERT]++;
*get_sb_index(x, subsize) = 0;
encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
if (mi_col + hbs < cm->mi_cols) {
*get_sb_index(x, subsize) = 1;
encode_b(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize);
}
break;
case PARTITION_HORZ:
if (output_enabled)
cm->counts.partition[ctx][PARTITION_HORZ]++;
*get_sb_index(x, subsize) = 0;
encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
if (mi_row + hbs < cm->mi_rows) {
*get_sb_index(x, subsize) = 1;
encode_b(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize);
}
break;
case PARTITION_SPLIT:
subsize = get_subsize(bsize, PARTITION_SPLIT);
if (output_enabled)
cm->counts.partition[ctx][PARTITION_SPLIT]++;
*get_sb_index(x, subsize) = 0;
encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
*get_sb_index(x, subsize) = 1;
encode_sb(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize);
*get_sb_index(x, subsize) = 2;
encode_sb(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize);
*get_sb_index(x, subsize) = 3;
encode_sb(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
subsize);
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }
if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
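/* Example of the recursion geometry: for bsize == BLOCK_64X64,
 * bsl = b_width_log2(bsize) = 4, so hbs = (1 << 4) / 4 = 4 mi units
 * (32 pixels).  PARTITION_SPLIT therefore recurses into the four 32x32
 * quadrants at mi offsets (0, 0), (0, hbs), (hbs, 0) and (hbs, hbs),
 * matching the four encode_sb() calls above.
 */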
// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not then return the largest allowed partition size
static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
int rows_left, int cols_left,
int *bh, int *bw) {
  if (rows_left <= 0 || cols_left <= 0) {
    return MIN(bsize, BLOCK_8X8);
  } else {
    for (; bsize > 0; bsize -= 3) {
*bh = num_8x8_blocks_high_lookup[bsize];
*bw = num_8x8_blocks_wide_lookup[bsize];
if ((*bh <= rows_left) && (*bw <= cols_left)) {
break;
}
}
}
return bsize;
}
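/* In the BLOCK_SIZE enum the square sizes are spaced three entries apart
 * (BLOCK_8X8 = 3, BLOCK_16X16 = 6, BLOCK_32X32 = 9, BLOCK_64X64 = 12),
 * so the "bsize -= 3" loop above steps down through square sizes only.
 * Illustrative call:
 *   int bh, bw;
 *   find_partition_size(BLOCK_64X64, 3, 5, &bh, &bw);
 *   // 64x64 spans 8 8x8 units, 32x32 spans 4 (> 3 rows left), 16x16
 *   // spans 2, which fits: returns BLOCK_16X16 with bh == bw == 2.
 */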
static void set_partial_b64x64_partition(MODE_INFO *mi, int mis,
int bh_in, int bw_in, int row8x8_remaining, int col8x8_remaining,
BLOCK_SIZE bsize, MODE_INFO **mi_8x8) {
int bh = bh_in;
int r, c;
for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
int bw = bw_in;
for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
const int index = r * mis + c;
mi_8x8[index] = mi + index;
mi_8x8[index]->mbmi.sb_type = find_partition_size(bsize,
row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
}
}
}
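/* Note that find_partition_size() writes the chosen size back through
 * &bh and &bw, so the loop strides above shrink as smaller partitions
 * are selected near the bottom/right edge of the frame.
 */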
// This function attempts to set all mode info entries in a given SB64
// to the same block partition size.
// However, at the bottom and right borders of the image the requested size
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
MODE_INFO **mi_8x8, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mi_stride;
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
int bh = num_8x8_blocks_high_lookup[bsize];
  int bw = num_8x8_blocks_wide_lookup[bsize];
  int block_row, block_col;
assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
// Apply the requested partition size to the SB64 if it is all "in image"
if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
(row8x8_remaining >= MI_BLOCK_SIZE)) {
for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
int index = block_row * mis + block_col;
mi_8x8[index] = mi_upper_left + index;
mi_8x8[index]->mbmi.sb_type = bsize;
}
}
} else {
// Else this is a partial SB64.
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw, row8x8_remaining,
                                 col8x8_remaining, bsize, mi_8x8);
  }
}
static void copy_partitioning(VP9_COMMON *cm, MODE_INFO **mi_8x8,
MODE_INFO **prev_mi_8x8) {
const int mis = cm->mi_stride;
int block_row, block_col;
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
MODE_INFO *const prev_mi = prev_mi_8x8[block_row * mis + block_col];
const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
if (prev_mi) {
const ptrdiff_t offset = prev_mi - cm->prev_mi;
mi_8x8[block_row * mis + block_col] = cm->mi + offset;
mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type;
}
}
}
}
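/* The pointer arithmetic above re-targets a previous-frame MODE_INFO to
 * the co-located entry in the current frame: (prev_mi - cm->prev_mi) is
 * the flat index of the block in the previous frame's mi array, and
 * adding that offset to cm->mi yields the same spatial position in the
 * current frame's array.
 */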
static void constrain_copy_partitioning(VP9_COMP *const cpi,
const TileInfo *const tile,
MODE_INFO **mi_8x8,
MODE_INFO **prev_mi_8x8,
int mi_row, int mi_col,
BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mi_stride;
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
MODE_INFO *const mi_upper_left = cm->mi + mi_row * mis + mi_col;
const int bh = num_8x8_blocks_high_lookup[bsize];
const int bw = num_8x8_blocks_wide_lookup[bsize];
int block_row, block_col;
assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
  // If the SB64 is all "in image".
if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
(row8x8_remaining >= MI_BLOCK_SIZE)) {
for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
const int index = block_row * mis + block_col;
MODE_INFO *prev_mi = prev_mi_8x8[index];
const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
// Use previous partition if block size is not larger than bsize.
if (prev_mi && sb_type <= bsize) {
int block_row2, block_col2;
for (block_row2 = 0; block_row2 < bh; ++block_row2) {
for (block_col2 = 0; block_col2 < bw; ++block_col2) {
const int index2 = (block_row + block_row2) * mis +
block_col + block_col2;
prev_mi = prev_mi_8x8[index2];
if (prev_mi) {
const ptrdiff_t offset = prev_mi - cm->prev_mi;
mi_8x8[index2] = cm->mi + offset;
mi_8x8[index2]->mbmi.sb_type = prev_mi->mbmi.sb_type;
}
}
}
} else {
// Otherwise, use fixed partition of size bsize.
mi_8x8[index] = mi_upper_left + index;
mi_8x8[index]->mbmi.sb_type = bsize;
}
}
}
} else {
    // Else this is a partial SB64, copy previous partition.
    copy_partitioning(cm, mi_8x8, prev_mi_8x8);
  }
}

const struct {
int row;
int col;
} coord_lookup[16] = {
// 32x32 index = 0
{0, 0}, {0, 2}, {2, 0}, {2, 2},
// 32x32 index = 1
{0, 4}, {0, 6}, {2, 4}, {2, 6},
// 32x32 index = 2
{4, 0}, {4, 2}, {6, 0}, {6, 2},
// 32x32 index = 3
{4, 4}, {4, 6}, {6, 4}, {6, 6},
};
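/* coord_lookup[] lists the (row, col) mi offsets of the sixteen 16x16
 * blocks in an SB64, grouped four at a time per 32x32 quadrant.  Each mi
 * unit is 8 pixels, so consecutive 16x16 blocks are 2 mi units apart.
 */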
static void set_source_var_based_partition(VP9_COMP *cpi,
const TileInfo *const tile,
MODE_INFO **mi_8x8,
int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  const int mis = cm->mi_stride;
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
// In-image SB64
if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
(row8x8_remaining >= MI_BLOCK_SIZE)) {
const int src_stride = x->plane[0].src.stride;
const int pre_stride = cpi->Last_Source->y_stride;
const uint8_t *src = x->plane[0].src.buf;
const int pre_offset = (mi_row * MI_SIZE) * pre_stride +
(mi_col * MI_SIZE);
const uint8_t *pre_src = cpi->Last_Source->y_buffer + pre_offset;
const unsigned int thr_32x32 = cpi->sf.source_var_thresh;
const unsigned int thr_64x64 = thr_32x32 << 1;
int i, j;
int index;
diff d32[4];
int use16x16 = 0;
for (i = 0; i < 4; i++) {
diff d16[4];
for (j = 0; j < 4; j++) {
int b_mi_row = coord_lookup[i * 4 + j].row;
int b_mi_col = coord_lookup[i * 4 + j].col;
int b_offset = b_mi_row * MI_SIZE * src_stride +
b_mi_col * MI_SIZE;
vp9_get_sse_sum_16x16(src + b_offset,
src_stride,
pre_src + b_offset,
pre_stride, &d16[j].sse, &d16[j].sum);
d16[j].var = d16[j].sse -
(((uint32_t)d16[j].sum * d16[j].sum) >> 8);
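        /* Variance from running sums: var = sse - sum^2 / n.  A 16x16
         * block has n = 256 pixels, hence the ">> 8"; the 32x32
         * aggregate below divides by 1024 via ">> 10". */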
index = b_mi_row * mis + b_mi_col;
mi_8x8[index] = mi_upper_left + index;
mi_8x8[index]->mbmi.sb_type = BLOCK_16X16;
// TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
// size to further improve quality.
}
if (d16[0].var < thr_32x32 && d16[1].var < thr_32x32 &&
d16[2].var < thr_32x32 && d16[3].var < thr_32x32) {
d32[i].sse = d16[0].sse;
d32[i].sum = d16[0].sum;
for (j = 1; j < 4; j++) {
d32[i].sse += d16[j].sse;
d32[i].sum += d16[j].sum;
}
d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
mi_8x8[index] = mi_upper_left + index;
mi_8x8[index]->mbmi.sb_type = BLOCK_32X32;
if (!((cm->current_video_frame - 1) %
cpi->sf.search_type_check_frequency))
cpi->use_large_partition_rate += 1;
} else {
use16x16 = 1;
}
}
if (!use16x16) {
if (d32[0].var < thr_64x64 && d32[1].var < thr_64x64 &&
d32[2].var < thr_64x64 && d32[3].var < thr_64x64) {
mi_8x8[0] = mi_upper_left;
mi_8x8[0]->mbmi.sb_type = BLOCK_64X64;
}
}
} else { // partial in-image SB64
int bh = num_8x8_blocks_high_lookup[BLOCK_16X16];
int bw = num_8x8_blocks_wide_lookup[BLOCK_16X16];
    set_partial_b64x64_partition(mi_upper_left, mis, bh, bw,
                                 row8x8_remaining, col8x8_remaining,
                                 BLOCK_16X16, mi_8x8);
  }
}
static int is_background(VP9_COMP *cpi, const TileInfo *const tile,
int mi_row, int mi_col) {
MACROBLOCK *const x = &cpi->mb;
uint8_t *src, *pre;
int src_stride, pre_stride;
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
int this_sad = 0;
int threshold = 0;
vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
src_stride = x->plane[0].src.stride;
src = x->plane[0].src.buf;
pre_stride = cpi->Last_Source->y_stride;
pre = cpi->Last_Source->y_buffer + (mi_row * MI_SIZE) * pre_stride +
(mi_col * MI_SIZE);
if (row8x8_remaining >= MI_BLOCK_SIZE &&
col8x8_remaining >= MI_BLOCK_SIZE) {
this_sad = cpi->fn_ptr[BLOCK_64X64].sdf(src, src_stride,
pre, pre_stride, 0x7fffffff);
threshold = (1 << 12);
} else {
int r, c;
for (r = 0; r < row8x8_remaining; r += 2)
for (c = 0; c < col8x8_remaining; c += 2)
this_sad += cpi->fn_ptr[BLOCK_16X16].sdf(src, src_stride, pre,
pre_stride, 0x7fffffff);
threshold = (row8x8_remaining * col8x8_remaining) << 6;
}
return (this_sad < 2 * threshold);
}
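/* Threshold arithmetic: a full SB64 covers 64 * 64 = 4096 pixels and is
 * treated as background when SAD < 2 * (1 << 12) = 8192, i.e. an average
 * of 2 per pixel.  The partial-SB branch keeps the same per-pixel budget:
 * (row8x8_remaining * col8x8_remaining) << 6 is 64 per remaining 8x8
 * unit, and the final "< 2 * threshold" makes that 128 per 64-pixel unit.
 */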
static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO **prev_mi_8x8) {
const int mis = cm->mi_stride;
int block_row, block_col;
if (cm->prev_mi) {
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
const MODE_INFO *prev_mi = prev_mi_8x8[block_row * mis + block_col];
if (prev_mi) {
if (abs(prev_mi->mbmi.mv[0].as_mv.row) >= 8 ||
abs(prev_mi->mbmi.mv[0].as_mv.col) >= 8)
return 1;
}
}
}
}
return 0;
}
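/* Motion vectors are stored in eighth-pel units, so the ">= 8" tests
 * above flag any block whose previous-frame motion was at least one full
 * pixel in either direction.
 */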
static void update_state_rt(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
int mi_row, int mi_col, int bsize) {
VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const struct segmentation *const seg = &cm->seg;
  // For in-frame adaptive Q, check for resetting the segment_id and updating
// the cyclic refresh map.
if ((cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) && seg->enabled) {
vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi,
mi_row, mi_col, bsize, 1);
vp9_init_plane_quantizers(cpi, x);
}
vp9_update_mv_count(cm, xd);
if (cm->interp_filter == SWITCHABLE) {
const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter];
}
static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
                        int output_enabled, BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (x->ab_index > 0)
      return;
  }
  set_offsets(cpi, tile, mi_row, mi_col, bsize);
update_state_rt(cpi, get_block_context(x, bsize), mi_row, mi_col, bsize);
encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);
update_stats(cpi);
(*tp)->token = EOSB_TOKEN;
(*tp)++;
}
static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
int ctx;
PARTITION_TYPE partition;
BLOCK_SIZE subsize;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (bsize >= BLOCK_8X8) {
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
const int idx_str = xd->mi_stride * mi_row + mi_col;
MODE_INFO ** mi_8x8 = cm->mi_grid_visible + idx_str;
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = mi_8x8[0]->mbmi.sb_type;
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];
switch (partition) {
case PARTITION_NONE:
if (output_enabled && bsize >= BLOCK_8X8)
cm->counts.partition[ctx][PARTITION_NONE]++;
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
      break;
case PARTITION_VERT:
if (output_enabled)
cm->counts.partition[ctx][PARTITION_VERT]++;
encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
if (mi_col + hbs < cm->mi_cols) {
        encode_b_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
                    subsize);
      }
      break;
case PARTITION_HORZ:
if (output_enabled)
cm->counts.partition[ctx][PARTITION_HORZ]++;
encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
if (mi_row + hbs < cm->mi_rows) {
encode_b_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
subsize);
}
break;
case PARTITION_SPLIT:
subsize = get_subsize(bsize, PARTITION_SPLIT);
if (output_enabled)
cm->counts.partition[ctx][PARTITION_SPLIT]++;
*get_sb_index(x, subsize) = 0;
encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
*get_sb_index(x, subsize) = 1;
encode_sb_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
subsize);
*get_sb_index(x, subsize) = 2;
encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
subsize);
*get_sb_index(x, subsize) = 3;
      encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
                   subsize);
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
static void rd_use_partition(VP9_COMP *cpi,
const TileInfo *const tile,
MODE_INFO **mi_8x8,
TOKENEXTRA **tp, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *rate, int64_t *dist,
int do_recon) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int mis = cm->mi_stride;
  const int bsl = b_width_log2(bsize);
  const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
  const int bss = (1 << bsl) / 4;
  int i, pl;
  PARTITION_TYPE partition = PARTITION_NONE;
  BLOCK_SIZE subsize;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  int last_part_rate = INT_MAX;
  int64_t last_part_dist = INT64_MAX;
  int64_t last_part_rd = INT64_MAX;
  int none_rate = INT_MAX;
  int64_t none_dist = INT64_MAX;
  int64_t none_rd = INT64_MAX;
  int chosen_rate = INT_MAX;
  int64_t chosen_dist = INT64_MAX;
  int64_t chosen_rd = INT64_MAX;
  BLOCK_SIZE sub_subsize = BLOCK_4X4;
  int splits_below = 0;
  BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
  int do_partition_search = 1;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
assert(num_4x4_blocks_wide_lookup[bsize] ==
         num_4x4_blocks_high_lookup[bsize]);

  partition = partition_lookup[bsl][bs_type];
  subsize = get_subsize(bsize, partition);
if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
if (x->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
}
} else {
*(get_sb_partitioning(x, bsize)) = subsize;
}
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (bsize == BLOCK_16X16) {
set_offsets(cpi, tile, mi_row, mi_col, bsize);
x->mb_energy = vp9_block_energy(cpi, x, bsize);
} else {
    x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize);
  }
if (!x->in_active_map) {
do_partition_search = 0;
if (mi_row + (mi_step >> 1) < cm->mi_rows &&
mi_col + (mi_step >> 1) < cm->mi_cols) {
*(get_sb_partitioning(x, bsize)) = bsize;
bs_type = mi_8x8[0]->mbmi.sb_type = bsize;
subsize = bsize;
partition = PARTITION_NONE;
}
}
if (do_partition_search &&
cpi->sf.partition_search_type == SEARCH_PARTITION &&
cpi->sf.adjust_partitioning_from_last_frame) {
// Check if any of the sub blocks are further split.
if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
splits_below = 1;
for (i = 0; i < 4; i++) {
int jj = i >> 1, ii = i & 0x01;
MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss];
if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
splits_below = 0;
}
}
}
  // If the partition is not PARTITION_NONE, also try PARTITION_NONE unless
  // each of the 4 splits is split even further.
if (partition != PARTITION_NONE && !splits_below &&
mi_row + (mi_step >> 1) < cm->mi_rows &&
mi_col + (mi_step >> 1) < cm->mi_cols) {
*(get_sb_partitioning(x, bsize)) = bsize;
rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize,
get_block_context(x, bsize), INT64_MAX);
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (none_rate < INT_MAX) {
none_rate += x->partition_cost[pl][PARTITION_NONE];
none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist);
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
mi_8x8[0]->mbmi.sb_type = bs_type;
*(get_sb_partitioning(x, bsize)) = subsize;
}
}
  switch (partition) {
    case PARTITION_NONE:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, bsize,
                       get_block_context(x, bsize), INT64_MAX);
      break;
    case PARTITION_HORZ:
      *get_sb_index(x, subsize) = 0;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize,
                       get_block_context(x, subsize), INT64_MAX);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
        int rt = 0;
        int64_t dt = 0;
        update_state(cpi, get_block_context(x, subsize), mi_row, mi_col,
                     subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
        *get_sb_index(x, subsize) = 1;
        rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &rt, &dt,
                         subsize, get_block_context(x, subsize), INT64_MAX);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_VERT:
      *get_sb_index(x, subsize) = 0;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize,
                       get_block_context(x, subsize), INT64_MAX);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
        int rt = 0;
        int64_t dt = 0;
        update_state(cpi, get_block_context(x, subsize), mi_row, mi_col,
                     subsize, 0);
        encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
        *get_sb_index(x, subsize) = 1;
        rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &rt, &dt,
                         subsize, get_block_context(x, subsize), INT64_MAX);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_SPLIT:
      // Split partition.
      last_part_rate = 0;
      last_part_dist = 0;
      for (i = 0; i < 4; i++) {
        const int x_idx = (i & 1) * (mi_step >> 1);
        const int y_idx = (i >> 1) * (mi_step >> 1);
        const int jj = i >> 1, ii = i & 0x01;
        int rt;
        int64_t dt;

        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
          continue;

        *get_sb_index(x, subsize) = i;
        rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp,
                         mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
                         i != 3);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    default:
      assert(0);
  }
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (last_part_rate < INT_MAX) {
last_part_rate += x->partition_cost[pl][partition];
last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist);
}
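  /* RDCOST() combines the rate term (scaled by x->rdmult) and the
   * distortion term (scaled by x->rddiv) into a single Lagrangian cost;
   * the partitioning with the lowest combined cost wins the comparisons
   * below. */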
if (do_partition_search
&& cpi->sf.adjust_partitioning_from_last_frame
&& cpi->sf.partition_search_type == SEARCH_PARTITION
&& partition != PARTITION_SPLIT && bsize > BLOCK_8X8
&& (mi_row + mi_step < cm->mi_rows ||
mi_row + (mi_step >> 1) == cm->mi_rows)
&& (mi_col + mi_step < cm->mi_cols ||
mi_col + (mi_step >> 1) == cm->mi_cols)) {
BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
chosen_rate = 0;
chosen_dist = 0;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// Split partition.
for (i = 0; i < 4; i++) {
int x_idx = (i & 1) * (mi_step >> 1);
int y_idx = (i >> 1) * (mi_step >> 1);
int rt = 0;
int64_t dt = 0;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
      if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
        continue;

*get_sb_index(x, split_subsize) = i;
*get_sb_partitioning(x, bsize) = split_subsize;
*get_sb_partitioning(x, split_subsize) = split_subsize;
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &rt, &dt,
split_subsize, get_block_context(x, split_subsize),
INT64_MAX);
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      if (rt == INT_MAX || dt == INT64_MAX) {
        chosen_rate = INT_MAX;
        chosen_dist = INT64_MAX;
        break;
      }

      chosen_rate += rt;
      chosen_dist += dt;

      if (i != 3)
        encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0,
                  split_subsize);

      pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
                                   split_subsize);
      chosen_rate += x->partition_cost[pl][PARTITION_NONE];
    }
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (chosen_rate < INT_MAX) {
chosen_rate += x->partition_cost[pl][PARTITION_SPLIT];
chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist);
}
// If last_part is better set the partitioning to that...
if (last_part_rd < chosen_rd) {
mi_8x8[0]->mbmi.sb_type = bsize;
if (bsize >= BLOCK_8X8)
*(get_sb_partitioning(x, bsize)) = subsize;
chosen_rate = last_part_rate;
chosen_dist = last_part_dist;
}
  // If none was better set the partitioning to that...
  if (none_rd < chosen_rd) {
    mi_8x8[0]->mbmi.sb_type = bsize;
    if (bsize >= BLOCK_8X8)
*(get_sb_partitioning(x, bsize)) = bsize;
chosen_rate = none_rate;
chosen_dist = none_dist;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// We must have chosen a partitioning and encoding or we'll fail later on.
// No other opportunities for success.
  if (bsize == BLOCK_64X64)
assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX);
if (do_recon) {
int output_enabled = (bsize == BLOCK_64X64);
    // Check the projected output rate for this SB against its target
    // and if necessary apply a Q delta using segmentation to get
    // closer to the target.
if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
vp9_select_in_frame_q_segment(cpi, mi_row, mi_col,
                                    output_enabled, chosen_rate);
    }
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
chosen_rate, chosen_dist);
encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize);
}
*rate = chosen_rate;
  *dist = chosen_dist;
}
static const BLOCK_SIZE min_partition_size[BLOCK_SIZES] = {
BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
BLOCK_16X16
};
static const BLOCK_SIZE max_partition_size[BLOCK_SIZES] = {
BLOCK_8X8, BLOCK_16X16, BLOCK_16X16,
BLOCK_16X16, BLOCK_32X32, BLOCK_32X32,
BLOCK_32X32, BLOCK_64X64, BLOCK_64X64,
BLOCK_64X64, BLOCK_64X64, BLOCK_64X64,
  BLOCK_64X64
};

// Look at all the mode_info entries for blocks that are part of this
// partition and find the min and max values for sb_type.
// At the moment this is designed to work on a 64x64 SB but could be
// adjusted to use a size parameter.
//
// The min and max are assumed to have been initialized prior to calling this
// function so repeat calls can accumulate a min and max of more than one sb64.
static void get_sb_partition_size_range(VP9_COMP *cpi, MODE_INFO ** mi_8x8,
BLOCK_SIZE * min_block_size,
BLOCK_SIZE * max_block_size ) {
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
int sb_width_in_blocks = MI_BLOCK_SIZE;
int sb_height_in_blocks = MI_BLOCK_SIZE;
int i, j;
int index = 0;
// Check the sb_type for each block that belongs to this region.
for (i = 0; i < sb_height_in_blocks; ++i) {
for (j = 0; j < sb_width_in_blocks; ++j) {
MODE_INFO * mi = mi_8x8[index+j];
BLOCK_SIZE sb_type = mi ? mi->mbmi.sb_type : 0;
*min_block_size = MIN(*min_block_size, sb_type);
      *max_block_size = MAX(*max_block_size, sb_type);
    }
    index += xd->mi_stride;
  }
}

// Next square block size less than or equal to the current block size.
static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
BLOCK_4X4, BLOCK_4X4, BLOCK_4X4,
BLOCK_8X8, BLOCK_8X8, BLOCK_8X8,
BLOCK_16X16, BLOCK_16X16, BLOCK_16X16,
BLOCK_32X32, BLOCK_32X32, BLOCK_32X32,
BLOCK_64X64
};
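/* These tables are indexed by an observed BLOCK_SIZE.  For example, if
 * the largest neighboring sb_type is BLOCK_32X32, max_partition_size[]
 * relaxes the cap to BLOCK_64X64, and if the smallest is BLOCK_16X16,
 * min_partition_size[] lowers the floor to BLOCK_8X8, giving the search
 * one step of freedom in each direction around what the neighbors used.
 */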
// Look at neighboring blocks and set a min and max partition size based on
// the sizes used by those neighbors.
static void rd_auto_partition_range(VP9_COMP *cpi, const TileInfo *const tile,
                                    int mi_row, int mi_col,
                                    BLOCK_SIZE *min_block_size,
BLOCK_SIZE *max_block_size) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
MODE_INFO **mi_8x8 = xd->mi;
const int left_in_image = xd->left_available && mi_8x8[-1];
  const int above_in_image = xd->up_available &&
                             mi_8x8[-xd->mi_stride];
MODE_INFO **above_sb64_mi_8x8;
MODE_INFO **left_sb64_mi_8x8;
int row8x8_remaining = tile->mi_row_end - mi_row;
  int col8x8_remaining = tile->mi_col_end - mi_col;
  int bh, bw;
BLOCK_SIZE min_size = BLOCK_4X4;
BLOCK_SIZE max_size = BLOCK_64X64;
// Trap case where we do not have a prediction.
if (left_in_image || above_in_image || cm->frame_type != KEY_FRAME) {
min_size = BLOCK_64X64;
max_size = BLOCK_4X4;
// NOTE: each call to get_sb_partition_size_range() uses the previous
// passed in values for min and max as a starting point.
// Find the min and max partition used in previous frame at this location
if (cm->frame_type != KEY_FRAME) {
      MODE_INFO **const prev_mi =
          &cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col];
get_sb_partition_size_range(cpi, prev_mi, &min_size, &max_size);
}
// Find the min and max partition sizes used in the left SB64
if (left_in_image) {
left_sb64_mi_8x8 = &mi_8x8[-MI_BLOCK_SIZE];
      get_sb_partition_size_range(cpi, left_sb64_mi_8x8,
                                  &min_size, &max_size);
}
// Find the min and max partition sizes used in the above SB64.
if (above_in_image) {
above_sb64_mi_8x8 = &mi_8x8[-xd->mi_stride * MI_BLOCK_SIZE];
get_sb_partition_size_range(cpi, above_sb64_mi_8x8,
&min_size, &max_size);
}
// adjust observed min and max
if (cpi->sf.auto_min_max_partition_size == RELAXED_NEIGHBORING_MIN_MAX) {
min_size = min_partition_size[min_size];
      max_size = max_partition_size[max_size];
    }
  }

  // Check border cases where max and min from neighbors may not be legal.
max_size = find_partition_size(max_size,
row8x8_remaining, col8x8_remaining,
&bh, &bw);
min_size = MIN(min_size, max_size);
// When use_square_partition_only is true, make sure at least one square
// partition is allowed by selecting the next smaller square size as
// *min_block_size.
if (cpi->sf.use_square_partition_only &&
next_square_size[max_size] < min_size) {
    min_size = next_square_size[max_size];
  }

  *min_block_size = min_size;
  *max_block_size = max_size;
}
static INLINE void store_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
vpx_memcpy(ctx->pred_mv, x->pred_mv, sizeof(x->pred_mv));
}
static INLINE void load_pred_mv(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
vpx_memcpy(x->pred_mv, ctx->pred_mv, sizeof(x->pred_mv));
}
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row,
int mi_col, BLOCK_SIZE bsize, int *rate,
int64_t *dist, int do_recon, int64_t best_rd) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int mi_step = num_8x8_blocks_wide_lookup[bsize] / 2;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
TOKENEXTRA *tp_orig = *tp;
PICK_MODE_CONTEXT *ctx = get_block_context(x, bsize);
BLOCK_SIZE subsize;
int this_rate, sum_rate = 0, best_rate = INT_MAX;
int64_t this_dist, sum_dist = 0, best_dist = INT64_MAX;
int do_split = bsize >= BLOCK_8X8;
int do_rect = 1;
// Override skipping rectangular partition operations for edge blocks
const int force_horz_split = (mi_row + mi_step >= cm->mi_rows);
const int force_vert_split = (mi_col + mi_step >= cm->mi_cols);
const int xss = x->e_mbd.plane[1].subsampling_x;
const int yss = x->e_mbd.plane[1].subsampling_y;
int partition_none_allowed = !force_horz_split && !force_vert_split;
int partition_horz_allowed = !force_vert_split && yss <= xss &&
bsize >= BLOCK_8X8;
int partition_vert_allowed = !force_horz_split && xss <= yss &&
bsize >= BLOCK_8X8;
if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
if (x->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
    }
  }
assert(num_8x8_blocks_wide_lookup[bsize] ==
num_8x8_blocks_high_lookup[bsize]);
if (bsize == BLOCK_16X16) {
x->mb_energy = vp9_block_energy(cpi, x, bsize);
} else {
x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize);