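// Fragment of the per-block RD mode-decision path (rd_pick_sb_modes()):
// wire up the per-context coefficient buffers, apply the active
// adaptive-quantization (AQ) rdmult scaling, then run the RD mode search.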
  for (i = 0; i < MAX_MB_PLANE; ++i) {
    p[i].coeff = ctx->coeff_pbuf[i][0];
    p[i].qcoeff = ctx->qcoeff_pbuf[i][0];
    pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][0];
    p[i].eobs = ctx->eobs_pbuf[i][0];
  }
  // Set to zero to make sure we do not use the previous encoded frame stats.
  mbmi->skip = 0;
x->source_variance = get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
// Save rdmult before it might be changed, so it can be restored later.
orig_rdmult = x->rdmult;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
activity_masking(cpi, x);
if (aq_mode == VARIANCE_AQ) {
const int energy = bsize <= BLOCK_16X16 ? x->mb_energy
: vp9_block_energy(cpi, x, bsize);
if (cm->frame_type == KEY_FRAME ||
cpi->refresh_alt_ref_frame ||
(cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
mbmi->segment_id = vp9_vaq_segment_id(energy);
} else {
const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
: cm->last_frame_seg_map;
      mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
    }

    rdmult_ratio = vp9_vaq_rdmult_ratio(energy);
    vp9_clear_system_state();
    x->rdmult = (int)round(x->rdmult * rdmult_ratio);
} else if (aq_mode == COMPLEXITY_AQ) {
const int mi_offset = mi_row * cm->mi_cols + mi_col;
unsigned char complexity = cpi->complexity_map[mi_offset];
const int is_edge = (mi_row <= 1) || (mi_row >= (cm->mi_rows - 2)) ||
(mi_col <= 1) || (mi_col >= (cm->mi_cols - 2));
if (!is_edge && (complexity > 128))
x->rdmult += ((x->rdmult * (complexity - 128)) / 256);
} else if (aq_mode == CYCLIC_REFRESH_AQ) {
    const uint8_t *const map = cm->seg.update_map ? cpi->segmentation_map
                                                  : cm->last_frame_seg_map;
// If segment 1, use rdmult for that segment.
    if (vp9_get_segment_id(cm, map, bsize, mi_row, mi_col))
      x->rdmult = vp9_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
  }
// Find best coding mode & reconstruct the MB so it is available
// as a predictor for MBs that follow in the SB
if (frame_is_intra_only(cm)) {
vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx,
best_rd);
} else {
    if (bsize >= BLOCK_8X8)
      vp9_rd_pick_inter_mode_sb(cpi, x, tile, mi_row, mi_col,
                                totalrate, totaldist, bsize, ctx, best_rd);
    else
      vp9_rd_pick_inter_mode_sub8x8(cpi, x, tile, mi_row, mi_col, totalrate,
                                    totaldist, bsize, ctx, best_rd);
totaldist, bsize, ctx, best_rd);
}
x->rdmult = orig_rdmult;
  if (aq_mode == VARIANCE_AQ && *totalrate != INT_MAX) {
    vp9_clear_system_state();
    *totalrate = (int)round(*totalrate * rdmult_ratio);
  }
}
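// Accumulate the per-frame mode and reference counts that drive backward
// probability adaptation (intra/inter, compound vs. single reference, and
// reference-frame selection contexts).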
static void update_stats(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
const MACROBLOCK *const x = &cpi->mb;
const MACROBLOCKD *const xd = &x->e_mbd;
const MODE_INFO *const mi = xd->mi[0];
const MB_MODE_INFO *const mbmi = &mi->mbmi;
if (!frame_is_intra_only(cm)) {
const int seg_ref_active = vp9_segfeature_active(&cm->seg, mbmi->segment_id,
SEG_LVL_REF_FRAME);
if (!seg_ref_active) {
FRAME_COUNTS *const counts = &cm->counts;
const int inter_block = is_inter_block(mbmi);
counts->intra_inter[vp9_get_intra_inter_context(xd)][inter_block]++;
// If the segment reference feature is enabled we have only a single
// reference frame allowed for the segment so exclude it from
// the reference frame counts used to work out probabilities.
if (inter_block) {
const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
if (cm->reference_mode == REFERENCE_MODE_SELECT)
counts->comp_inter[vp9_get_reference_mode_context(cm, xd)]
[has_second_ref(mbmi)]++;
if (has_second_ref(mbmi)) {
counts->comp_ref[vp9_get_pred_context_comp_ref_p(cm, xd)]
[ref0 == GOLDEN_FRAME]++;
} else {
counts->single_ref[vp9_get_pred_context_single_ref_p1(xd)][0]
[ref0 != LAST_FRAME]++;
if (ref0 != LAST_FRAME)
counts->single_ref[vp9_get_pred_context_single_ref_p2(xd)][1]
[ref0 != GOLDEN_FRAME]++;
        }
      }
    }
  }
}
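// Return a pointer to the partition-size slot for the block at the current
// sb/mb/b indices within the 64x64 superblock.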
static BLOCK_SIZE *get_sb_partitioning(MACROBLOCK *x, BLOCK_SIZE bsize) {
  switch (bsize) {
    case BLOCK_64X64:
      return &x->sb64_partitioning;
    case BLOCK_32X32:
      return &x->sb_partitioning[x->sb_index];
    case BLOCK_16X16:
      return &x->mb_partitioning[x->sb_index][x->mb_index];
    case BLOCK_8X8:
      return &x->b_partitioning[x->sb_index][x->mb_index][x->b_index];
    default:
      assert(0);
      return NULL;
  }
}
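// Restore the entropy and partition contexts saved by save_context() so a
// speculative partition search leaves the coding state untouched.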
static void restore_context(VP9_COMP *cpi, int mi_row, int mi_col,
ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
BLOCK_SIZE bsize) {
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];
  int p;

  for (p = 0; p < MAX_MB_PLANE; p++) {
    vpx_memcpy(
        xd->above_context[p] + ((mi_col * 2) >> xd->plane[p].subsampling_x),
        a + num_4x4_blocks_wide * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
        xd->plane[p].subsampling_x);
    vpx_memcpy(
        xd->left_context[p]
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        l + num_4x4_blocks_high * p,
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
        xd->plane[p].subsampling_y);
  }
vpx_memcpy(xd->above_seg_context + mi_col, sa,
sizeof(*xd->above_seg_context) * mi_width);
  vpx_memcpy(xd->left_seg_context + (mi_row & MI_MASK), sl,
             sizeof(xd->left_seg_context[0]) * mi_height);
}
static void save_context(VP9_COMP *cpi, int mi_row, int mi_col,
ENTROPY_CONTEXT a[16 * MAX_MB_PLANE],
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE],
PARTITION_CONTEXT sa[8], PARTITION_CONTEXT sl[8],
BLOCK_SIZE bsize) {
const MACROBLOCK *const x = &cpi->mb;
const MACROBLOCKD *const xd = &x->e_mbd;
const int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int mi_width = num_8x8_blocks_wide_lookup[bsize];
  int mi_height = num_8x8_blocks_high_lookup[bsize];
  int p;

  // buffer the above/left context information of the block in search.
  for (p = 0; p < MAX_MB_PLANE; ++p) {
    vpx_memcpy(
        a + num_4x4_blocks_wide * p,
        xd->above_context[p] + (mi_col * 2 >> xd->plane[p].subsampling_x),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_wide) >>
        xd->plane[p].subsampling_x);
    vpx_memcpy(
        l + num_4x4_blocks_high * p,
        xd->left_context[p]
            + ((mi_row & MI_MASK) * 2 >> xd->plane[p].subsampling_y),
        (sizeof(ENTROPY_CONTEXT) * num_4x4_blocks_high) >>
        xd->plane[p].subsampling_y);
  }
}
vpx_memcpy(sa, xd->above_seg_context + mi_col,
sizeof(*xd->above_seg_context) * mi_width);
  vpx_memcpy(sl, xd->left_seg_context + (mi_row & MI_MASK),
             sizeof(xd->left_seg_context[0]) * mi_height);
}
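// Encode a single leaf block: set offsets, commit the chosen mode, run the
// final encode, and (when output is enabled) update counts and tokens.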
static void encode_b(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize) {
MACROBLOCK *const x = &cpi->mb;
if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
    if (x->ab_index > 0)
      return;
  }
  set_offsets(cpi, tile, mi_row, mi_col, bsize);
  update_state(cpi, get_block_context(x, bsize), mi_row, mi_col, bsize,
               output_enabled);
  encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);

  if (output_enabled) {
    update_stats(cpi);

    (*tp)->token = EOSB_TOKEN;
    (*tp)++;
  }
}
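// Recursively encode a superblock according to the partitioning chosen
// during the search, updating partition counts along the way.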
static void encode_sb(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
  int ctx;
  PARTITION_TYPE partition;
  BLOCK_SIZE subsize;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (bsize >= BLOCK_8X8) {
ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
subsize = *get_sb_partitioning(x, bsize);
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];

  switch (partition) {
    case PARTITION_NONE:
      if (output_enabled && bsize >= BLOCK_8X8)
        cm->counts.partition[ctx][PARTITION_NONE]++;
      encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
break;
case PARTITION_VERT:
if (output_enabled)
cm->counts.partition[ctx][PARTITION_VERT]++;
*get_sb_index(x, subsize) = 0;
encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
if (mi_col + hbs < cm->mi_cols) {
*get_sb_index(x, subsize) = 1;
encode_b(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize);
}
break;
case PARTITION_HORZ:
if (output_enabled)
cm->counts.partition[ctx][PARTITION_HORZ]++;
*get_sb_index(x, subsize) = 0;
encode_b(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
if (mi_row + hbs < cm->mi_rows) {
*get_sb_index(x, subsize) = 1;
encode_b(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize);
}
break;
case PARTITION_SPLIT:
subsize = get_subsize(bsize, PARTITION_SPLIT);
if (output_enabled)
cm->counts.partition[ctx][PARTITION_SPLIT]++;
*get_sb_index(x, subsize) = 0;
encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
*get_sb_index(x, subsize) = 1;
encode_sb(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled, subsize);
*get_sb_index(x, subsize) = 2;
encode_sb(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled, subsize);
*get_sb_index(x, subsize) = 3;
      encode_sb(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
                subsize);
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
// Check to see if the given partition size is allowed for a specified number
// of 8x8 block rows and columns remaining in the image.
// If not then return the largest allowed partition size
static BLOCK_SIZE find_partition_size(BLOCK_SIZE bsize,
int rows_left, int cols_left,
int *bh, int *bw) {
  if (rows_left <= 0 || cols_left <= 0) {
    return MIN(bsize, BLOCK_8X8);
  } else {
    for (; bsize > 0; bsize -= 3) {
*bh = num_8x8_blocks_high_lookup[bsize];
*bw = num_8x8_blocks_wide_lookup[bsize];
if ((*bh <= rows_left) && (*bw <= cols_left)) {
break;
}
}
}
return bsize;
}
// This function attempts to set all mode info entries in a given SB64
// to the same block partition size.
// However, at the bottom and right borders of the image the requested size
// may not be allowed in which case this code attempts to choose the largest
// allowable partition.
static void set_fixed_partitioning(VP9_COMP *cpi, const TileInfo *const tile,
MODE_INFO **mi_8x8, int mi_row, int mi_col,
BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mi_stride;
int row8x8_remaining = tile->mi_row_end - mi_row;
int col8x8_remaining = tile->mi_col_end - mi_col;
MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;
  int bh = num_8x8_blocks_high_lookup[bsize];
  int bw = num_8x8_blocks_wide_lookup[bsize];
  int block_row, block_col;

  assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
// Apply the requested partition size to the SB64 if it is all "in image"
if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
(row8x8_remaining >= MI_BLOCK_SIZE)) {
for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
int index = block_row * mis + block_col;
mi_8x8[index] = mi_upper_left + index;
mi_8x8[index]->mbmi.sb_type = bsize;
}
}
} else {
// Else this is a partial SB64.
for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
int index = block_row * mis + block_col;
bsize = find_partition_size(bsize,
(row8x8_remaining - block_row),
(col8x8_remaining - block_col), &bh, &bw);
mi_8x8[index] = mi_upper_left + index;
        mi_8x8[index]->mbmi.sb_type = bsize;
      }
    }
  }
}
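// Copy the previous frame's partitioning (mode-info pointers and block
// sizes) into the current frame's 8x8 mode-info grid.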
static void copy_partitioning(VP9_COMMON *cm, MODE_INFO **mi_8x8,
MODE_INFO **prev_mi_8x8) {
const int mis = cm->mi_stride;
int block_row, block_col;
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
MODE_INFO *const prev_mi = prev_mi_8x8[block_row * mis + block_col];
const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
if (prev_mi) {
const ptrdiff_t offset = prev_mi - cm->prev_mi;
mi_8x8[block_row * mis + block_col] = cm->mi + offset;
mi_8x8[block_row * mis + block_col]->mbmi.sb_type = sb_type;
}
}
}
}
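// Copy the previous frame's partitioning, but constrain each copied block
// to be no larger than bsize; larger blocks fall back to a fixed bsize
// partition.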
static void constrain_copy_partitioning(VP9_COMP *const cpi,
const TileInfo *const tile,
MODE_INFO **mi_8x8,
MODE_INFO **prev_mi_8x8,
int mi_row, int mi_col,
BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mi_stride;
const int row8x8_remaining = tile->mi_row_end - mi_row;
const int col8x8_remaining = tile->mi_col_end - mi_col;
MODE_INFO *const mi_upper_left = cm->mi + mi_row * mis + mi_col;
const int bh = num_8x8_blocks_high_lookup[bsize];
const int bw = num_8x8_blocks_wide_lookup[bsize];
int block_row, block_col;
assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
  // If the SB64 is all "in image".
if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
(row8x8_remaining >= MI_BLOCK_SIZE)) {
for (block_row = 0; block_row < MI_BLOCK_SIZE; block_row += bh) {
for (block_col = 0; block_col < MI_BLOCK_SIZE; block_col += bw) {
const int index = block_row * mis + block_col;
MODE_INFO *prev_mi = prev_mi_8x8[index];
const BLOCK_SIZE sb_type = prev_mi ? prev_mi->mbmi.sb_type : 0;
// Use previous partition if block size is not larger than bsize.
if (prev_mi && sb_type <= bsize) {
int block_row2, block_col2;
for (block_row2 = 0; block_row2 < bh; ++block_row2) {
for (block_col2 = 0; block_col2 < bw; ++block_col2) {
const int index2 = (block_row + block_row2) * mis +
block_col + block_col2;
prev_mi = prev_mi_8x8[index2];
if (prev_mi) {
const ptrdiff_t offset = prev_mi - cm->prev_mi;
mi_8x8[index2] = cm->mi + offset;
mi_8x8[index2]->mbmi.sb_type = prev_mi->mbmi.sb_type;
}
}
}
} else {
// Otherwise, use fixed partition of size bsize.
mi_8x8[index] = mi_upper_left + index;
mi_8x8[index]->mbmi.sb_type = bsize;
}
}
}
  } else {
    // Else this is a partial SB64, copy previous partition.
    copy_partitioning(cm, mi_8x8, prev_mi_8x8);
  }
}

static const struct {
  int row;
  int col;
} coord_lookup[16] = {
    // 32x32 index = 0
    {0, 0}, {0, 2}, {2, 0}, {2, 2},
    // 32x32 index = 1
    {0, 4}, {0, 6}, {2, 4}, {2, 6},
    // 32x32 index = 2
    {4, 0}, {4, 2}, {6, 0}, {6, 2},
    // 32x32 index = 3
    {4, 4}, {4, 6}, {6, 4}, {6, 6},
};
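// Choose 16x16/32x32/64x64 partition sizes from the source-vs-last-source
// variance of each 16x16 block, merging upward when variance is low.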
static void set_source_var_based_partition(VP9_COMP *cpi,
const TileInfo *const tile,
MODE_INFO **mi_8x8,
int mi_row, int mi_col) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  const int mis = cm->mi_stride;
  const int row8x8_remaining = tile->mi_row_end - mi_row;
  const int col8x8_remaining = tile->mi_col_end - mi_col;
  MODE_INFO *mi_upper_left = cm->mi + mi_row * mis + mi_col;

  vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
assert((row8x8_remaining > 0) && (col8x8_remaining > 0));
// In-image SB64
if ((col8x8_remaining >= MI_BLOCK_SIZE) &&
(row8x8_remaining >= MI_BLOCK_SIZE)) {
const int src_stride = x->plane[0].src.stride;
const int pre_stride = cpi->Last_Source->y_stride;
const uint8_t *src = x->plane[0].src.buf;
const int pre_offset = (mi_row * MI_SIZE) * pre_stride +
(mi_col * MI_SIZE);
const uint8_t *pre_src = cpi->Last_Source->y_buffer + pre_offset;
const unsigned int thr_32x32 = cpi->sf.source_var_thresh;
const unsigned int thr_64x64 = thr_32x32 << 1;
int i, j;
int index;
diff d32[4];
int use16x16 = 0;
for (i = 0; i < 4; i++) {
diff d16[4];
for (j = 0; j < 4; j++) {
int b_mi_row = coord_lookup[i * 4 + j].row;
int b_mi_col = coord_lookup[i * 4 + j].col;
int b_offset = b_mi_row * MI_SIZE * src_stride +
b_mi_col * MI_SIZE;
vp9_get_sse_sum_16x16(src + b_offset,
src_stride,
pre_src + b_offset,
pre_stride, &d16[j].sse, &d16[j].sum);
d16[j].var = d16[j].sse -
(((uint32_t)d16[j].sum * d16[j].sum) >> 8);
index = b_mi_row * mis + b_mi_col;
mi_8x8[index] = mi_upper_left + index;
mi_8x8[index]->mbmi.sb_type = BLOCK_16X16;
// TODO(yunqingwang): If d16[j].var is very large, use 8x8 partition
// size to further improve quality.
}
if (d16[0].var < thr_32x32 && d16[1].var < thr_32x32 &&
d16[2].var < thr_32x32 && d16[3].var < thr_32x32) {
d32[i].sse = d16[0].sse;
d32[i].sum = d16[0].sum;
for (j = 1; j < 4; j++) {
d32[i].sse += d16[j].sse;
d32[i].sum += d16[j].sum;
}
d32[i].var = d32[i].sse - (((int64_t)d32[i].sum * d32[i].sum) >> 10);
index = coord_lookup[i*4].row * mis + coord_lookup[i*4].col;
mi_8x8[index] = mi_upper_left + index;
mi_8x8[index]->mbmi.sb_type = BLOCK_32X32;
if (!((cm->current_video_frame - 1) %
cpi->sf.search_type_check_frequency))
cpi->use_large_partition_rate += 1;
} else {
use16x16 = 1;
}
}
if (!use16x16) {
if (d32[0].var < thr_64x64 && d32[1].var < thr_64x64 &&
d32[2].var < thr_64x64 && d32[3].var < thr_64x64) {
mi_8x8[0] = mi_upper_left;
mi_8x8[0]->mbmi.sb_type = BLOCK_64X64;
}
}
} else { // partial in-image SB64
    BLOCK_SIZE bsize = BLOCK_16X16;
    int bh = num_8x8_blocks_high_lookup[bsize];
    int bw = num_8x8_blocks_wide_lookup[bsize];
    int r, c;

    for (r = 0; r < MI_BLOCK_SIZE; r += bh) {
      for (c = 0; c < MI_BLOCK_SIZE; c += bw) {
        const int index = r * mis + c;
        mi_8x8[index] = mi_upper_left + index;
mi_8x8[index]->mbmi.sb_type = find_partition_size(bsize,
row8x8_remaining - r, col8x8_remaining - c, &bh, &bw);
}
}
}
}
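// Report whether any 8x8 block in the co-located previous-frame SB64 has a
// motion vector of at least one full pel (8 in 1/8-pel units) in either
// direction.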
static int sb_has_motion(const VP9_COMMON *cm, MODE_INFO **prev_mi_8x8) {
const int mis = cm->mi_stride;
int block_row, block_col;
if (cm->prev_mi) {
for (block_row = 0; block_row < 8; ++block_row) {
for (block_col = 0; block_col < 8; ++block_col) {
const MODE_INFO *prev_mi = prev_mi_8x8[block_row * mis + block_col];
if (prev_mi) {
if (abs(prev_mi->mbmi.mv[0].as_mv.row) >= 8 ||
abs(prev_mi->mbmi.mv[0].as_mv.col) >= 8)
return 1;
}
}
}
}
return 0;
}
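// Real-time (non-RD) counterpart of update_state(): commit the picked mode
// and refresh segment and count state without the full RD bookkeeping.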
static void update_state_rt(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
int mi_row, int mi_col, int bsize) {
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
const struct segmentation *const seg = &cm->seg;
  // For in-frame adaptive Q, check for resetting the segment_id and updating
  // the cyclic refresh map.
if ((cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) && seg->enabled) {
vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi,
mi_row, mi_col, bsize, 1);
vp9_init_plane_quantizers(cpi, x);
}
vp9_update_mv_count(cm, xd);
  if (cm->interp_filter == SWITCHABLE) {
    const int pred_ctx = vp9_get_pred_context_switchable_interp(xd);
    ++cm->counts.switchable_interp[pred_ctx][mbmi->interp_filter];
  }
}
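// Real-time leaf-block encode: like encode_b() but uses update_state_rt()
// and always emits the end-of-superblock token.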
static void encode_b_rt(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
                        int output_enabled, BLOCK_SIZE bsize) {
  MACROBLOCK *const x = &cpi->mb;

  if (bsize < BLOCK_8X8) {
    // When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
    // there is nothing to be done.
    if (x->ab_index > 0)
      return;
  }

  set_offsets(cpi, tile, mi_row, mi_col, bsize);
update_state_rt(cpi, get_block_context(x, bsize), mi_row, mi_col, bsize);
encode_superblock(cpi, tp, output_enabled, mi_row, mi_col, bsize);
update_stats(cpi);
(*tp)->token = EOSB_TOKEN;
(*tp)++;
}
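// Real-time recursive superblock encode driven by the block sizes stored in
// the mi grid rather than the RD partition search.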
static void encode_sb_rt(VP9_COMP *cpi, const TileInfo *const tile,
TOKENEXTRA **tp, int mi_row, int mi_col,
int output_enabled, BLOCK_SIZE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int bsl = b_width_log2(bsize), hbs = (1 << bsl) / 4;
int ctx;
PARTITION_TYPE partition;
BLOCK_SIZE subsize;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (bsize >= BLOCK_8X8) {
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
const int idx_str = xd->mi_stride * mi_row + mi_col;
MODE_INFO ** mi_8x8 = cm->mi_grid_visible + idx_str;
    ctx = partition_plane_context(xd, mi_row, mi_col, bsize);
    subsize = mi_8x8[0]->mbmi.sb_type;
  } else {
    ctx = 0;
    subsize = BLOCK_4X4;
  }

  partition = partition_lookup[bsl][subsize];

  switch (partition) {
    case PARTITION_NONE:
      if (output_enabled && bsize >= BLOCK_8X8)
        cm->counts.partition[ctx][PARTITION_NONE]++;
      encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
      break;
case PARTITION_VERT:
if (output_enabled)
cm->counts.partition[ctx][PARTITION_VERT]++;
encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
      if (mi_col + hbs < cm->mi_cols) {
        encode_b_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
                    subsize);
      }
      break;
    case PARTITION_HORZ:
if (output_enabled)
cm->counts.partition[ctx][PARTITION_HORZ]++;
encode_b_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
if (mi_row + hbs < cm->mi_rows) {
encode_b_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
subsize);
}
break;
case PARTITION_SPLIT:
subsize = get_subsize(bsize, PARTITION_SPLIT);
if (output_enabled)
cm->counts.partition[ctx][PARTITION_SPLIT]++;
*get_sb_index(x, subsize) = 0;
encode_sb_rt(cpi, tile, tp, mi_row, mi_col, output_enabled, subsize);
*get_sb_index(x, subsize) = 1;
encode_sb_rt(cpi, tile, tp, mi_row, mi_col + hbs, output_enabled,
subsize);
*get_sb_index(x, subsize) = 2;
encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col, output_enabled,
subsize);
*get_sb_index(x, subsize) = 3;
      encode_sb_rt(cpi, tile, tp, mi_row + hbs, mi_col + hbs, output_enabled,
                   subsize);
      break;
    default:
      assert(0 && "Invalid partition type.");
      break;
  }

  if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)
    update_partition_context(xd, mi_row, mi_col, subsize, bsize);
}
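// Rate-distortion check of a given (e.g. copied or fixed) partitioning:
// evaluate it as-is, optionally compare against PARTITION_NONE and a forced
// split, and keep the cheapest choice.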
static void rd_use_partition(VP9_COMP *cpi,
const TileInfo *const tile,
MODE_INFO **mi_8x8,
TOKENEXTRA **tp, int mi_row, int mi_col,
BLOCK_SIZE bsize, int *rate, int64_t *dist,
int do_recon) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int mis = cm->mi_stride;
  const int bsl = b_width_log2(bsize);
  const int mi_step = num_4x4_blocks_wide_lookup[bsize] / 2;
  const int bss = (1 << bsl) / 4;
  int i, pl;
  PARTITION_TYPE partition = PARTITION_NONE;
  BLOCK_SIZE subsize;
  ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
  PARTITION_CONTEXT sl[8], sa[8];
  int last_part_rate = INT_MAX;
  int64_t last_part_dist = INT64_MAX;
  int64_t last_part_rd = INT64_MAX;
  int none_rate = INT_MAX;
  int64_t none_dist = INT64_MAX;
  int64_t none_rd = INT64_MAX;
  int chosen_rate = INT_MAX;
  int64_t chosen_dist = INT64_MAX;
  int64_t chosen_rd = INT64_MAX;
  BLOCK_SIZE sub_subsize = BLOCK_4X4;
  int splits_below = 0;
  BLOCK_SIZE bs_type = mi_8x8[0]->mbmi.sb_type;
  int do_partition_search = 1;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
  assert(num_4x4_blocks_wide_lookup[bsize] ==
         num_4x4_blocks_high_lookup[bsize]);

  partition = partition_lookup[bsl][bs_type];
  subsize = get_subsize(bsize, partition);
if (bsize < BLOCK_8X8) {
// When ab_index = 0 all sub-blocks are handled, so for ab_index != 0
// there is nothing to be done.
if (x->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
}
} else {
*(get_sb_partitioning(x, bsize)) = subsize;
}
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (bsize == BLOCK_16X16) {
set_offsets(cpi, tile, mi_row, mi_col, bsize);
x->mb_energy = vp9_block_energy(cpi, x, bsize);
} else {
    x->in_active_map = check_active_map(cpi, x, mi_row, mi_col, bsize);
  }

  if (!x->in_active_map) {
do_partition_search = 0;
if (mi_row + (mi_step >> 1) < cm->mi_rows &&
mi_col + (mi_step >> 1) < cm->mi_cols) {
*(get_sb_partitioning(x, bsize)) = bsize;
bs_type = mi_8x8[0]->mbmi.sb_type = bsize;
subsize = bsize;
partition = PARTITION_NONE;
}
}
if (do_partition_search &&
cpi->sf.partition_search_type == SEARCH_PARTITION &&
cpi->sf.adjust_partitioning_from_last_frame) {
// Check if any of the sub blocks are further split.
if (partition == PARTITION_SPLIT && subsize > BLOCK_8X8) {
sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
splits_below = 1;
for (i = 0; i < 4; i++) {
int jj = i >> 1, ii = i & 0x01;
MODE_INFO * this_mi = mi_8x8[jj * bss * mis + ii * bss];
if (this_mi && this_mi->mbmi.sb_type >= sub_subsize) {
splits_below = 0;
}
}
}
// If partition is not none try none unless each of the 4 splits are split
// even further..
if (partition != PARTITION_NONE && !splits_below &&
mi_row + (mi_step >> 1) < cm->mi_rows &&
mi_col + (mi_step >> 1) < cm->mi_cols) {
*(get_sb_partitioning(x, bsize)) = bsize;
rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &none_rate, &none_dist, bsize,
get_block_context(x, bsize), INT64_MAX);
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (none_rate < INT_MAX) {
none_rate += x->partition_cost[pl][PARTITION_NONE];
none_rd = RDCOST(x->rdmult, x->rddiv, none_rate, none_dist);
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
mi_8x8[0]->mbmi.sb_type = bs_type;
*(get_sb_partitioning(x, bsize)) = subsize;
}
}
  switch (partition) {
    case PARTITION_NONE:
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, bsize,
                       get_block_context(x, bsize), INT64_MAX);
      break;
    case PARTITION_HORZ:
      *get_sb_index(x, subsize) = 0;
      rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
                       &last_part_dist, subsize,
                       get_block_context(x, subsize), INT64_MAX);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_row + (mi_step >> 1) < cm->mi_rows) {
        int rt = 0;
        int64_t dt = 0;
update_state(cpi, get_block_context(x, subsize), mi_row, mi_col,
subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*get_sb_index(x, subsize) = 1;
rd_pick_sb_modes(cpi, tile, mi_row + (mi_step >> 1), mi_col, &rt, &dt,
subsize, get_block_context(x, subsize), INT64_MAX);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_VERT:
*get_sb_index(x, subsize) = 0;
rd_pick_sb_modes(cpi, tile, mi_row, mi_col, &last_part_rate,
&last_part_dist, subsize,
get_block_context(x, subsize), INT64_MAX);
      if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_8X8 && mi_col + (mi_step >> 1) < cm->mi_cols) {
        int rt = 0;
        int64_t dt = 0;
update_state(cpi, get_block_context(x, subsize), mi_row, mi_col,
subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*get_sb_index(x, subsize) = 1;
rd_pick_sb_modes(cpi, tile, mi_row, mi_col + (mi_step >> 1), &rt, &dt,
subsize, get_block_context(x, subsize), INT64_MAX);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_SPLIT:
      // Split partition.
      last_part_rate = 0;
      last_part_dist = 0;
      for (i = 0; i < 4; i++) {
        int x_idx = (i & 1) * (mi_step >> 1);
        int y_idx = (i >> 1) * (mi_step >> 1);
        int jj = i >> 1, ii = i & 0x01;
        int rt;
        int64_t dt;

        if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
          continue;

        *get_sb_index(x, subsize) = i;
rd_use_partition(cpi, tile, mi_8x8 + jj * bss * mis + ii * bss, tp,
mi_row + y_idx, mi_col + x_idx, subsize, &rt, &dt,
i != 3);
        if (rt == INT_MAX || dt == INT64_MAX) {
          last_part_rate = INT_MAX;
          last_part_dist = INT64_MAX;
          break;
        }
        last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
default:
assert(0);
}
pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (last_part_rate < INT_MAX) {
last_part_rate += x->partition_cost[pl][partition];
last_part_rd = RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist);
}
if (do_partition_search
&& cpi->sf.adjust_partitioning_from_last_frame
&& cpi->sf.partition_search_type == SEARCH_PARTITION
&& partition != PARTITION_SPLIT && bsize > BLOCK_8X8
&& (mi_row + mi_step < cm->mi_rows ||
mi_row + (mi_step >> 1) == cm->mi_rows)
&& (mi_col + mi_step < cm->mi_cols ||
mi_col + (mi_step >> 1) == cm->mi_cols)) {
BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
chosen_rate = 0;
chosen_dist = 0;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// Split partition.
for (i = 0; i < 4; i++) {
int x_idx = (i & 1) * (mi_step >> 1);
int y_idx = (i >> 1) * (mi_step >> 1);
int rt = 0;
int64_t dt = 0;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
      if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
        continue;

      *get_sb_index(x, split_subsize) = i;
      *get_sb_partitioning(x, bsize) = split_subsize;
      *get_sb_partitioning(x, split_subsize) = split_subsize;
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
rd_pick_sb_modes(cpi, tile, mi_row + y_idx, mi_col + x_idx, &rt, &dt,
split_subsize, get_block_context(x, split_subsize),
INT64_MAX);
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
      if (rt == INT_MAX || dt == INT64_MAX) {
        chosen_rate = INT_MAX;
        chosen_dist = INT64_MAX;
        break;
      }

      chosen_rate += rt;
      chosen_dist += dt;

      if (i != 3)
        encode_sb(cpi, tile, tp, mi_row + y_idx, mi_col + x_idx, 0,
                  split_subsize);

      pl = partition_plane_context(xd, mi_row + y_idx, mi_col + x_idx,
                                   split_subsize);
      chosen_rate += x->partition_cost[pl][PARTITION_NONE];
    }

    pl = partition_plane_context(xd, mi_row, mi_col, bsize);
if (chosen_rate < INT_MAX) {
chosen_rate += x->partition_cost[pl][PARTITION_SPLIT];
chosen_rd = RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist);
}
  }

  // If last_part is better set the partitioning to that...
  if (last_part_rd < chosen_rd) {
    mi_8x8[0]->mbmi.sb_type = bsize;
    if (bsize >= BLOCK_8X8)
      *(get_sb_partitioning(x, bsize)) = subsize;
    chosen_rate = last_part_rate;
    chosen_dist = last_part_dist;
    chosen_rd = last_part_rd;
  }
  // If none was better set the partitioning to that...
  if (none_rd < chosen_rd) {
    if (bsize >= BLOCK_8X8)
*(get_sb_partitioning(x, bsize)) = bsize;
chosen_rate = none_rate;
chosen_dist = none_dist;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// We must have chosen a partitioning and encoding or we'll fail later on.
// No other opportunities for success.
  if (bsize == BLOCK_64X64)
assert(chosen_rate < INT_MAX && chosen_dist < INT64_MAX);
if (do_recon) {
int output_enabled = (bsize == BLOCK_64X64);
    // Check the projected output rate for this SB against its target
    // and if necessary apply a Q delta using segmentation to get
    // closer to the target.
    if ((cpi->oxcf.aq_mode == COMPLEXITY_AQ) && cm->seg.update_map) {
      vp9_select_in_frame_q_segment(cpi, mi_row, mi_col,
                                    output_enabled, chosen_rate);
    }

    if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
      vp9_cyclic_refresh_set_rate_and_dist_sb(cpi->cyclic_refresh,
                                              chosen_rate, chosen_dist);
    encode_sb(cpi, tile, tp, mi_row, mi_col, output_enabled, bsize);
  }

  *rate = chosen_rate;
  *dist = chosen_dist;
}