// horizontal split is available on all but the right border
if (mi_col + mi_size < cm->mi_cols && vt.vt->horz[0].variance < threshold
&& (rand() & 3) < 1) {
set_block_size(cm, m, get_subsize(block_size, PARTITION_HORZ), mis, mi_row,
mi_col);
return 1;
}
return 0;
}
#else
static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
BLOCK_SIZE_TYPE block_size, int mi_row,
int mi_col, int mi_size) {
VP9_COMMON * const cm = &cpi->common;
vt_node vt;
const int mis = cm->mode_info_stride;
int64_t threshold = 50 * cpi->common.base_qindex;
tree_to_node(data, block_size, &vt);
// split none is available only if we have more than half a block size
// in width and height inside the visible image
if (mi_col + mi_size < cm->mi_cols && mi_row + mi_size < cm->mi_rows
&& vt.vt->none.variance < threshold) {
set_block_size(cm, m, block_size, mis, mi_row, mi_col);
return 1;
}
// vertical split is available on all but the bottom border
if (mi_row + mi_size < cm->mi_rows && vt.vt->vert[0].variance < threshold
&& vt.vt->vert[1].variance < threshold) {
set_block_size(cm, m, get_subsize(block_size, PARTITION_VERT), mis, mi_row,
mi_col);
    return 1;
  }

  // horizontal split is available on all but the right border
if (mi_col + mi_size < cm->mi_cols && vt.vt->horz[0].variance < threshold
&& vt.vt->horz[1].variance < threshold) {
set_block_size(cm, m, get_subsize(block_size, PARTITION_HORZ), mis, mi_row,
mi_col);
return 1;
}
return 0;
}
#endif
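
// Illustrative sketch (hypothetical, not part of the original file):
// fill_variance(), used by choose_partitioning() below, is assumed to reduce
// an (sse, sum, n) pixel-difference accumulation to a variance with the
// standard identity Var(X) = E[X^2] - E[X]^2. In integer form, for n > 0:
static int64_t example_variance_from_sse_sum(int64_t sse, int64_t sum, int n) {
  // (sse - sum^2 / n) / n; the real encoder may scale to keep precision.
  return (sse - (sum * sum) / n) / n;
}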
static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
int mi_col) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
const int mis = cm->mode_info_stride;
// TODO(JBB): More experimentation or testing of this threshold;
int64_t threshold = 4;
int i, j, k;
v64x64 vt;
unsigned char * s;
  int sp;
  const unsigned char *d;
  int dp;
int pixels_wide = 64, pixels_high = 64;
vpx_memset(&vt, 0, sizeof(vt));
set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
if (xd->mb_to_right_edge < 0)
pixels_wide += (xd->mb_to_right_edge >> 3);
if (xd->mb_to_bottom_edge < 0)
pixels_high += (xd->mb_to_bottom_edge >> 3);
s = x->plane[0].src.buf;
sp = x->plane[0].src.stride;
// TODO(JBB): Clearly the higher the quantizer the fewer partitions we want
// but this needs more experimentation.
threshold = threshold * cpi->common.base_qindex * cpi->common.base_qindex;
  d = vp9_64x64_zeros;
  dp = 64;
  if (cm->frame_type != KEY_FRAME) {
    int_mv nearest_mv, near_mv;
    YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[0];
    YV12_BUFFER_CONFIG *second_ref_fb = NULL;

    setup_pre_planes(xd, 0, ref_fb, mi_row, mi_col,
                     &xd->scale_factor[0]);
    setup_pre_planes(xd, 1, second_ref_fb, mi_row, mi_col,
                     &xd->scale_factor[1]);

    xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
    xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB64X64;
    vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
                          &nearest_mv, &near_mv);

    xd->mode_info_context->mbmi.mv[0] = nearest_mv;
    vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_SIZE_SB64X64);
    d = xd->plane[0].dst.buf;
    dp = xd->plane[0].dst.stride;
  }
// Fill in the entire tree of 8x8 variances for splits.
for (i = 0; i < 4; i++) {
const int x32_idx = ((i & 1) << 5);
const int y32_idx = ((i >> 1) << 5);
for (j = 0; j < 4; j++) {
const int x16_idx = x32_idx + ((j & 1) << 4);
const int y16_idx = y32_idx + ((j >> 1) << 4);
v16x16 *vst = &vt.split[i].split[j];
for (k = 0; k < 4; k++) {
int x_idx = x16_idx + ((k & 1) << 3);
int y_idx = y16_idx + ((k >> 1) << 3);
unsigned int sse = 0;
int sum = 0;
if (x_idx < pixels_wide && y_idx < pixels_high)
vp9_get_sse_sum_8x8(s + y_idx * sp + x_idx, sp,
d + y_idx * dp + x_idx, dp, &sse, &sum);
fill_variance(&vst->split[k].vt.none, sse, sum, 64);
}
}
}
// Fill the rest of the variance tree by summing the split partition
// values.
  for (i = 0; i < 4; i++) {
    for (j = 0; j < 4; j++) {
      fill_variance_tree(&vt.split[i].split[j], BLOCK_SIZE_MB16X16);
    }
    fill_variance_tree(&vt.split[i], BLOCK_SIZE_SB32X32);
  }
  fill_variance_tree(&vt, BLOCK_SIZE_SB64X64);
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold, or we
// hit 8x8.
if (!set_vt_partitioning(cpi, &vt, m, BLOCK_SIZE_SB64X64, mi_row, mi_col,
4)) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_SIZE_SB32X32,
(mi_row + y32_idx), (mi_col + x32_idx), 2)) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m,
BLOCK_SIZE_MB16X16,
(mi_row + y32_idx + y16_idx),
(mi_col + x32_idx + x16_idx), 1)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
set_block_size(cm, m, BLOCK_SIZE_SB8X8, mis,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx));
}
}
}
}
}
}
}
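
// Illustrative sketch (assumption, not from this file): the RDCOST() macro
// used throughout the partition search below is assumed to combine a rate
// term weighted by the Lagrange multiplier x->rdmult with a distortion term
// scaled by x->rddiv, i.e. rd = lambda * rate + distortion. A standalone
// function of that assumed shape:
static int64_t example_rd_cost(int rdmult, int rddiv, int rate, int64_t dist) {
  // Weight rate by rdmult with rounding, then add the shifted distortion;
  // the exact rounding and shifts in the real macro may differ.
  return ((128 + (int64_t)rate * rdmult) >> 8) + (dist << rddiv);
}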
static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize,
int *rate, int64_t *dist, int do_recon) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
const int mis = cm->mode_info_stride;
int bsl = b_width_log2(bsize);
int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
int ms = num_4x4_blocks_wide / 2;
  int mh = num_4x4_blocks_high / 2;
  int bss = (1 << bsl) / 4;
  int i, pl;
  PARTITION_TYPE partition = PARTITION_NONE;
BLOCK_SIZE_TYPE subsize;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
int last_part_rate = INT_MAX;
int64_t last_part_dist = INT_MAX;
int split_rate = INT_MAX;
int64_t split_dist = INT_MAX;
int none_rate = INT_MAX;
int64_t none_dist = INT_MAX;
int chosen_rate = INT_MAX;
int64_t chosen_dist = INT_MAX;
BLOCK_SIZE_TYPE sub_subsize = BLOCK_SIZE_AB4X4;
int splits_below = 0;
BLOCK_SIZE_TYPE bs_type = m->mbmi.sb_type;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
  partition = partition_lookup[bsl][bs_type];

  subsize = get_subsize(bsize, partition);
if (bsize < BLOCK_SIZE_SB8X8) {
if (xd->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
}
} else {
*(get_sb_partitioning(x, bsize)) = subsize;
}
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
x->fast_ms = 0;
x->pred_mv.as_int = 0;
x->subblock_ref = 0;
if (cpi->sf.adjust_partitioning_from_last_frame) {
// Check if any of the sub blocks are further split.
if (partition == PARTITION_SPLIT && subsize > BLOCK_SIZE_SB8X8) {
sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
splits_below = 1;
for (i = 0; i < 4; i++) {
int jj = i >> 1, ii = i & 0x01;
if (m[jj * bss * mis + ii * bss].mbmi.sb_type >= sub_subsize) {
splits_below = 0;
}
}
}
// If partition is not none try none unless each of the 4 splits are split
// even further..
if (partition != PARTITION_NONE && !splits_below &&
mi_row + (ms >> 1) < cm->mi_rows &&
mi_col + (ms >> 1) < cm->mi_cols) {
*(get_sb_partitioning(x, bsize)) = bsize;
pick_sb_modes(cpi, mi_row, mi_col, &none_rate, &none_dist, bsize,
get_block_context(x, bsize), INT64_MAX);
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
none_rate += x->partition_cost[pl][PARTITION_NONE];
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
m->mbmi.sb_type = bs_type;
*(get_sb_partitioning(x, bsize)) = subsize;
}
}
  switch (partition) {
    case PARTITION_NONE:
      pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
                    bsize, get_block_context(x, bsize), INT64_MAX);
      break;
case PARTITION_HORZ:
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
subsize, get_block_context(x, subsize), INT64_MAX);
if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_SIZE_SB8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
        int rt = 0;
        int64_t dt = 0;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, &rt, &dt, subsize,
get_block_context(x, subsize), INT64_MAX);
if (rt == INT_MAX || dt == INT_MAX) {
last_part_rate = INT_MAX;
last_part_dist = INT_MAX;
break;
}
last_part_rate += rt;
last_part_dist += dt;
}
break;
case PARTITION_VERT:
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, &last_part_rate, &last_part_dist,
subsize, get_block_context(x, subsize), INT64_MAX);
if (last_part_rate != INT_MAX &&
          bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
        int rt = 0;
        int64_t dt = 0;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), &rt, &dt, subsize,
get_block_context(x, subsize), INT64_MAX);
if (rt == INT_MAX || dt == INT_MAX) {
last_part_rate = INT_MAX;
last_part_dist = INT_MAX;
break;
}
last_part_rate += rt;
        last_part_dist += dt;
      }
      break;
    case PARTITION_SPLIT:
      // Split partition.
      last_part_rate = 0;
      last_part_dist = 0;
      for (i = 0; i < 4; i++) {
        int x_idx = (i & 1) * (ms >> 1);
        int y_idx = (i >> 1) * (ms >> 1);
        int jj = i >> 1, ii = i & 0x01;
        int rt;
        int64_t dt;
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, subsize)) = i;
rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
mi_col + x_idx, subsize, &rt, &dt, i != 3);
if (rt == INT_MAX || dt == INT_MAX) {
last_part_rate = INT_MAX;
last_part_dist = INT_MAX;
break;
}
last_part_rate += rt;
last_part_dist += dt;
}
break;
default:
assert(0);
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (last_part_rate < INT_MAX)
last_part_rate += x->partition_cost[pl][partition];
if (cpi->sf.adjust_partitioning_from_last_frame
&& partition != PARTITION_SPLIT && bsize > BLOCK_SIZE_SB8X8
&& (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows)
&& (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) {
BLOCK_SIZE_TYPE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
split_rate = 0;
split_dist = 0;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// Split partition.
for (i = 0; i < 4; i++) {
int x_idx = (i & 1) * (num_4x4_blocks_wide >> 2);
      int y_idx = (i >> 1) * (num_4x4_blocks_high >> 2);
int rt = 0;
int64_t dt = 0;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
if ((mi_row + y_idx >= cm->mi_rows)
|| (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, split_subsize)) = i;
*(get_sb_partitioning(x, bsize)) = split_subsize;
*(get_sb_partitioning(x, split_subsize)) = split_subsize;
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
pick_sb_modes(cpi, mi_row + y_idx, mi_col + x_idx, &rt, &dt,
split_subsize, get_block_context(x, split_subsize),
INT64_MAX);
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (rt == INT_MAX || dt == INT_MAX) {
split_rate = INT_MAX;
split_dist = INT_MAX;
break;
}
if (i != 3)
encode_sb(cpi, tp, mi_row + y_idx, mi_col + x_idx, 0,
split_subsize);
split_rate += rt;
split_dist += dt;
set_partition_seg_context(cm, xd, mi_row + y_idx, mi_col + x_idx);
pl = partition_plane_context(xd, bsize);
split_rate += x->partition_cost[pl][PARTITION_NONE];
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (split_rate < INT_MAX) {
split_rate += x->partition_cost[pl][PARTITION_SPLIT];
chosen_rate = split_rate;
chosen_dist = split_dist;
}
}
// If last_part is better set the partitioning to that...
if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
< RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
m->mbmi.sb_type = bsize;
if (bsize >= BLOCK_SIZE_SB8X8)
*(get_sb_partitioning(x, bsize)) = subsize;
chosen_rate = last_part_rate;
chosen_dist = last_part_dist;
}
// If none was better set the partitioning to that...
if (RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)
> RDCOST(x->rdmult, x->rddiv, none_rate, none_dist)) {
if (bsize >= BLOCK_SIZE_SB8X8)
*(get_sb_partitioning(x, bsize)) = bsize;
chosen_rate = none_rate;
chosen_dist = none_dist;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// We must have chosen a partitioning and encoding or we'll fail later on.
// No other opportunities for success.
  if (bsize == BLOCK_SIZE_SB64X64)
assert(chosen_rate < INT_MAX && chosen_dist < INT_MAX);
if (do_recon)
encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
*rate = chosen_rate;
  *dist = chosen_dist;
}
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previously rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
int mi_col, BLOCK_SIZE_TYPE bsize, int *rate,
int64_t *dist, int do_recon, int64_t best_rd) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
int bsl = b_width_log2(bsize), bs = 1 << bsl;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
TOKENEXTRA *tp_orig = *tp;
  int ms = bs / 2;
  int i, pl;
  BLOCK_SIZE_TYPE subsize;
  int r;
  int64_t d;
  int larger_is_better = 0;
  int srate = INT_MAX;
int64_t sdist = INT_MAX;
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
}
assert(mi_height_log2(bsize) == mi_width_log2(bsize));
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (!cpi->sf.use_partitions_greater_than ||
bsize > cpi->sf.greater_than_block_size) {
if (bsize > BLOCK_SIZE_SB8X8) {
      int r4 = 0;
      int64_t d4 = 0, sum_rd = 0;
subsize = get_subsize(bsize, PARTITION_SPLIT);
for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
int x_idx = (i & 1) * (ms >> 1);
        int y_idx = (i >> 1) * (ms >> 1);
        int r = 0;
        int64_t d = 0;
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, subsize)) = i;
rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize, &r,
&d, i != 3, best_rd - sum_rd);
if (r == INT_MAX) {
r4 = INT_MAX;
sum_rd = INT64_MAX;
} else {
r4 += r;
d4 += d;
sum_rd = RDCOST(x->rdmult, x->rddiv, r4, d4);
}
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r4 != INT_MAX && i == 4) {
r4 += x->partition_cost[pl][PARTITION_SPLIT];
*(get_sb_partitioning(x, bsize)) = subsize;
assert(r4 >= 0);
assert(d4 >= 0);
srate = r4;
sdist = d4;
best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r4, d4));
}
      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
    }
  }
x->fast_ms = 0;
x->pred_mv.as_int = 0;
x->subblock_ref = 0;
// Use 4 subblocks' motion estimation results to speed up current
// partition's checking.
if (cpi->sf.using_small_partition_info) {
// Only use 8x8 result for non HD videos.
// int use_8x8 = (MIN(cpi->common.width, cpi->common.height) < 720) ? 1 : 0;
int use_8x8 = 1;
if (cm->frame_type && !cpi->is_src_frame_alt_ref &&
((use_8x8 && bsize == BLOCK_SIZE_MB16X16) ||
bsize == BLOCK_SIZE_SB32X32 || bsize == BLOCK_SIZE_SB64X64)) {
int ref0 = 0, ref1 = 0, ref2 = 0, ref3 = 0;
PICK_MODE_CONTEXT *block_context = NULL;
if (bsize == BLOCK_SIZE_MB16X16) {
block_context = x->sb8x8_context[xd->sb_index][xd->mb_index];
} else if (bsize == BLOCK_SIZE_SB32X32) {
block_context = x->mb_context[xd->sb_index];
} else if (bsize == BLOCK_SIZE_SB64X64) {
block_context = x->sb32_context;
}
if (block_context) {
ref0 = block_context[0].mic.mbmi.ref_frame[0];
ref1 = block_context[1].mic.mbmi.ref_frame[0];
ref2 = block_context[2].mic.mbmi.ref_frame[0];
ref3 = block_context[3].mic.mbmi.ref_frame[0];
}
// Currently, only consider 4 inter ref frames.
if (ref0 && ref1 && ref2 && ref3) {
int16_t mvr0 = 0, mvc0 = 0, mvr1 = 0, mvc1 = 0, mvr2 = 0, mvc2 = 0,
mvr3 = 0, mvc3 = 0;
int d01, d23, d02, d13; // motion vector distance between 2 blocks
// Get each subblock's motion vectors.
mvr0 = block_context[0].mic.mbmi.mv[0].as_mv.row;
mvc0 = block_context[0].mic.mbmi.mv[0].as_mv.col;
mvr1 = block_context[1].mic.mbmi.mv[0].as_mv.row;
mvc1 = block_context[1].mic.mbmi.mv[0].as_mv.col;
mvr2 = block_context[2].mic.mbmi.mv[0].as_mv.row;
mvc2 = block_context[2].mic.mbmi.mv[0].as_mv.col;
mvr3 = block_context[3].mic.mbmi.mv[0].as_mv.row;
mvc3 = block_context[3].mic.mbmi.mv[0].as_mv.col;
// Adjust sign if ref is alt_ref
if (cm->ref_frame_sign_bias[ref0]) {
mvr0 *= -1;
mvc0 *= -1;
}
if (cm->ref_frame_sign_bias[ref1]) {
mvr1 *= -1;
mvc1 *= -1;
}
if (cm->ref_frame_sign_bias[ref2]) {
mvr2 *= -1;
mvc2 *= -1;
}
if (cm->ref_frame_sign_bias[ref3]) {
mvr3 *= -1;
mvc3 *= -1;
}
// Calculate mv distances.
d01 = MAX(abs(mvr0 - mvr1), abs(mvc0 - mvc1));
d23 = MAX(abs(mvr2 - mvr3), abs(mvc2 - mvc3));
d02 = MAX(abs(mvr0 - mvr2), abs(mvc0 - mvc2));
d13 = MAX(abs(mvr1 - mvr3), abs(mvc1 - mvc3));
if (d01 < 24 && d23 < 24 && d02 < 24 && d13 < 24) {
// Set fast motion search level.
x->fast_ms = 1;
// Calculate prediction MV
x->pred_mv.as_mv.row = (mvr0 + mvr1 + mvr2 + mvr3) >> 2;
x->pred_mv.as_mv.col = (mvc0 + mvc1 + mvc2 + mvc3) >> 2;
if (ref0 == ref1 && ref1 == ref2 && ref2 == ref3 &&
d01 < 2 && d23 < 2 && d02 < 2 && d13 < 2) {
// Set fast motion search level.
x->fast_ms = 2;
if (!d01 && !d23 && !d02 && !d13) {
x->fast_ms = 3;
x->subblock_ref = ref0;
}
}
}
}
}
}
if (!cpi->sf.use_partitions_less_than ||
bsize <= cpi->sf.less_than_block_size) {
// PARTITION_NONE
if ((mi_row + (ms >> 1) < cm->mi_rows) &&
(mi_col + (ms >> 1) < cm->mi_cols)) {
pick_sb_modes(cpi, mi_row, mi_col, &r, &d, bsize,
get_block_context(x, bsize), best_rd);
if (r != INT_MAX && bsize >= BLOCK_SIZE_SB8X8) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
r += x->partition_cost[pl][PARTITION_NONE];
}
if (r != INT_MAX &&
(bsize == BLOCK_SIZE_SB8X8 ||
RDCOST(x->rdmult, x->rddiv, r, d) <
RDCOST(x->rdmult, x->rddiv, srate, sdist))) {
best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r, d));
srate = r;
sdist = d;
if (bsize >= BLOCK_SIZE_SB8X8)
*(get_sb_partitioning(x, bsize)) = bsize;
}
}
if (bsize == BLOCK_SIZE_SB8X8) {
int r4 = 0;
int64_t d4 = 0, sum_rd = 0;
subsize = get_subsize(bsize, PARTITION_SPLIT);
for (i = 0; i < 4 && sum_rd < best_rd; ++i) {
int x_idx = (i & 1) * (ms >> 1);
int y_idx = (i >> 1) * (ms >> 1);
int r = 0;
int64_t d = 0;
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, subsize)) = i;
rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize, &r,
&d, i != 3, best_rd - sum_rd);
if (r == INT_MAX) {
r4 = INT_MAX;
sum_rd = INT64_MAX;
} else {
r4 += r;
d4 += d;
sum_rd = RDCOST(x->rdmult, x->rddiv, r4, d4);
}
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r4 != INT_MAX && i == 4) {
r4 += x->partition_cost[pl][PARTITION_SPLIT];
if (RDCOST(x->rdmult, x->rddiv, r4, d4) <
RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
srate = r4;
sdist = d4;
*(get_sb_partitioning(x, bsize)) = subsize;
best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r4, d4));
}
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
if (!cpi->sf.use_square_partition_only &&
      (!cpi->sf.less_rectangular_check || !larger_is_better)) {
// PARTITION_HORZ
if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
      int r2, r = 0;
      int64_t d2, d = 0, h_rd;
subsize = get_subsize(bsize, PARTITION_HORZ);
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, &r2, &d2, subsize,
get_block_context(x, subsize), best_rd);
h_rd = RDCOST(x->rdmult, x->rddiv, r2, d2);
if (r2 != INT_MAX && h_rd < best_rd &&
mi_row + (ms >> 1) < cm->mi_rows) {
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, &r, &d, subsize,
get_block_context(x, subsize), best_rd - h_rd);
if (r == INT_MAX) {
r2 = INT_MAX;
} else {
r2 += r;
d2 += d;
}
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r2 < INT_MAX)
r2 += x->partition_cost[pl][PARTITION_HORZ];
if (r2 != INT_MAX && RDCOST(x->rdmult, x->rddiv, r2, d2)
< RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
best_rd = MIN(best_rd, RDCOST(x->rdmult, x->rddiv, r2, d2));
srate = r2;
sdist = d2;
*(get_sb_partitioning(x, bsize)) = subsize;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
// PARTITION_VERT
if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
      int r2;
      int64_t d2, v_rd;
subsize = get_subsize(bsize, PARTITION_VERT);
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, &r2, &d2, subsize,
get_block_context(x, subsize), best_rd);
v_rd = RDCOST(x->rdmult, x->rddiv, r2, d2);
if (r2 != INT_MAX && v_rd < best_rd &&
mi_col + (ms >> 1) < cm->mi_cols) {
int r = 0;
int64_t d = 0;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), &r, &d, subsize,
get_block_context(x, subsize), best_rd - v_rd);
if (r == INT_MAX) {
r2 = INT_MAX;
} else {
r2 += r;
d2 += d;
}
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r2 < INT_MAX)
r2 += x->partition_cost[pl][PARTITION_VERT];
if (r2 != INT_MAX &&
RDCOST(x->rdmult, x->rddiv, r2, d2)
< RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
srate = r2;
sdist = d2;
*(get_sb_partitioning(x, bsize)) = subsize;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
}
}
*rate = srate;
*dist = sdist;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (srate < INT_MAX && sdist < INT_MAX && do_recon)
encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
if (bsize == BLOCK_SIZE_SB64X64) {
assert(srate < INT_MAX);
assert(sdist < INT_MAX);
} else {
    assert(tp_orig == *tp);
  }
}
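
// Illustrative sketch (hypothetical helper, not from this file): the fast_ms
// heuristic in rd_pick_partition() above compares subblock motion vectors
// with a max-of-components (Chebyshev) distance. For example, mv0 = (3, -5)
// and mv1 = (6, -1) give MAX(|3 - 6|, |-5 - (-1)|) = 4, well inside the
// 24-unit agreement threshold used above.
static int example_mv_chebyshev_distance(int16_t r0, int16_t c0,
                                         int16_t r1, int16_t c1) {
  const int dr = r0 > r1 ? r0 - r1 : r1 - r0;
  const int dc = c0 > c1 ? c0 - c1 : c1 - c0;
  return dr > dc ? dr : dc;
}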
// Examines 64x64 block and chooses a best reference frame
static void rd_pick_reference_frame(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
int mi_col, int *rate, int64_t *dist) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
int bsl = b_width_log2(BLOCK_SIZE_SB64X64), bs = 1 << bsl;
int ms = bs / 2;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
int pl;
int r;
int64_t d;
save_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);
  // Default is no mask (all reference frames allowed).
cpi->ref_frame_mask = 0;
// Do RD search for 64x64.
if ((mi_row + (ms >> 1) < cm->mi_rows) &&
(mi_col + (ms >> 1) < cm->mi_cols)) {
cpi->set_ref_frame_mask = 1;
pick_sb_modes(cpi, mi_row, mi_col, &r, &d, BLOCK_SIZE_SB64X64,
get_block_context(x, BLOCK_SIZE_SB64X64), INT64_MAX);
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, BLOCK_SIZE_SB64X64);
r += x->partition_cost[pl][PARTITION_NONE];
*(get_sb_partitioning(x, BLOCK_SIZE_SB64X64)) = BLOCK_SIZE_SB64X64;
cpi->set_ref_frame_mask = 0;
}
*rate = r;
*dist = d;
// RDCOST(x->rdmult, x->rddiv, r, d)
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, BLOCK_SIZE_SB64X64);
/*if (srate < INT_MAX && sdist < INT_MAX)
encode_sb(cpi, tp, mi_row, mi_col, 1, BLOCK_SIZE_SB64X64);
if (bsize == BLOCK_SIZE_SB64X64) {
assert(tp_orig < *tp);
assert(srate < INT_MAX);
assert(sdist < INT_MAX);
} else {
assert(tp_orig == *tp);
}
*/
}
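
// Illustrative note (assumption, not from this file): cpi->ref_frame_mask is
// taken here to be a bitmask over reference frames in which a set bit
// excludes that reference from the subsequent mode search; for example,
// something like
//   cpi->ref_frame_mask = (1 << GOLDEN_FRAME) | (1 << ALTREF_FRAME);
// would leave only INTRA_FRAME and LAST_FRAME candidates.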
static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
int *totalrate) {
  VP9_COMMON * const cm = &cpi->common;
  int mi_col;
// Initialize the left context for the new SB row
vpx_memset(&cm->left_context, 0, sizeof(cm->left_context));
vpx_memset(cm->left_seg_context, 0, sizeof(cm->left_seg_context));
// Code each SB in the row
  for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
       mi_col += 64 / MI_SIZE) {
int dummy_rate;
int64_t dummy_dist;
// Initialize a mask of modes that we will not consider;
// cpi->unused_mode_skip_mask = 0x0000000AAE17F800 (test no golden)
if (cpi->common.frame_type == KEY_FRAME)
cpi->unused_mode_skip_mask = 0;
else
cpi->unused_mode_skip_mask = 0xFFFFFFFFFFFFFE00;
if (cpi->sf.reference_masking) {
rd_pick_reference_frame(cpi, tp, mi_row, mi_col,
&dummy_rate, &dummy_dist);
}
if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning ||
        cpi->sf.use_one_partition_size_always) {
const int idx_str = cm->mode_info_stride * mi_row + mi_col;
MODE_INFO *m = cm->mi + idx_str;
MODE_INFO *p = cm->prev_mi + idx_str;
if (cpi->sf.use_one_partition_size_always) {
set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
set_partitioning(cpi, m, cpi->sf.always_this_block_size);
rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist, 1);
} else if (cpi->sf.partition_by_variance) {
choose_partitioning(cpi, cm->mi, mi_row, mi_col);
rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
                       &dummy_rate, &dummy_dist, 1);
    } else {
      if ((cpi->common.current_video_frame
% cpi->sf.last_partitioning_redo_frequency) == 0
|| cm->prev_mi == 0
|| cpi->common.show_frame == 0
|| cpi->common.frame_type == KEY_FRAME
|| cpi->is_src_frame_alt_ref) {
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist, 1, INT64_MAX);
} else {
copy_partitioning(cpi, m, p);
rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist, 1);
      }
    }
  } else {
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
                      &dummy_rate, &dummy_dist, 1, INT64_MAX);
    }
  }
}
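
// Illustrative sketch (hypothetical helper, not from this file): the idx_str
// arithmetic in encode_sb_row() above addresses the mode-info grid row-major
// in 8x8 "mi" units, so the MODE_INFO for (mi_row, mi_col) is found as:
static MODE_INFO *example_mi_at(MODE_INFO *grid, int stride,
                                int mi_row, int mi_col) {
  // One MODE_INFO per 8x8 block; stride is the pitch of one grid row.
  return grid + stride * mi_row + mi_col;
}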
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
MACROBLOCK *const x = &cpi->mb;
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp9_init_mbmode_probs(cm);
// TODO(jkoleszar): are these initializations required?
  setup_pre_planes(xd, 0, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]],
                   0, 0, NULL);
setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);
setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
vp9_zero(cpi->y_mode_count)
vp9_zero(cm->counts.inter_mode)
vp9_zero(cpi->intra_inter_count);
vp9_zero(cpi->comp_inter_count);
vp9_zero(cpi->single_ref_count);
vp9_zero(cpi->comp_ref_count);
vp9_zero(cm->counts.tx);
vp9_zero(cm->counts.mbskip);
// Note: this memset assumes above_context[0], [1] and [2]
// are allocated as part of the same buffer.
vpx_memset(cm->above_context[0], 0,
sizeof(ENTROPY_CONTEXT) * 2 * MAX_MB_PLANE * aligned_mi_cols);
  vpx_memset(cm->above_seg_context, 0,
             sizeof(PARTITION_CONTEXT) * aligned_mi_cols);
}
static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
if (lossless) {
cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
cpi->mb.optimize = 0;
cpi->mb.e_mbd.lf.filter_level = 0;
    cpi->common.tx_mode = ONLY_4X4;
  } else {
    cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
    cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
  }
}
static void switch_tx_mode(VP9_COMP *cpi) {
if (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
cpi->common.tx_mode >= ALLOW_32X32)
    cpi->common.tx_mode = ALLOW_32X32;
}
static void encode_frame_internal(VP9_COMP *cpi) {
MACROBLOCK * const x = &cpi->mb;
VP9_COMMON * const cm = &cpi->common;
  MACROBLOCKD * const xd = &x->e_mbd;
  int totalrate;
// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
// cpi->common.current_video_frame, cpi->common.show_frame,
// cm->frame_type);
// debug output
#if DBG_PRNT_SEGMAP
  {
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n");
fclose(statsfile);
  }
#endif
totalrate = 0;
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
vp9_zero(cm->counts.switchable_interp);
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
vp9_zero(cpi->coef_counts);
vp9_zero(cm->counts.eob_branch);
cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0