static int set_vt_partitioning(VP9_COMP *cpi, void *data, MODE_INFO *m,
BLOCK_SIZE_TYPE block_size, int mi_row,
int mi_col, int mi_size) {
VP9_COMMON * const cm = &cpi->common;
vt_node vt;
const int mis = cm->mode_info_stride;
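  // The no-split threshold scales linearly with the frame quantizer: the
  // coarser the quantization, the more variance a block may contain before
  // it is split further.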
int64_t threshold = 50 * cpi->common.base_qindex;
tree_to_node(data, block_size, &vt);
// split none is available only if we have more than half a block size
// in width and height inside the visible image
if (mi_col + mi_size < cm->mi_cols && mi_row + mi_size < cm->mi_rows
&& vt.vt->none.variance < threshold) {
set_block_size(cm, m, block_size, mis, mi_row, mi_col);
return 1;
}
// vertical split is available on all but the bottom border
if (mi_row + mi_size < cm->mi_rows && vt.vt->vert[0].variance < threshold
&& vt.vt->vert[1].variance < threshold) {
set_block_size(cm, m, get_subsize(block_size, PARTITION_VERT), mis, mi_row,
mi_col);
    return 1;
  }
  // horizontal split is available on all but the right border
if (mi_col + mi_size < cm->mi_cols && vt.vt->horz[0].variance < threshold
&& vt.vt->horz[1].variance < threshold) {
set_block_size(cm, m, get_subsize(block_size, PARTITION_HORZ), mis, mi_row,
mi_col);
return 1;
}
return 0;
}
#endif
static void choose_partitioning(VP9_COMP *cpi, MODE_INFO *m, int mi_row,
int mi_col) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
const int mis = cm->mode_info_stride;
// TODO(JBB): More experimentation or testing of this threshold;
int64_t threshold = 4;
int i, j, k;
v64x64 vt;
  unsigned char * s;
  int sp;
  const unsigned char * d;
  int dp;
  int pixels_wide = 64, pixels_high = 64;
vpx_memset(&vt, 0, sizeof(vt));
set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
if (xd->mb_to_right_edge < 0)
pixels_wide += (xd->mb_to_right_edge >> 3);
if (xd->mb_to_bottom_edge < 0)
pixels_high += (xd->mb_to_bottom_edge >> 3);
s = x->plane[0].src.buf;
sp = x->plane[0].src.stride;
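  // Variance below is measured against d/dp: an all-zero block on key frames
  // (raw source variance) or the LAST_FRAME prediction on inter frames
  // (prediction-error variance).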
// TODO(JBB): Clearly the higher the quantizer the fewer partitions we want
// but this needs more experimentation.
threshold = threshold * cpi->common.base_qindex * cpi->common.base_qindex;
d = vp9_64x64_zeros;
  dp = 64;
  if (cm->frame_type != KEY_FRAME) {
    int_mv nearest_mv, near_mv;
YV12_BUFFER_CONFIG *ref_fb = &cm->yv12_fb[0];
YV12_BUFFER_CONFIG *second_ref_fb = NULL;
setup_pre_planes(xd, ref_fb, second_ref_fb, mi_row, mi_col,
xd->scale_factor, xd->scale_factor_uv);
xd->mode_info_context->mbmi.ref_frame[0] = LAST_FRAME;
xd->mode_info_context->mbmi.sb_type = BLOCK_SIZE_SB64X64;
vp9_find_best_ref_mvs(xd, m->mbmi.ref_mvs[m->mbmi.ref_frame[0]],
&nearest_mv, &near_mv);
xd->mode_info_context->mbmi.mv[0] = nearest_mv;
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_SIZE_SB64X64);
d = xd->plane[0].dst.buf;
dp = xd->plane[0].dst.stride;
}
// Fill in the entire tree of 8x8 variances for splits.
for (i = 0; i < 4; i++) {
const int x32_idx = ((i & 1) << 5);
const int y32_idx = ((i >> 1) << 5);
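      // The low bit of the index selects the horizontal half and the high bit
      // the vertical half; the same bit trick is reused for the 16x16 and 8x8
      // sub-blocks below.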
for (j = 0; j < 4; j++) {
const int x16_idx = x32_idx + ((j & 1) << 4);
const int y16_idx = y32_idx + ((j >> 1) << 4);
v16x16 *vst = &vt.split[i].split[j];
for (k = 0; k < 4; k++) {
int x_idx = x16_idx + ((k & 1) << 3);
int y_idx = y16_idx + ((k >> 1) << 3);
unsigned int sse = 0;
int sum = 0;
if (x_idx < pixels_wide && y_idx < pixels_high)
vp9_get_sse_sum_8x8(s + y_idx * sp + x_idx, sp,
d + y_idx * dp + x_idx, dp, &sse, &sum);
fill_variance(&vst->split[k].vt.none, sse, sum, 64);
}
}
}
// Fill the rest of the variance tree by summing the split partition
// values.
  for (i = 0; i < 4; i++) {
    for (j = 0; j < 4; j++) {
      fill_variance_tree(&vt.split[i].split[j], BLOCK_SIZE_MB16X16);
    }
    fill_variance_tree(&vt.split[i], BLOCK_SIZE_SB32X32);
  }
  fill_variance_tree(&vt, BLOCK_SIZE_SB64X64);
// Now go through the entire structure, splitting every block size until
// we get to one that's got a variance lower than our threshold, or we
// hit 8x8.
if (!set_vt_partitioning(cpi, &vt, m, BLOCK_SIZE_SB64X64, mi_row, mi_col,
4)) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
if (!set_vt_partitioning(cpi, &vt.split[i], m, BLOCK_SIZE_SB32X32,
(mi_row + y32_idx), (mi_col + x32_idx), 2)) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
if (!set_vt_partitioning(cpi, &vt.split[i].split[j], m,
BLOCK_SIZE_MB16X16,
(mi_row + y32_idx + y16_idx),
(mi_col + x32_idx + x16_idx), 1)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
set_block_size(cm, m, BLOCK_SIZE_SB8X8, mis,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx));
}
}
}
}
}
}
}
static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
                             int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize,
                             int *rate, int64_t *dist) {
  VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
const int mis = cm->mode_info_stride;
int bwl = b_width_log2(m->mbmi.sb_type);
int bhl = b_height_log2(m->mbmi.sb_type);
int bsl = b_width_log2(bsize);
int bs = (1 << bsl);
int bh = (1 << bhl);
int ms = bs / 2;
  int mh = bh / 2;
  int bss = (1 << bsl) / 4;
int i, pl;
PARTITION_TYPE partition;
BLOCK_SIZE_TYPE subsize;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
int last_part_rate = INT_MAX;
int64_t last_part_dist = INT_MAX;
int split_rate = INT_MAX;
int64_t split_dist = INT_MAX;
int none_rate = INT_MAX;
int64_t none_dist = INT_MAX;
int chosen_rate = INT_MAX;
int64_t chosen_dist = INT_MAX;
BLOCK_SIZE_TYPE sub_subsize = BLOCK_SIZE_AB4X4;
int splits_below = 0;
BLOCK_SIZE_TYPE bs_type = m->mbmi.sb_type;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
// parse the partition type
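  // The sb_type stored in the top-left mode info implies how this block was
  // partitioned last time: full width and height -> NONE, full width but
  // reduced height -> HORZ, reduced width -> VERT, both reduced -> SPLIT.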
if ((bwl == bsl) && (bhl == bsl))
partition = PARTITION_NONE;
else if ((bwl == bsl) && (bhl < bsl))
partition = PARTITION_HORZ;
else if ((bwl < bsl) && (bhl == bsl))
partition = PARTITION_VERT;
else if ((bwl < bsl) && (bhl < bsl))
partition = PARTITION_SPLIT;
else
assert(0);
subsize = get_subsize(bsize, partition);
if (bsize < BLOCK_SIZE_SB8X8) {
if (xd->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
}
} else {
*(get_sb_partitioning(x, bsize)) = subsize;
}
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
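  // Save the entropy and partition contexts so that each candidate
  // partitioning below is rate-measured from the same starting state.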
if (cpi->sf.adjust_partitioning_from_last_frame) {
// Check if any of the sub blocks are further split.
if (partition == PARTITION_SPLIT && subsize > BLOCK_SIZE_SB8X8) {
sub_subsize = get_subsize(subsize, PARTITION_SPLIT);
splits_below = 1;
for (i = 0; i < 4; i++) {
int jj = i >> 1, ii = i & 0x01;
if (m[jj * bss * mis + ii * bss].mbmi.sb_type >= sub_subsize) {
splits_below = 0;
}
}
}
// If partition is not none try none unless each of the 4 splits are split
// even further..
if (partition != PARTITION_NONE && !splits_below &&
mi_row + (ms >> 1) < cm->mi_rows &&
mi_col + (ms >> 1) < cm->mi_cols) {
*(get_sb_partitioning(x, bsize)) = bsize;
pick_sb_modes(cpi, mi_row, mi_col, tp, &none_rate, &none_dist, bsize,
get_block_context(x, bsize));
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
none_rate += x->partition_cost[pl][PARTITION_NONE];
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
m->mbmi.sb_type = bs_type;
*(get_sb_partitioning(x, bsize)) = subsize;
}
}
  switch (partition) {
    case PARTITION_NONE:
      pick_sb_modes(cpi, mi_row, mi_col, tp, &last_part_rate, &last_part_dist,
                    bsize, get_block_context(x, bsize));
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
last_part_rate += x->partition_cost[pl][PARTITION_NONE];
break;
case PARTITION_HORZ:
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &last_part_rate, &last_part_dist,
subsize, get_block_context(x, subsize));
      if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (mh >> 1) < cm->mi_rows) {
        int rt;
        int64_t dt;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
        pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, tp, &rt, &dt, subsize,
                      get_block_context(x, subsize));
last_part_rate += rt;
last_part_dist += dt;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
last_part_rate += x->partition_cost[pl][PARTITION_HORZ];
break;
case PARTITION_VERT:
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &last_part_rate, &last_part_dist,
subsize, get_block_context(x, subsize));
      if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
        int rt;
        int64_t dt;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
        pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), tp, &rt, &dt, subsize,
                      get_block_context(x, subsize));
last_part_rate += rt;
last_part_dist += dt;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
last_part_rate += x->partition_cost[pl][PARTITION_VERT];
      break;
    case PARTITION_SPLIT:
      // Split partition.
      last_part_rate = 0;
      last_part_dist = 0;
      for (i = 0; i < 4; i++) {
        int x_idx = (i & 1) * (ms >> 1);
        int y_idx = (i >> 1) * (ms >> 1);
        int jj = i >> 1, ii = i & 0x01;
        int rt;
        int64_t dt;
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, subsize)) = i;
rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
mi_col + x_idx, subsize, &rt, &dt);
last_part_rate += rt;
last_part_dist += dt;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
      last_part_rate += x->partition_cost[pl][PARTITION_SPLIT];
      break;
    default:
      assert(0);
  }
if (cpi->sf.adjust_partitioning_from_last_frame
&& partition != PARTITION_SPLIT && bsize > BLOCK_SIZE_SB8X8
&& (mi_row + ms < cm->mi_rows || mi_row + (ms >> 1) == cm->mi_rows)
&& (mi_col + ms < cm->mi_cols || mi_col + (ms >> 1) == cm->mi_cols)) {
BLOCK_SIZE_TYPE split_subsize = get_subsize(bsize, PARTITION_SPLIT);
split_rate = 0;
split_dist = 0;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// Split partition.
for (i = 0; i < 4; i++) {
int x_idx = (i & 1) * (bs >> 2);
int y_idx = (i >> 1) * (bs >> 2);
int rt = 0;
int64_t dt = 0;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
if ((mi_row + y_idx >= cm->mi_rows)
|| (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, split_subsize)) = i;
*(get_sb_partitioning(x, bsize)) = split_subsize;
*(get_sb_partitioning(x, split_subsize)) = split_subsize;
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
pick_sb_modes(cpi, mi_row + y_idx, mi_col + x_idx, tp, &rt, &dt,
split_subsize, get_block_context(x, split_subsize));
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (rt < INT_MAX && dt < INT_MAX)
encode_sb(cpi, tp, mi_row + y_idx, mi_col + x_idx, 0,
split_subsize);
split_rate += rt;
split_dist += dt;
set_partition_seg_context(cm, xd, mi_row + y_idx, mi_col + x_idx);
pl = partition_plane_context(xd, bsize);
split_rate += x->partition_cost[pl][PARTITION_NONE];
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
split_rate += x->partition_cost[pl][PARTITION_SPLIT];
chosen_rate = split_rate;
chosen_dist = split_dist;
}
// If last_part is better set the partitioning to that...
if (RDCOST(x->rdmult, x->rddiv, last_part_rate, last_part_dist)
< RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)) {
m->mbmi.sb_type = bsize;
if (bsize >= BLOCK_SIZE_SB8X8)
*(get_sb_partitioning(x, bsize)) = subsize;
chosen_rate = last_part_rate;
chosen_dist = last_part_dist;
}
// If none was better set the partitioning to that...
if (RDCOST(x->rdmult, x->rddiv, chosen_rate, chosen_dist)
> RDCOST(x->rdmult, x->rddiv, none_rate, none_dist)) {
if (bsize >= BLOCK_SIZE_SB8X8)
*(get_sb_partitioning(x, bsize)) = bsize;
chosen_rate = none_rate;
chosen_dist = none_dist;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// We must have chosen a partitioning and encoding or we'll fail later on.
// No other opportunities for success.
assert(chosen_rate < INT_MAX && chosen_dist < INT_MAX);
encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
*rate = chosen_rate;
  *dist = chosen_dist;
}
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp, int mi_row,
                              int mi_col, BLOCK_SIZE_TYPE bsize, int *rate,
                              int64_t *dist) {
  VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD * const xd = &x->e_mbd;
  int bsl = b_width_log2(bsize), bs = 1 << bsl;
  int ms = bs / 2;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
  TOKENEXTRA *tp_orig = *tp;
  int i, pl;
  BLOCK_SIZE_TYPE subsize;
int srate = INT_MAX;
int64_t sdist = INT_MAX;
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
}
assert(mi_height_log2(bsize) == mi_width_log2(bsize));
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
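  // PARTITION_SPLIT: searched unless the use_partitions_greater_than speed
  // feature restricts split evaluation to blocks larger than
  // greater_than_block_size.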
if (!cpi->sf.use_partitions_greater_than
|| (cpi->sf.use_partitions_greater_than
&& bsize > cpi->sf.greater_than_block_size)) {
    if (bsize >= BLOCK_SIZE_SB8X8) {
      int r4 = 0;
      int64_t d4 = 0;
subsize = get_subsize(bsize, PARTITION_SPLIT);
*(get_sb_partitioning(x, bsize)) = subsize;
for (i = 0; i < 4; ++i) {
int x_idx = (i & 1) * (ms >> 1);
      int y_idx = (i >> 1) * (ms >> 1);
      int r = 0;
      int64_t d = 0;
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, subsize)) = i;
rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize, &r,
&d);
r4 += r;
d4 += d;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r4 < INT_MAX)
r4 += x->partition_cost[pl][PARTITION_SPLIT];
assert(r4 >= 0);
assert(d4 >= 0);
srate = r4;
sdist = d4;
      restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
    }
  }
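  // PARTITION_NONE and the rectangular partitions below are searched only
  // when the use_partitions_less_than speed feature permits this block size.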
if (!cpi->sf.use_partitions_less_than
|| (cpi->sf.use_partitions_less_than
          && bsize <= cpi->sf.less_than_block_size)) {
    int larger_is_better = 0;
// PARTITION_NONE
    if ((mi_row + (ms >> 1) < cm->mi_rows) &&
        (mi_col + (ms >> 1) < cm->mi_cols)) {
      int r;
      int64_t d;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, bsize,
get_block_context(x, bsize));
if (bsize >= BLOCK_SIZE_SB8X8) {
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
r += x->partition_cost[pl][PARTITION_NONE];
}
if (RDCOST(x->rdmult, x->rddiv, r, d)
< RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
srate = r;
sdist = d;
if (bsize >= BLOCK_SIZE_SB8X8)
*(get_sb_partitioning(x, bsize)) = bsize;
}
}
if (!cpi->sf.less_rectangular_check || !larger_is_better) {
// PARTITION_HORZ
if (bsize >= BLOCK_SIZE_SB8X8 && mi_col + (ms >> 1) < cm->mi_cols) {
int r2, r = 0;
int64_t d2, d = 0;
subsize = get_subsize(bsize, PARTITION_HORZ);
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r2, &d2, subsize,
get_block_context(x, subsize));
if (mi_row + (ms >> 1) < cm->mi_rows) {
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, tp, &r, &d, subsize,
get_block_context(x, subsize));
r2 += r;
d2 += d;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r2 < INT_MAX)
r2 += x->partition_cost[pl][PARTITION_HORZ];
if (RDCOST(x->rdmult, x->rddiv, r2, d2)
< RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
srate = r2;
sdist = d2;
*(get_sb_partitioning(x, bsize)) = subsize;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
// PARTITION_VERT
if (bsize >= BLOCK_SIZE_SB8X8 && mi_row + (ms >> 1) < cm->mi_rows) {
int r2;
int64_t d2;
subsize = get_subsize(bsize, PARTITION_VERT);
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r2, &d2, subsize,
get_block_context(x, subsize));
if (mi_col + (ms >> 1) < cm->mi_cols) {
int r = 0;
int64_t d = 0;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), tp, &r, &d, subsize,
get_block_context(x, subsize));
r2 += r;
d2 += d;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r2 < INT_MAX)
r2 += x->partition_cost[pl][PARTITION_VERT];
if (RDCOST(x->rdmult, x->rddiv, r2, d2)
< RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
srate = r2;
sdist = d2;
*(get_sb_partitioning(x, bsize)) = subsize;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
}
}
*rate = srate;
*dist = sdist;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (srate < INT_MAX && sdist < INT_MAX)
encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
if (bsize == BLOCK_SIZE_SB64X64) {
assert(srate < INT_MAX);
assert(sdist < INT_MAX);
  } else {
    assert(tp_orig == *tp);
  }
}
static void encode_sb_row(VP9_COMP *cpi, int mi_row, TOKENEXTRA **tp,
int *totalrate) {
  VP9_COMMON * const cm = &cpi->common;
  int mi_col;
// Initialize the left context for the new SB row
vpx_memset(&cm->left_context, 0, sizeof(cm->left_context));
vpx_memset(cm->left_seg_context, 0, sizeof(cm->left_seg_context));
// Code each SB in the row
for (mi_col = cm->cur_tile_mi_col_start; mi_col < cm->cur_tile_mi_col_end;
mi_col += 64 / MI_SIZE) {
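    // 64 / MI_SIZE steps one 64x64 superblock at a time, in units of 8x8
    // mode-info entries.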
int dummy_rate;
int64_t dummy_dist;
if (cpi->sf.partition_by_variance || cpi->sf.use_lastframe_partitioning ||
        cpi->sf.use_one_partition_size_always) {
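      // Three fast paths: a fixed partition size for every superblock,
      // variance-based partitioning, or reuse of the previous frame's
      // partitioning, periodically refreshed by a full RD search.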
const int idx_str = cm->mode_info_stride * mi_row + mi_col;
MODE_INFO *m = cm->mi + idx_str;
MODE_INFO *p = cm->prev_mi + idx_str;
if (cpi->sf.use_one_partition_size_always) {
set_offsets(cpi, mi_row, mi_col, BLOCK_SIZE_SB64X64);
set_partitioning(cpi, m, cpi->sf.always_this_block_size);
rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
} else if (cpi->sf.partition_by_variance) {
choose_partitioning(cpi, cm->mi, mi_row, mi_col);
rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
      } else {
        if ((cpi->common.current_video_frame
% cpi->sf.last_partitioning_redo_frequency) == 0
|| cm->prev_mi == 0
|| cpi->common.show_frame == 0
|| cpi->common.frame_type == KEY_FRAME
|| cpi->is_src_frame_alt_ref) {
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
} else {
copy_partitioning(cpi, m, p);
rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
}
      }
    } else {
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
                        &dummy_rate, &dummy_dist);
    }
  }
}
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
MACROBLOCK * const x = &cpi->mb;
VP9_COMMON * const cm = &cpi->common;
MACROBLOCKD * const xd = &x->e_mbd;
xd->mode_info_stride = cm->mode_info_stride;
xd->frame_type = cm->frame_type;
xd->frames_since_golden = cm->frames_since_golden;
xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp9_init_mbmode_probs(cm);
// TODO(jkoleszar): are these initializations required?
  setup_pre_planes(xd, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]], NULL,
                   0, 0, NULL, NULL);
setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);
setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
  vp9_zero(cpi->y_mode_count);
  vp9_zero(cm->fc.inter_mode_counts);
vp9_zero(cpi->intra_inter_count);
vp9_zero(cpi->comp_inter_count);
vp9_zero(cpi->single_ref_count);
vp9_zero(cpi->comp_ref_count);
vp9_zero(cm->fc.tx_count_32x32p);
vp9_zero(cm->fc.tx_count_16x16p);
vp9_zero(cm->fc.tx_count_8x8p);
// Note: this memset assumes above_context[0], [1] and [2]
// are allocated as part of the same buffer.
vpx_memset(
cm->above_context[0], 0,
sizeof(ENTROPY_CONTEXT) * 2 * MAX_MB_PLANE * mi_cols_aligned_to_sb(cm));
vpx_memset(cm->above_seg_context, 0,
             sizeof(PARTITION_CONTEXT) * mi_cols_aligned_to_sb(cm));
}
static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
if (lossless) {
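    // Lossless mode: switch to the Walsh-Hadamard transform pair, force 4x4
    // transforms only, and disable trellis optimization, loop filtering and
    // zbin boosting.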
cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
cpi->mb.optimize = 0;
cpi->common.filter_level = 0;
cpi->zbin_mode_boost_enabled = 0;
    cpi->common.txfm_mode = ONLY_4X4;
  } else {
    cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
    cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
  }
}
static void switch_txfm_mode(VP9_COMP *cpi) {
if (cpi->sf.tx_size_search_method == USE_LARGESTALL &&
cpi->common.txfm_mode >= ALLOW_32X32)
cpi->common.txfm_mode = ALLOW_32X32;
}
static void encode_frame_internal(VP9_COMP *cpi) {
MACROBLOCK * const x = &cpi->mb;
VP9_COMMON * const cm = &cpi->common;
  MACROBLOCKD * const xd = &x->e_mbd;
  int mi_row;
  int totalrate;
// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
// cpi->common.current_video_frame, cpi->common.show_frame,
// cm->frame_type);
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n");
fclose(statsfile);
}
totalrate = 0;
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
vp9_zero(cm->fc.switchable_interp_count);
vp9_zero(cpi->best_switchable_interp_count);
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
vp9_zero(cpi->coef_counts);
vp9_zero(cm->fc.eob_branch_counts);
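  // The stream is lossless only at a zero base quantizer with no per-plane
  // DC/AC delta-Q adjustments.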
cpi->mb.e_mbd.lossless = cm->base_qindex == 0 && cm->y_dc_delta_q == 0
&& cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
vp9_frame_init_quantizer(cpi);
vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y_dc_delta_q);
vp9_initialize_me_consts(cpi, cm->base_qindex);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
    // Initialize encode frame context.
    init_encode_frame_mb_context(cpi);
    // Build a frame level activity map
build_activity_map(cpi);
}
  // Re-initialize encode frame context.
init_encode_frame_mb_context(cpi);
vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
vpx_memset(cpi->rd_tx_select_threshes, 0, sizeof(cpi->rd_tx_select_threshes));
set_prev_mi(cm);
  {
    struct vpx_usec_timer emr_timer;
    vpx_usec_timer_start(&emr_timer);
    {
      // Take tiles into account and give start/end MB
      int tile_col, tile_row;
      TOKENEXTRA *tp = cpi->tok;
      for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
        vp9_get_tile_row_offsets(cm, tile_row);
        for (tile_col = 0; tile_col < cm->tile_columns; tile_col++) {
          TOKENEXTRA *tp_old = tp;
          // For each row of SBs in the frame
          vp9_get_tile_col_offsets(cm, tile_col);
          for (mi_row = cm->cur_tile_mi_row_start;
               mi_row < cm->cur_tile_mi_row_end; mi_row += 8)
            encode_sb_row(cpi, mi_row, &tp, &totalrate);
          cpi->tok_count[tile_row][tile_col] = (unsigned int)(tp - tp_old);
          assert(tp - cpi->tok <=
                 get_token_alloc(cm->mb_rows, cm->mb_cols));
        }
      }
    }
    vpx_usec_timer_mark(&emr_timer);
    cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
  }
// 256 rate units to the bit,
// projected_frame_size in units of BYTES
cpi->projected_frame_size = totalrate >> 8;
// Keep record of the total distortion this time around for future use
  cpi->last_frame_distortion = cpi->frame_distortion;
}
static int check_dual_ref_flags(VP9_COMP *cpi) {
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int ref_flags = cpi->ref_frame_flags;
  if (vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
    return 0;
  } else {
    return (!!(ref_flags & VP9_GOLD_FLAG) + !!(ref_flags & VP9_LAST_FLAG)
        + !!(ref_flags & VP9_ALT_FLAG)) >= 2;
  }
}
static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++) {
if (!mi[y * mis + x].mbmi.mb_skip_coeff)
return 0;
}
}
return 1;
}
static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs,
TX_SIZE txfm_size) {
int x, y;
  for (y = 0; y < ymbs; y++) {
    for (x = 0; x < xmbs; x++)
      mi[y * mis + x].mbmi.txfm_size = txfm_size;
  }
}
static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi, int mis,
TX_SIZE txfm_max, int bw, int bh, int mi_row,
int mi_col, BLOCK_SIZE_TYPE bsize) {
VP9_COMMON * const cm = &cpi->common;
MB_MODE_INFO * const mbmi = &mi->mbmi;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
  if (mbmi->txfm_size > txfm_max) {
    MACROBLOCK * const x = &cpi->mb;
    MACROBLOCKD * const xd = &x->e_mbd;
    const int segment_id = mbmi->segment_id;
    const int ymbs = MIN(bh, cm->mi_rows - mi_row);
    const int xmbs = MIN(bw, cm->mi_cols - mi_col);
    assert(vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) ||
           get_skip_flag(mi, mis, ymbs, xmbs));
set_txfm_flag(mi, mis, ymbs, xmbs, txfm_max);
}
}
static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
                                    TX_SIZE txfm_max, int mi_row, int mi_col,
                                    BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON * const cm = &cpi->common;
  const int mis = cm->mode_info_stride;
int bwl, bhl;
const int bsl = mi_width_log2(bsize), bs = 1 << (bsl - 1);
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
bwl = mi_width_log2(mi->mbmi.sb_type);
bhl = mi_height_log2(mi->mbmi.sb_type);
if (bwl == bsl && bhl == bsl) {
reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, 1 << bsl, 1 << bsl, mi_row,
mi_col, bsize);
} else if (bwl == bsl && bhl < bsl) {
reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, 1 << bsl, bs, mi_row, mi_col,
bsize);
reset_skip_txfm_size_b(cpi, mi + bs * mis, mis, txfm_max, 1 << bsl, bs,
mi_row + bs, mi_col, bsize);
} else if (bwl < bsl && bhl == bsl) {
reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, bs, 1 << bsl, mi_row, mi_col,
bsize);
reset_skip_txfm_size_b(cpi, mi + bs, mis, txfm_max, bs, 1 << bsl, mi_row,
mi_col + bs, bsize);
} else {
BLOCK_SIZE_TYPE subsize;
int n;
assert(bwl < bsl && bhl < bsl);
if (bsize == BLOCK_SIZE_SB64X64) {
subsize = BLOCK_SIZE_SB32X32;
} else if (bsize == BLOCK_SIZE_SB32X32) {
subsize = BLOCK_SIZE_MB16X16;
} else {
assert(bsize == BLOCK_SIZE_MB16X16);
subsize = BLOCK_SIZE_SB8X8;
}
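    // Recurse into the four quadrants at the next smaller square size.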
for (n = 0; n < 4; n++) {
const int y_idx = n >> 1, x_idx = n & 0x01;
reset_skip_txfm_size_sb(cpi, mi + y_idx * bs * mis + x_idx * bs, txfm_max,
mi_row + y_idx * bs, mi_col + x_idx * bs,
subsize);
}
}
}
static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
  VP9_COMMON * const cm = &cpi->common;
  int mi_row, mi_col;
  const int mis = cm->mode_info_stride;
MODE_INFO *mi, *mi_ptr = cm->mi;
for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
mi = mi_ptr;
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8) {
reset_skip_txfm_size_sb(cpi, mi, txfm_max, mi_row, mi_col,
BLOCK_SIZE_SB64X64);
}
}
}
static int get_frame_type(VP9_COMP *cpi) {
int frame_type;
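  // Frame type buckets used to index rd_tx_select_threshes:
  // 0 = key frame, 1 = golden/alt-ref update, 2 = ordinary inter frame,
  // 3 = alt-ref overlay of a source frame.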
if (cpi->common.frame_type == KEY_FRAME)
frame_type = 0;
else if (cpi->is_src_frame_alt_ref && cpi->refresh_golden_frame)
frame_type = 3;
else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
frame_type = 1;
else
frame_type = 2;
return frame_type;
}
static void select_txfm_mode(VP9_COMP *cpi) {
if (cpi->oxcf.lossless) {
cpi->common.txfm_mode = ONLY_4X4;
} else if (cpi->common.current_video_frame == 0) {
cpi->common.txfm_mode = TX_MODE_SELECT;
} else {
if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
int frame_type = get_frame_type(cpi);
cpi->common.txfm_mode =
cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32]
> cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_32X32 : TX_MODE_SELECT;
} else if (cpi->sf.tx_size_search_method == USE_LARGESTALL) {
cpi->common.txfm_mode = ALLOW_32X32;
} else {
unsigned int total = 0;
int i;
for (i = 0; i < TX_SIZE_MAX_SB; ++i)
total += cpi->txfm_stepdown_count[i];
if (total) {
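        // txfm_stepdown_count[0] counts blocks that kept the largest
        // transform size; if that fraction exceeds 90%, per-block TX size
        // signalling is unlikely to pay off.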
double fraction = (double)cpi->txfm_stepdown_count[0] / total;
cpi->common.txfm_mode = fraction > 0.90 ? ALLOW_32X32 : TX_MODE_SELECT;
// printf("fraction = %f\n", fraction);
} // else keep unchanged
}
}
}
void vp9_encode_frame(VP9_COMP *cpi) {
  VP9_COMMON * const cm = &cpi->common;
  // In the longer term the encoder should be generalized to match the
  // decoder such that we allow compound where one of the 3 buffers has a
  // different sign bias and that buffer is then the fixed ref. However, this
  // requires further work in the rd loop. For now the only supported encoder
  // side behaviour is where the ALT ref buffer has opposite sign bias to
  // the other two.
  if ((cm->ref_frame_sign_bias[ALTREF_FRAME]
      == cm->ref_frame_sign_bias[GOLDEN_FRAME])
|| (cm->ref_frame_sign_bias[ALTREF_FRAME]
== cm->ref_frame_sign_bias[LAST_FRAME])) {
cm->allow_comp_inter_inter = 0;
} else {
cm->allow_comp_inter_inter = 1;
cm->comp_fixed_ref = ALTREF_FRAME;
cm->comp_var_ref[0] = LAST_FRAME;
cm->comp_var_ref[1] = GOLDEN_FRAME;
}