pixels_wide += (xd->mb_to_right_edge >> 3);
if (xd->mb_to_bottom_edge < 0)
pixels_high += (xd->mb_to_bottom_edge >> 3);
s = x->plane[0].src.buf;
sp = x->plane[0].src.stride;
// TODO(JBB): Clearly the higher the quantizer the fewer partitions we want
// but this needs more experimentation.
threshold = threshold * cpi->common.base_qindex * cpi->common.base_qindex;
// if ( cm->frame_type == KEY_FRAME ) {
d = vp9_64x64_zeros;
dp = 64;
// }
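// With the key-frame special case commented out, the comparison target is
// always the constant all-zero block vp9_64x64_zeros, so the sse/sum
// gathered below reduce to the raw statistics of the source pixels
// themselves.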
// Fill in the entire tree of 8x8 variances for splits.
for (i = 0; i < 4; i++) {
const int x32_idx = ((i & 1) << 5);
const int y32_idx = ((i >> 1) << 5);
for (j = 0; j < 4; j++) {
const int x_idx = x32_idx + ((j & 1) << 4);
const int y_idx = y32_idx + ((j >> 1) << 4);
const uint8_t *st = s + y_idx * sp + x_idx;
const uint8_t *dt = d + y_idx * dp + x_idx;
unsigned int sse = 0;
int sum = 0;
v16x16 *vst = &vt.split[i].split[j];
sse = sum = 0;
if (x_idx < pixels_wide && y_idx < pixels_high)
vp9_get_sse_sum_8x8(st, sp, dt, dp, &sse, &sum);
fill_variance(&vst->split[0].none, sse, sum, 64);
sse = sum = 0;
if (x_idx + 8 < pixels_wide && y_idx < pixels_high)
vp9_get_sse_sum_8x8(st + 8, sp, dt + 8, dp, &sse, &sum);
fill_variance(&vst->split[1].none, sse, sum, 64);
sse = sum = 0;
if (x_idx < pixels_wide && y_idx + 8 < pixels_high)
vp9_get_sse_sum_8x8(st + 8 * sp, sp, dt + 8 * dp, dp, &sse, &sum);
fill_variance(&vst->split[2].none, sse, sum, 64);
sse = sum = 0;
if (x_idx + 8 < pixels_wide && y_idx + 8 < pixels_high)
vp9_get_sse_sum_8x8(st + 8 * sp + 8, sp, dt + 8 + 8 * dp, dp, &sse,
&sum);
fill_variance(&vst->split[3].none, sse, sum, 64);
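// The four fill_variance() calls above record sse/sum over the 64 pixels
// of each 8x8 leaf; leaves falling outside the visible frame keep
// sse = sum = 0 and therefore add nothing when the tree is summed below.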
}
}
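// vt now mirrors the partition hierarchy: one 64x64 root with four 32x32
// children, each holding four 16x16 nodes that own four 8x8 leaves apiece.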
// Fill the rest of the variance tree by summing the split partition
// values.
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
fill_variance_tree(&vt.split[i].split[j])
}
fill_variance_tree(&vt.split[i])
}
fill_variance_tree(&vt)
// Now go through the entire structure, splitting every blocksize until
// we get to one that's got a variance lower than our threshold, or we
// hit 8x8.
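// NOTE: fill_variance_tree() and set_vt_size() are macros, which is why
// some invocations carry no trailing semicolon and why set_vt_size() can
// take a statement (return / continue) as its last argument -- that
// statement runs once a block size has been chosen, cutting off descent
// into smaller partitions.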
set_vt_size(vt, BLOCK_SIZE_SB64X64, mi_row, mi_col, return);
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
set_vt_size(vt, BLOCK_SIZE_SB32X32, mi_row + y32_idx, mi_col + x32_idx,
continue);
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
set_vt_size(vt, BLOCK_SIZE_MB16X16, mi_row + y32_idx + y16_idx,
mi_col + x32_idx + x16_idx, continue);
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
set_block_size(cm, m, BLOCK_SIZE_SB8X8, mis,
mi_row + y32_idx + y16_idx + y8_idx,
mi_col + x32_idx + x16_idx + x8_idx);
}
}
}
}
static void rd_use_partition(VP9_COMP *cpi, MODE_INFO *m, TOKENEXTRA **tp,
int mi_row, int mi_col, BLOCK_SIZE_TYPE bsize,
int *rate, int *dist) {
VP9_COMMON * const cm = &cpi->common;
MACROBLOCK * const x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
const int mis = cm->mode_info_stride;
int bwl = b_width_log2(m->mbmi.sb_type);
int bhl = b_height_log2(m->mbmi.sb_type);
// NOTE: the declarations here were lost to viewer line-number residue and
// are reconstructed from later uses; bh is the block height in mi units.
int bsl = b_width_log2(bsize);
int bh = (1 << mi_height_log2(bsize));
int bs = (1 << bsl);
int bss = (1 << bsl) / 4;
int i, pl;
PARTITION_TYPE partition;
BLOCK_SIZE_TYPE subsize;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
int r = 0, d = 0;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
// parse the partition type
if ((bwl == bsl) && (bhl == bsl))
partition = PARTITION_NONE;
else if ((bwl == bsl) && (bhl < bsl))
partition = PARTITION_HORZ;
else if ((bwl < bsl) && (bhl == bsl))
partition = PARTITION_VERT;
else if ((bwl < bsl) && (bhl < bsl))
partition = PARTITION_SPLIT;
else
assert(0);
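// Example: at bsize == BLOCK_SIZE_SB64X64, bsl == 4; a stored sb_type of
// 64x32 gives bwl == 4 and bhl == 3, which decodes as PARTITION_HORZ.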
subsize = get_subsize(bsize, partition);
// TODO(JBB): this restriction is here because pick_sb_modes can return
// r's that are INT_MAX meaning we can't select a mode / mv for this block.
// when the code is made to work for less than sb8x8 we need to come up with
// a solution to this problem.
assert(subsize >= BLOCK_SIZE_SB8X8);
if (bsize >= BLOCK_SIZE_SB8X8) {
xd->left_seg_context = cm->left_seg_context + (mi_row & MI_MASK);
xd->above_seg_context = cm->above_seg_context + mi_col;
*(get_sb_partitioning(x, bsize)) = subsize;
}
pl = partition_plane_context(xd, bsize);
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
switch (partition) {
case PARTITION_NONE:
pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, bsize,
get_block_context(x, bsize));
r += x->partition_cost[pl][PARTITION_NONE];
break;
case PARTITION_HORZ:
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, subsize,
get_block_context(x, subsize));
if (mi_row + (bh >> 1) <= cm->mi_rows) {
int rt, dt;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row + (bs >> 2), mi_col, tp, &rt, &dt, subsize,
get_block_context(x, subsize));
r += rt;
d += dt;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
r += x->partition_cost[pl][PARTITION_HORZ];
break;
case PARTITION_VERT:
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, subsize,
get_block_context(x, subsize));
if (mi_col + (bs >> 1) <= cm->mi_cols) {
int rt, dt;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row, mi_col + (bs >> 2), tp, &rt, &dt, subsize,
get_block_context(x, subsize));
r += rt;
d += dt;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
r += x->partition_cost[pl][PARTITION_VERT];
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
break;
case PARTITION_SPLIT:
for (i = 0; i < 4; i++) {
int x_idx = (i & 1) * (bs >> 2);
int y_idx = (i >> 1) * (bs >> 2);
int jj = i >> 1, ii = i & 0x01;
int rt, dt;
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, subsize)) = i;
rd_use_partition(cpi, m + jj * bss * mis + ii * bss, tp, mi_row + y_idx,
mi_col + x_idx, subsize, &rt, &dt);
r += rt;
d += dt;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
r += x->partition_cost[pl][PARTITION_SPLIT];
break;
default:
assert(0);
}
// update partition context
#if CONFIG_AB4X4
if (bsize >= BLOCK_SIZE_SB8X8 &&
(bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
#else
if (bsize > BLOCK_SIZE_SB8X8
&& (bsize == BLOCK_SIZE_MB16X16 || partition != PARTITION_SPLIT)) {
#endif
set_partition_seg_context(cm, xd, mi_row, mi_col);
update_partition_context(xd, subsize, bsize);
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (r < INT_MAX && d < INT_MAX)
encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
*rate = r;
*dist = d;
}
// TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
// unlikely to be selected depending on previous rate-distortion optimization
// results, for encoding speed-up.
static void rd_pick_partition(VP9_COMP *cpi, TOKENEXTRA **tp,
int mi_row, int mi_col,
BLOCK_SIZE_TYPE bsize,
int *rate, int *dist) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int bsl = b_width_log2(bsize), bs = 1 << bsl;
int ms = bs / 2;  // reconstructed from later uses: half-block size in mi units
int i, pl;
ENTROPY_CONTEXT l[16 * MAX_MB_PLANE], a[16 * MAX_MB_PLANE];
PARTITION_CONTEXT sl[8], sa[8];
TOKENEXTRA *tp_orig = *tp;
BLOCK_SIZE_TYPE subsize;
int srate = INT_MAX, sdist = INT_MAX;
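// Each candidate partitioning below (SPLIT, HORZ, VERT, NONE) is scored
// with RDCOST(rdmult, rddiv, rate, dist), the Lagrangian cost that in this
// codebase is roughly ((rate * rdmult) >> 8) + (dist << rddiv); the
// cheapest candidate is kept in srate/sdist.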
if (bsize < BLOCK_SIZE_SB8X8)
if (xd->ab_index != 0) {
*rate = 0;
*dist = 0;
return;
}
assert(mi_height_log2(bsize) == mi_width_log2(bsize));
save_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
// PARTITION_SPLIT
{
int r4 = 0, d4 = 0;
subsize = get_subsize(bsize, PARTITION_SPLIT);
*(get_sb_partitioning(x, bsize)) = subsize;
for (i = 0; i < 4; ++i) {
int x_idx = (i & 1) * (ms >> 1);
int y_idx = (i >> 1) * (ms >> 1);
int r = 0, d = 0;
if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
continue;
*(get_sb_index(xd, subsize)) = i;
rd_pick_partition(cpi, tp, mi_row + y_idx, mi_col + x_idx, subsize,
&r, &d);
r4 += r;
d4 += d;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r4 < INT_MAX)
r4 += x->partition_cost[pl][PARTITION_SPLIT];
assert(r4 >= 0);
assert(d4 >= 0);
srate = r4;
sdist = d4;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
// PARTITION_HORZ
if ((mi_col + ms <= cm->mi_cols) && (mi_row + (ms >> 1) <= cm->mi_rows) &&
(bsize >= BLOCK_SIZE_SB8X8)) {
int r2, d2;
int mb_skip = 0;
subsize = get_subsize(bsize, PARTITION_HORZ);
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r2, &d2, subsize,
get_block_context(x, subsize));
if (mi_row + ms <= cm->mi_rows) {
int r, d;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row + (ms >> 1), mi_col, tp, &r, &d, subsize,
get_block_context(x, subsize));
r2 += r;
d2 += d;
} else {
if (mi_row + (ms >> 1) != cm->mi_rows)
mb_skip = 1;
}
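// A horizontal split that straddles the bottom edge is only usable when
// the frame boundary falls exactly on the half-block row; otherwise
// mb_skip disqualifies this candidate from the RDCOST comparison below.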
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r2 < INT_MAX)
r2 += x->partition_cost[pl][PARTITION_HORZ];
if ((RDCOST(x->rdmult, x->rddiv, r2, d2) <
RDCOST(x->rdmult, x->rddiv, srate, sdist)) && !mb_skip) {
srate = r2;
sdist = d2;
*(get_sb_partitioning(x, bsize)) = subsize;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
// PARTITION_VERT
if ((mi_row + ms <= cm->mi_rows) && (mi_col + (ms >> 1) <= cm->mi_cols) &&
(bsize >= BLOCK_SIZE_SB8X8)) {
int r2, d2;
int mb_skip = 0;
subsize = get_subsize(bsize, PARTITION_VERT);
*(get_sb_index(xd, subsize)) = 0;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r2, &d2, subsize,
get_block_context(x, subsize));
if (mi_col + ms <= cm->mi_cols) {
int r, d;
update_state(cpi, get_block_context(x, subsize), subsize, 0);
encode_superblock(cpi, tp, 0, mi_row, mi_col, subsize);
*(get_sb_index(xd, subsize)) = 1;
pick_sb_modes(cpi, mi_row, mi_col + (ms >> 1), tp, &r, &d, subsize,
get_block_context(x, subsize));
r2 += r;
d2 += d;
} else {
if (mi_col + (ms >> 1) != cm->mi_cols)
mb_skip = 1;
}
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
if (r2 < INT_MAX)
r2 += x->partition_cost[pl][PARTITION_VERT];
if ((RDCOST(x->rdmult, x->rddiv, r2, d2) <
RDCOST(x->rdmult, x->rddiv, srate, sdist)) && !mb_skip) {
srate = r2;
sdist = d2;
*(get_sb_partitioning(x, bsize)) = subsize;
}
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
}
// PARTITION_NONE
if (mi_row + ms <= cm->mi_rows && mi_col + ms <= cm->mi_cols) {
int r, d;
pick_sb_modes(cpi, mi_row, mi_col, tp, &r, &d, bsize,
get_block_context(x, bsize));
set_partition_seg_context(cm, xd, mi_row, mi_col);
pl = partition_plane_context(xd, bsize);
r += x->partition_cost[pl][PARTITION_NONE];
if (RDCOST(x->rdmult, x->rddiv, r, d) <
RDCOST(x->rdmult, x->rddiv, srate, sdist)) {
srate = r;
sdist = d;
*(get_sb_partitioning(x, bsize)) = bsize;
}
}
*rate = srate;
*dist = sdist;
restore_context(cpi, mi_row, mi_col, a, l, sa, sl, bsize);
if (srate < INT_MAX && sdist < INT_MAX)
encode_sb(cpi, tp, mi_row, mi_col, bsize == BLOCK_SIZE_SB64X64, bsize);
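// Only the top-level 64x64 call emits output; for smaller bsize the
// bsize == BLOCK_SIZE_SB64X64 argument evaluates to 0 and encode_sb()
// runs with output disabled, just updating contexts.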
if (bsize == BLOCK_SIZE_SB64X64) {
assert(srate < INT_MAX);
assert(sdist < INT_MAX);
} else {
assert(tp_orig == *tp);
}
}
static void encode_sb_row(VP9_COMP *cpi, int mi_row,
TOKENEXTRA **tp, int *totalrate) {
VP9_COMMON *const cm = &cpi->common;
int mi_col;
// Initialize the left context for the new SB row
vpx_memset(&cm->left_context, 0, sizeof(cm->left_context));
vpx_memset(cm->left_seg_context, 0, sizeof(cm->left_seg_context));
// Code each SB in the row
for (mi_col = cm->cur_tile_mi_col_start;
mi_col < cm->cur_tile_mi_col_end; mi_col += 8) {
int dummy_rate, dummy_dist;
// NOTE: the guard choosing between full RD partition search and
// variance-based partitioning was elided in the source; the speed
// feature named here is an assumption.
if (!cpi->sf.partition_by_variance) {
rd_pick_partition(cpi, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
} else {
const int idx_str = cm->mode_info_stride * mi_row + mi_col;
MODE_INFO *m = cm->mi + idx_str;
// set_partitioning(cpi, m, BLOCK_SIZE_SB64X64);
choose_partitioning(cpi, cm->mi, mi_row, mi_col);
rd_use_partition(cpi, m, tp, mi_row, mi_col, BLOCK_SIZE_SB64X64,
&dummy_rate, &dummy_dist);
}
}
}
static void init_encode_frame_mb_context(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
xd->mode_info_stride = cm->mode_info_stride;
xd->frame_type = cm->frame_type;
xd->frames_since_golden = cm->frames_since_golden;
xd->frames_till_alt_ref_frame = cm->frames_till_alt_ref_frame;
// reset intra mode contexts
if (cm->frame_type == KEY_FRAME)
vp9_init_mbmode_probs(cm);
// TODO(jkoleszar): are these initializations required?
setup_pre_planes(xd, &cm->yv12_fb[cm->ref_frame_map[cpi->lst_fb_idx]], NULL,
0, 0, NULL, NULL);
setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], 0, 0);
vp9_build_block_offsets(x);
vp9_setup_block_dptrs(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_context->mbmi.uv_mode = DC_PRED;
vp9_zero(cpi->y_mode_count);
vp9_zero(cm->fc.inter_mode_counts);
vp9_zero(cpi->intra_inter_count);
vp9_zero(cpi->comp_inter_count);
vp9_zero(cpi->single_ref_count);
vp9_zero(cpi->comp_ref_count);
vp9_zero(cm->fc.tx_count_32x32p);
vp9_zero(cm->fc.tx_count_16x16p);
vp9_zero(cm->fc.tx_count_8x8p);
// Note: this memset assumes above_context[0], [1] and [2]
// are allocated as part of the same buffer.
vpx_memset(cm->above_context[0], 0, sizeof(ENTROPY_CONTEXT) * 2 *
MAX_MB_PLANE * mi_cols_aligned_to_sb(cm));
vpx_memset(cm->above_seg_context, 0, sizeof(PARTITION_CONTEXT) *
mi_cols_aligned_to_sb(cm));
}
static void switch_lossless_mode(VP9_COMP *cpi, int lossless) {
if (lossless) {
cpi->mb.fwd_txm8x4 = vp9_short_walsh8x4;
cpi->mb.fwd_txm4x4 = vp9_short_walsh4x4;
cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_iwalsh4x4_1_add;
cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_iwalsh4x4_add;
cpi->mb.optimize = 0;
cpi->common.filter_level = 0;
cpi->zbin_mode_boost_enabled = 0;
cpi->common.txfm_mode = ONLY_4X4;
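// The Walsh-Hadamard transform selected above is exactly invertible in
// integer arithmetic, which is what makes true lossless coding possible;
// the DCT pair in the branch below is not.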
} else {
cpi->mb.fwd_txm8x4 = vp9_short_fdct8x4;
cpi->mb.fwd_txm4x4 = vp9_short_fdct4x4;
cpi->mb.e_mbd.inv_txm4x4_1_add = vp9_short_idct4x4_1_add;
cpi->mb.e_mbd.inv_txm4x4_add = vp9_short_idct4x4_add;
}
}
static void encode_frame_internal(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
int mi_row;
int totalrate;
// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
// cpi->common.current_video_frame, cpi->common.show_frame,
// cm->frame_type);
#if DBG_PRNT_SEGMAP  // debug output (guard reconstructed, assumed)
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n");
fclose(statsfile);
}
#endif
totalrate = 0;
// Reset frame count of inter 0,0 motion vector usage.
cpi->inter_zz_count = 0;
cpi->skip_true_count[0] = cpi->skip_true_count[1] = cpi->skip_true_count[2] = 0;
cpi->skip_false_count[0] = cpi->skip_false_count[1] = cpi->skip_false_count[2] = 0;
vp9_zero(cm->fc.switchable_interp_count);
vp9_zero(cpi->best_switchable_interp_count);
xd->mode_info_context = cm->mi;
xd->prev_mode_info_context = cm->prev_mi;
vp9_zero(cpi->coef_counts);
vp9_zero(cm->fc.eob_branch_counts);
cpi->mb.e_mbd.lossless = cm->base_qindex == 0 &&
cm->y_dc_delta_q == 0 &&
cm->uv_dc_delta_q == 0 &&
cm->uv_ac_delta_q == 0;
vp9_frame_init_quantizer(cpi);
vp9_initialize_rd_consts(cpi, cm->base_qindex + cm->y_dc_delta_q);
vp9_initialize_me_consts(cpi, cm->base_qindex);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Initialize encode frame context.
init_encode_frame_mb_context(cpi);
// Build a frame level activity map
build_activity_map(cpi);
}
// Re-initialize encode frame context.
init_encode_frame_mb_context(cpi);
vpx_memset(cpi->rd_comp_pred_diff, 0, sizeof(cpi->rd_comp_pred_diff));
vpx_memset(cpi->rd_tx_select_diff, 0, sizeof(cpi->rd_tx_select_diff));
vpx_memset(cpi->rd_tx_select_threshes, 0, sizeof(cpi->rd_tx_select_threshes));
set_prev_mi(cm);
{
struct vpx_usec_timer emr_timer;
vpx_usec_timer_start(&emr_timer);
{
// Take tiles into account and give start/end MB
int tile_col, tile_row;
TOKENEXTRA *tp = cpi->tok;
for (tile_row = 0; tile_row < cm->tile_rows; tile_row++) {
vp9_get_tile_row_offsets(cm, tile_row);
for (tile_col = 0; tile_col < cm->tile_columns; tile_col++) {
TOKENEXTRA *tp_old = tp;
// For each row of SBs in the frame
vp9_get_tile_col_offsets(cm, tile_col);
for (mi_row = cm->cur_tile_mi_row_start;
mi_row < cm->cur_tile_mi_row_end; mi_row += 8)
encode_sb_row(cpi, mi_row, &tp, &totalrate);
cpi->tok_count[tile_col] = (unsigned int)(tp - tp_old);
assert(tp - cpi->tok <=
get_token_alloc(cm->mb_rows, cm->mb_cols));
}
}
}
vpx_usec_timer_mark(&emr_timer);
cpi->time_encode_mb_row += vpx_usec_timer_elapsed(&emr_timer);
}
// 256 rate units to the bit,
// projected_frame_size in units of BYTES
cpi->projected_frame_size = totalrate >> 8;
// Keep record of the total distortion this time around for future use
cpi->last_frame_distortion = cpi->frame_distortion;
}
static int check_dual_ref_flags(VP9_COMP *cpi) {
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int ref_flags = cpi->ref_frame_flags;
if (vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME)) {
// Segmentation fixes the reference frame, so dual refs are never used.
return 0;
} else {
return (!!(ref_flags & VP9_GOLD_FLAG) +
!!(ref_flags & VP9_LAST_FLAG) +
!!(ref_flags & VP9_ALT_FLAG)) >= 2;
}
}
static int get_skip_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++) {
if (!mi[y * mis + x].mbmi.mb_skip_coeff)
return 0;
}
}
return 1;
}
static void set_txfm_flag(MODE_INFO *mi, int mis, int ymbs, int xmbs,
TX_SIZE txfm_size) {
int x, y;
for (y = 0; y < ymbs; y++) {
for (x = 0; x < xmbs; x++)
mi[y * mis + x].mbmi.txfm_size = txfm_size;
}
}
static void reset_skip_txfm_size_b(VP9_COMP *cpi, MODE_INFO *mi,
int mis, TX_SIZE txfm_max,
int bw, int bh, int mi_row, int mi_col,
BLOCK_SIZE_TYPE bsize) {
VP9_COMMON *const cm = &cpi->common;
MB_MODE_INFO *const mbmi = &mi->mbmi;
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
if (mbmi->txfm_size > txfm_max) {
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
const int segment_id = mbmi->segment_id;
const int ymbs = MIN(bh, cm->mi_rows - mi_row);
const int xmbs = MIN(bw, cm->mi_cols - mi_col);
assert(vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP) ||
get_skip_flag(mi, mis, ymbs, xmbs));
set_txfm_flag(mi, mis, ymbs, xmbs, txfm_max);
}
}
static void reset_skip_txfm_size_sb(VP9_COMP *cpi, MODE_INFO *mi,
TX_SIZE txfm_max,
int mi_row, int mi_col,
BLOCK_SIZE_TYPE bsize) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
int bwl, bhl;
const int bsl = mi_width_log2(bsize), bs = 1 << (bsl - 1);
if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols)
return;
bwl = mi_width_log2(mi->mbmi.sb_type);
bhl = mi_height_log2(mi->mbmi.sb_type);
if (bwl == bsl && bhl == bsl) {
reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, 1 << bsl, 1 << bsl,
mi_row, mi_col, bsize);
} else if (bwl == bsl && bhl < bsl) {
reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, 1 << bsl, bs,
mi_row, mi_col, bsize);
reset_skip_txfm_size_b(cpi, mi + bs * mis, mis, txfm_max, 1 << bsl, bs,
mi_row + bs, mi_col, bsize);
} else if (bwl < bsl && bhl == bsl) {
reset_skip_txfm_size_b(cpi, mi, mis, txfm_max, bs, 1 << bsl,
mi_row, mi_col, bsize);
reset_skip_txfm_size_b(cpi, mi + bs, mis, txfm_max, bs, 1 << bsl,
mi_row, mi_col + bs, bsize);
} else {
BLOCK_SIZE_TYPE subsize;
int n;
assert(bwl < bsl && bhl < bsl);
if (bsize == BLOCK_SIZE_SB64X64) {
subsize = BLOCK_SIZE_SB32X32;
} else if (bsize == BLOCK_SIZE_SB32X32) {
subsize = BLOCK_SIZE_MB16X16;
} else {
assert(bsize == BLOCK_SIZE_MB16X16);
subsize = BLOCK_SIZE_SB8X8;
}
for (n = 0; n < 4; n++) {
const int y_idx = n >> 1, x_idx = n & 0x01;
reset_skip_txfm_size_sb(cpi, mi + y_idx * bs * mis + x_idx * bs,
txfm_max, mi_row + y_idx * bs,
mi_col + x_idx * bs, subsize);
}
}
}
static void reset_skip_txfm_size(VP9_COMP *cpi, TX_SIZE txfm_max) {
VP9_COMMON *const cm = &cpi->common;
const int mis = cm->mode_info_stride;
MODE_INFO *mi, *mi_ptr = cm->mi;
int mi_row, mi_col;
for (mi_row = 0; mi_row < cm->mi_rows; mi_row += 8, mi_ptr += 8 * mis) {
mi = mi_ptr;
for (mi_col = 0; mi_col < cm->mi_cols; mi_col += 8, mi += 8) {
reset_skip_txfm_size_sb(cpi, mi, txfm_max,
mi_row, mi_col, BLOCK_SIZE_SB64X64);
}
}
}
void vp9_encode_frame(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
// In the longer term the encoder should be generalized to match the
// decoder such that we allow compound where one of the 3 buffers has a
// different sign bias and that buffer is then the fixed ref. However, this
// requires further work in the rd loop. For now the only supported encoder
// side behavior is where the ALT ref buffer has opposite sign bias to
// the other two.
if ((cm->ref_frame_sign_bias[ALTREF_FRAME] ==
cm->ref_frame_sign_bias[GOLDEN_FRAME]) ||
(cm->ref_frame_sign_bias[ALTREF_FRAME] ==
cm->ref_frame_sign_bias[LAST_FRAME])) {
cm->allow_comp_inter_inter = 0;
} else {
cm->allow_comp_inter_inter = 1;
cm->comp_fixed_ref = ALTREF_FRAME;
cm->comp_var_ref[0] = LAST_FRAME;
cm->comp_var_ref[1] = GOLDEN_FRAME;
}
int i, frame_type, pred_type;
TXFM_MODE txfm_type;
/*
* This code does a single RD pass over the whole frame assuming
* either compound, single or hybrid prediction as per whatever has
* worked best for that type of frame in the past.
* It also predicts whether another coding mode would have worked
* better that this coding mode. If that is the case, it remembers
* that for subsequent frames.
* It does the same analysis for transform size selection also.
*/
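/* For example, the prediction-type thresholds below are updated as
 *   thresh[frame_type][i] = (thresh[frame_type][i] + diff) / 2,
 * where diff is that mode's measured RD saving per MB, so the influence
 * of older frames decays geometrically. */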
if (cpi->common.frame_type == KEY_FRAME)
frame_type = 0;
else if (cpi->is_src_frame_alt_ref && cpi->refresh_golden_frame)
frame_type = 3;
else if (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)
frame_type = 1;
else
frame_type = 2;
/* prediction (compound, single or hybrid) mode selection */
if (frame_type == 3 || !cm->allow_comp_inter_inter)
pred_type = SINGLE_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][1] >
cpi->rd_prediction_type_threshes[frame_type][0] &&
cpi->rd_prediction_type_threshes[frame_type][1] >
cpi->rd_prediction_type_threshes[frame_type][2] &&
check_dual_ref_flags(cpi) && cpi->static_mb_pct == 100)
pred_type = COMP_PREDICTION_ONLY;
else if (cpi->rd_prediction_type_threshes[frame_type][0] >
cpi->rd_prediction_type_threshes[frame_type][2])
pred_type = SINGLE_PREDICTION_ONLY;
else
pred_type = HYBRID_PREDICTION;
/* transform size (4x4, 8x8, 16x16 or select-per-mb) selection */
if (cpi->oxcf.lossless) {
txfm_type = ONLY_4X4;
} else
#if 0
/* FIXME (rbultje): this code is disabled until we support cost updates
* while a frame is being encoded; the problem is that each time we
* "revert" to 4x4 only (or even 8x8 only), the coefficient probabilities
* for 16x16 (and 8x8) start lagging behind, thus leading to them lagging
* further behind and not being chosen for subsequent frames either. This
* is essentially a local minimum problem that we can probably fix by
* estimating real costs more closely within a frame, perhaps by re-
* calculating costs on-the-fly as frame encoding progresses. */
if (cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] &&
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] &&
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
txfm_type = TX_MODE_SELECT;
} else if (cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]
&& cpi->rd_tx_select_threshes[frame_type][ONLY_4X4] >
cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16]
) {
txfm_type = ONLY_4X4;
} else if (cpi->rd_tx_select_threshes[frame_type][ALLOW_16X16] >=
cpi->rd_tx_select_threshes[frame_type][ALLOW_8X8]) {
txfm_type = ALLOW_16X16;
} else
txfm_type = ALLOW_8X8;
#else
txfm_type = cpi->rd_tx_select_threshes[frame_type][ALLOW_32X32] >
cpi->rd_tx_select_threshes[frame_type][TX_MODE_SELECT] ?
ALLOW_32X32 : TX_MODE_SELECT;
#endif
cpi->common.txfm_mode = txfm_type;
cpi->common.comp_pred_mode = pred_type;
encode_frame_internal(cpi);
for (i = 0; i < NB_PREDICTION_TYPES; ++i) {
const int diff = (int)(cpi->rd_comp_pred_diff[i] / cpi->common.MBs);
cpi->rd_prediction_type_threshes[frame_type][i] += diff;
cpi->rd_prediction_type_threshes[frame_type][i] >>= 1;
}
for (i = 0; i < NB_TXFM_MODES; ++i) {
int64_t pd = cpi->rd_tx_select_diff[i];
int diff;
if (i == TX_MODE_SELECT)
pd -= RDCOST(cpi->mb.rdmult, cpi->mb.rddiv,
2048 * (TX_SIZE_MAX_SB - 1), 0);
diff = (int)(pd / cpi->common.MBs);
cpi->rd_tx_select_threshes[frame_type][i] += diff;
cpi->rd_tx_select_threshes[frame_type][i] /= 2;
}
if (cpi->common.comp_pred_mode == HYBRID_PREDICTION) {
int single_count_zero = 0;
int comp_count_zero = 0;
for (i = 0; i < COMP_INTER_CONTEXTS; i++) {
single_count_zero += cpi->comp_inter_count[i][0];
comp_count_zero += cpi->comp_inter_count[i][1];
}
if (comp_count_zero == 0) {
cpi->common.comp_pred_mode = SINGLE_PREDICTION_ONLY;
} else if (single_count_zero == 0) {
cpi->common.comp_pred_mode = COMP_PREDICTION_ONLY;
}
}
if (cpi->common.txfm_mode == TX_MODE_SELECT) {
const int count4x4 = cm->fc.tx_count_16x16p[TX_4X4] +
cm->fc.tx_count_32x32p[TX_4X4] +
cm->fc.tx_count_8x8p[TX_4X4];
const int count8x8_lp = cm->fc.tx_count_32x32p[TX_8X8] +
cm->fc.tx_count_16x16p[TX_8X8];
const int count8x8_8x8p = cm->fc.tx_count_8x8p[TX_8X8];
const int count16x16_16x16p = cm->fc.tx_count_16x16p[TX_16X16];
const int count16x16_lp = cm->fc.tx_count_32x32p[TX_16X16];
const int count32x32 = cm->fc.tx_count_32x32p[TX_32X32];
if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 &&
count32x32 == 0) {
cpi->common.txfm_mode = ALLOW_8X8;
reset_skip_txfm_size(cpi, TX_8X8);
} else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 &&
count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) {
cpi->common.txfm_mode = ONLY_4X4;
reset_skip_txfm_size(cpi, TX_4X4);
} else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) {
cpi->common.txfm_mode = ALLOW_32X32;
} else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) {
cpi->common.txfm_mode = ALLOW_16X16;
reset_skip_txfm_size(cpi, TX_16X16);
}
}
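// Rationale for the re-marking above: if tx-size selection never actually
// chose, say, 4x4 or anything at 16x16 and above, the frame is re-marked
// ALLOW_8X8 and skip-coded blocks get their txfm_size clamped, so no
// per-block tx-size bits are wasted signalling choices that never occur.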
// Update interpolation filter strategy for next frame.
if ((cpi->common.frame_type != KEY_FRAME) && (cpi->sf.search_best_filter))
vp9_select_interp_filter_type(cpi);  // NOTE: call reconstructed (assumed)
}

void vp9_build_block_offsets(MACROBLOCK *x) {
}
static void sum_intra_stats(VP9_COMP *cpi, MACROBLOCK *x) {
MACROBLOCKD *const xd = &x->e_mbd;
const MB_PREDICTION_MODE m = xd->mode_info_context->mbmi.mode;
const MB_PREDICTION_MODE uvm = xd->mode_info_context->mbmi.uv_mode;
++cpi->y_uv_mode_count[m][uvm];
if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB8X8) {
const BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
const int bsl = MIN(bwl, bhl);
++cpi->y_mode_count[MIN(bsl, 3)][m];
} else {
int idx, idy;
int bw = 1 << b_width_log2(xd->mode_info_context->mbmi.sb_type);
int bh = 1 << b_height_log2(xd->mode_info_context->mbmi.sb_type);
for (idy = 0; idy < 2; idy += bh) {
for (idx = 0; idx < 2; idx += bw) {
int m = xd->mode_info_context->bmi[idy * 2 + idx].as_mode.first;
++cpi->y_mode_count[0][m];
}
}
}
}
// Experimental stub function to create a per MB zbin adjustment based on
// some previously calculated measure of MB activity.
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x) {
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
if (act > cpi->activity_avg)
x->act_zbin_adj = (int)(((int64_t)b + (a >> 1)) / a) - 1;
else
x->act_zbin_adj = 1 - (int)(((int64_t)a + (b >> 1)) / b);
}
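// Worked example for adjust_act_zbin() above: with act == 2 * activity_avg,
// a = 6 * avg and b = 9 * avg, so act_zbin_adj = (9 + 3) / 6 - 1 = +1, i.e.
// blocks with above-average activity get a slightly larger quantizer
// dead-zone.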
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled, int mi_row, int mi_col,
BLOCK_SIZE_TYPE bsize) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int n;
MODE_INFO *mi = xd->mode_info_context;
MB_MODE_INFO *mbmi = &mi->mbmi;
unsigned int segment_id = mbmi->segment_id;
const int bwl = mi_width_log2(bsize);
const int bw = 1 << bwl, bh = 1 << mi_height_log2(bsize);
if (cm->frame_type == KEY_FRAME) {
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
adjust_act_zbin(cpi, x);
vp9_update_zbin_extra(cpi, x);
}
} else {
vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) {
// Adjust the zbin based on this MB rate.
adjust_act_zbin(cpi, x);
}
// Experimental code. Special case for gf and arf zeromv modes.
// Increase zbin size to suppress noise
cpi->zbin_mode_boost = 0;
if (cpi->zbin_mode_boost_enabled) {
// NOTE: the inner mode/reference conditions were garbled in the source
// and are reconstructed here (assumed): ZEROMV on a golden/alt reference
// gets the GF boost, ZEROMV on LAST the LF boost.
if (mbmi->ref_frame != INTRA_FRAME) {
if (mbmi->mode == ZEROMV) {
if (mbmi->ref_frame != LAST_FRAME)
cpi->zbin_mode_boost = GF_ZEROMV_ZBIN_BOOST;
else
cpi->zbin_mode_boost = LF_ZEROMV_ZBIN_BOOST;
} else {
cpi->zbin_mode_boost = MV_ZBIN_BOOST;
}
} else {
cpi->zbin_mode_boost = INTRA_ZBIN_BOOST;
}
}
vp9_update_zbin_extra(cpi, x);
}