/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_onyx_int.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/common/vp9_setupintrarecon.h"
#include "vp9/encoder/vp9_encodeintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_invtrans.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"
#include <limits.h>
#include "vpx_ports/vpx_timer.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_mvref_common.h"
static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled, int mb_row, int mb_col);
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
int output_enabled, int mb_row, int mb_col,
BLOCK_SIZE_TYPE bsize);
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
unsigned int inter_y_modes[MB_MODE_COUNT];
unsigned int inter_uv_modes[VP9_UV_MODES];
unsigned int inter_b_modes[B_MODE_COUNT];
unsigned int y_modes[VP9_YMODES];
unsigned int i8x8_modes[VP9_I8X8_MODES];
unsigned int uv_modes[VP9_UV_MODES];
unsigned int uv_modes_y[VP9_YMODES][VP9_UV_MODES];
unsigned int b_modes[B_MODE_COUNT];
/* activity_avg must be positive, or flat regions could get a zero weight
 * (infinite lambda), which confounds analysis.
 * This also avoids the need for divide by zero checks in
 * vp9_activity_masking().
 */
/* This is used as a reference when computing the source variance for the
* purposes of activity masking.
* Eventually this should be replaced by custom no-reference routines,
* which will be faster.
*/
static const uint8_t VP9_VAR_OFFS[16] = {
  128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure(VP9_COMP *cpi, MACROBLOCK *x) {
unsigned int act;
unsigned int sse;
/* TODO: This could also be done over smaller areas (8x8), but that would
* require extensive changes elsewhere, as lambda is assumed to be fixed
* over an entire MB in most of the code.
* Another option is to compute four 8x8 variances, and pick a single
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
  act = vp9_variance16x16(x->src.y_buffer, x->src.y_stride, VP9_VAR_OFFS, 0,
                          &sse);
/* If the region is flat, lower the activity some more. */
if (act < 8 << 12)
act = act < 5 << 12 ? act : 5 << 12;
  return act;
}
// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure(VP9_COMP *cpi,
                                         MACROBLOCK *x, int use_dc_pred) {
return vp9_encode_intra(cpi, x, use_dc_pred);
}
// Measure the activity of the current macroblock.
// What we measure here is TBD, so it is abstracted into this function.
static unsigned int mb_activity_measure(VP9_COMP *cpi, MACROBLOCK *x,
                                        int mb_row, int mb_col) {
  unsigned int mb_activity;
if (ALT_ACT_MEASURE) {
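    // use_dc_pred is true only for MBs in the first row or first column of
    // the frame (excluding the top-left corner).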
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
    // Or use an alternative measure.
mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
} else {
// Original activity measure from Tim T's code.
mb_activity = tt_activity_measure(cpi, x);
}
  if (mb_activity < VP9_ACTIVITY_AVG_MIN)
    mb_activity = VP9_ACTIVITY_AVG_MIN;

  return mb_activity;
}
// Calculate an "average" mb activity value for the frame
static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
// Find median: Simple n^2 algorithm for experimentation
{
unsigned int median;
unsigned int i, j;
unsigned int *sortlist;
unsigned int tmp;
// Create a list to sort to
CHECK_MEM_ERROR(sortlist,
vpx_calloc(sizeof(unsigned int),
cpi->common.MBs));
// Copy map to sort list
vpx_memcpy(sortlist, cpi->mb_activity_map,
sizeof(unsigned int) * cpi->common.MBs);
// Ripple each value down to its correct position
  for (i = 1; i < cpi->common.MBs; i++) {
    for (j = i; j > 0; j--) {
if (sortlist[j] < sortlist[j - 1]) {
// Swap values
tmp = sortlist[j - 1];
sortlist[j - 1] = sortlist[j];
sortlist[j] = tmp;
} else
break;
}
}
  // Even number of MBs, so estimate the median as the mean of the two
  // values either side of the midpoint.
median = (1 + sortlist[cpi->common.MBs >> 1] +
sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;
// Simple mean for now
cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
if (cpi->activity_avg < VP9_ACTIVITY_AVG_MIN)
cpi->activity_avg = VP9_ACTIVITY_AVG_MIN;
// Experimental code: return fixed value normalized for several clips
if (ALT_ACT_MEASURE)
cpi->activity_avg = 100000;
// Calculate an activity index for each mb
static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
VP9_COMMON *const cm = &cpi->common;
FILE *f = fopen("norm_act.stt", "a");
fprintf(f, "\n%12d\n", cpi->activity_avg);
// Reset pointers to start of activity map
x->mb_activity_ptr = cpi->mb_activity_map;
// Calculate normalized mb activity number.
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
// Read activity from the map
act = *(x->mb_activity_ptr);
// Calculate a normalized activity number
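      // The index is the rounded ratio of (4*act + avg) to (act + 4*avg),
      // offset so that an MB with exactly average activity gets index 0;
      // above-average activity gives a positive index, below-average a
      // negative one.  The 4:1 weighting damps the swing of the index.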
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
if (b >= a)
*(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
else
*(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
// Increment activity map pointers
x->mb_activity_ptr++;
}
// Loop through all MBs, noting the activity of each, computing the average
// activity, and calculating a normalized activity for each MB.
static void build_activity_map(VP9_COMP *cpi) {
VP9_COMMON *const cm = &cpi->common;
YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
int recon_yoffset;
int recon_y_stride = new_yv12->y_stride;
int mb_row, mb_col;
unsigned int mb_activity;
int64_t activity_sum = 0;
x->mb_activity_ptr = cpi->mb_activity_map;
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
// reset above block coeffs
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
xd->plane[0].dst.buf = new_yv12->y_buffer + recon_yoffset;
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
// measure activity
mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
// Store MB level activity details.
*x->mb_activity_ptr = mb_activity;
// Increment activity map pointer
x->mb_activity_ptr++;
// adjust to the next column of source macroblocks
x->src.y_buffer += 16;
}
// adjust to the next row of mbs
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
vp9_extend_mb_row(new_yv12, xd->plane[0].dst.buf + 16,
xd->plane[1].dst.buf + 8, xd->plane[2].dst.buf + 8);
// Calculate an "average" MB activity
calc_av_activity(cpi, activity_sum);
  // Calculate an activity index number for each mb
calc_activity_index(cpi, x);
void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
x->errorperbit += (x->errorperbit == 0);
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
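  // Scale rdmult by (2*act + avg) / (act + 2*avg), bounded to roughly
  // [0.5, 2.0]: high-activity MBs get a larger lambda (more distortion
  // tolerated, fewer bits) while flat MBs get a smaller one.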
a = act + (2 * cpi->activity_avg);
b = (2 * act) + cpi->activity_avg;
x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
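  // Re-derive errorperbit from the adjusted rdmult and make sure it is
  // never zero.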
x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
x->errorperbit += (x->errorperbit == 0);
// Activity based Zbin adjustment
adjust_act_zbin(cpi, x);
static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
                         BLOCK_SIZE_TYPE bsize, int output_enabled) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
int mb_mode = mi->mbmi.mode;
int mb_mode_index = ctx->best_mode_index;
const int mis = cpi->common.mode_info_stride;
const int bh = 1 << mb_height_log2(bsize), bw = 1 << mb_width_log2(bsize);
assert(mb_mode < MB_MODE_COUNT);
assert(mb_mode_index < MAX_MODES);
assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
  // Restore the coding context of the MB to that which was in place
  // when the mode was picked for it.
for (y = 0; y < bh; y++) {
for (x_idx = 0; x_idx < bw; x_idx++) {
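      // mb_to_right_edge / mb_to_bottom_edge are in 1/8th-pel units, so a
      // shift of 7 converts them to whole macroblocks; only copy mode info
      // for MBs that actually lie inside the visible frame.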
if ((xd->mb_to_right_edge >> 7) + bw > x_idx &&
(xd->mb_to_bottom_edge >> 7) + bh > y) {
MODE_INFO *mi_addr = xd->mode_info_context + x_idx + y * mis;
vpx_memcpy(mi_addr, mi, sizeof(MODE_INFO));
}
ctx->txfm_rd_diff[ALLOW_32X32] = ctx->txfm_rd_diff[ALLOW_16X16];
for (i = 0; i < 16; i++) {
xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
assert(xd->block[i].bmi.as_mode.first < B_MODE_COUNT);
} else if (mb_mode == I8X8_PRED) {
for (i = 0; i < 16; i++) {
xd->block[i].bmi = xd->mode_info_context->bmi[i];
} else if (mb_mode == SPLITMV) {
vpx_memcpy(x->partition_info, &ctx->partition_info,
sizeof(PARTITION_INFO));
mbmi->mv[0].as_int = x->partition_info->bmi[15].mv.as_int;
mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
x->skip = ctx->skip;
int segment_id = mbmi->segment_id, ref_pred_flag;
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_SKIP)) {
for (i = 0; i < NB_TXFM_MODES; i++) {
cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
}
}
  // Note whether the chosen reference frame matched its predicted value.
ref_pred_flag = ((xd->mode_info_context->mbmi.ref_frame ==
vp9_get_pred_ref(cm, xd)));
vp9_set_pred_flag(xd, PRED_REF, ref_pred_flag);
if (!xd->segmentation_enabled ||
!vp9_segfeature_active(xd, segment_id, SEG_LVL_REF_FRAME) ||
vp9_check_segref(xd, segment_id, INTRA_FRAME) +
vp9_check_segref(xd, segment_id, LAST_FRAME) +
vp9_check_segref(xd, segment_id, GOLDEN_FRAME) +
vp9_check_segref(xd, segment_id, ALTREF_FRAME) > 1) {
// Get the prediction context and status
int pred_context = vp9_get_pred_context(cm, xd, PRED_REF);
// Count prediction success
cpi->ref_pred_count[pred_context][ref_pred_flag]++;
}
if (cpi->common.frame_type == KEY_FRAME) {
// Restore the coding modes to that held in the coding context
// for (i = 0; i < 16; i++)
// {
// xd->block[i].bmi.as_mode =
// xd->mode_info_context->bmi[i].as_mode;
// assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
// }
#if CONFIG_INTERNAL_STATS
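    // Map each key-frame intra prediction mode onto the corresponding RD
    // threshold (THR_*) index.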
static const int kf_mode_index[] = {
THR_DC /*DC_PRED*/,
THR_V_PRED /*V_PRED*/,
THR_H_PRED /*H_PRED*/,
THR_D45_PRED /*D45_PRED*/,
THR_D135_PRED /*D135_PRED*/,
THR_D117_PRED /*D117_PRED*/,
THR_D153_PRED /*D153_PRED*/,
THR_D27_PRED /*D27_PRED*/,
THR_D63_PRED /*D63_PRED*/,
THR_TM /*TM_PRED*/,
THR_I8X8_PRED /*I8X8_PRED*/,
} else {
/*
// Reduce the activation RD thresholds for the best choice mode
if ((cpi->rd_baseline_thresh[mb_mode_index] > 0) &&
(cpi->rd_baseline_thresh[mb_mode_index] < (INT_MAX >> 2)))
{
int best_adjustment = (cpi->rd_thresh_mult[mb_mode_index] >> 2);
cpi->rd_thresh_mult[mb_mode_index] =
(cpi->rd_thresh_mult[mb_mode_index]
>= (MIN_THRESHMULT + best_adjustment)) ?
cpi->rd_thresh_mult[mb_mode_index] - best_adjustment :
MIN_THRESHMULT;
cpi->rd_threshes[mb_mode_index] =
(cpi->rd_baseline_thresh[mb_mode_index] >> 7)
* cpi->rd_thresh_mult[mb_mode_index];
}
*/
// Note how often each mode chosen as best
cpi->mode_chosen_counts[mb_mode_index]++;
if (mbmi->mode == SPLITMV || mbmi->mode == NEWMV) {
int_mv best_mv, best_second_mv;
MV_REFERENCE_FRAME rf = mbmi->ref_frame;
best_mv.as_int = ctx->best_ref_mv.as_int;
best_second_mv.as_int = ctx->second_best_ref_mv.as_int;
if (mbmi->mode == NEWMV) {
best_mv.as_int = mbmi->ref_mvs[rf][0].as_int;
best_second_mv.as_int = mbmi->ref_mvs[mbmi->second_ref_frame][0].as_int;
}
mbmi->best_mv.as_int = best_mv.as_int;
mbmi->best_second_mv.as_int = best_second_mv.as_int;
vp9_update_nmv_count(cpi, x, &best_mv, &best_second_mv);
}
#if CONFIG_COMP_INTERINTRA_PRED
if (mbmi->mode >= NEARESTMV && mbmi->mode < SPLITMV &&
mbmi->second_ref_frame <= INTRA_FRAME) {
if (mbmi->second_ref_frame == INTRA_FRAME) {
++cpi->interintra_count[1];
++cpi->ymode_count[mbmi->interintra_mode];
#if SEPARATE_INTERINTRA_UV
++cpi->y_uv_mode_count[mbmi->interintra_mode][mbmi->interintra_uv_mode];
#endif
} else {
++cpi->interintra_count[0];
}
}
if (cpi->common.mcomp_filter_type == SWITCHABLE &&
mbmi->mode >= NEARESTMV &&
mbmi->mode <= SPLITMV) {
++cpi->switchable_interp_count
[vp9_get_pred_context(&cpi->common, xd, PRED_SWITCHABLE_INTERP)]
[vp9_switchable_interp_map[mbmi->interp_filter]];
}
cpi->rd_comp_pred_diff[SINGLE_PREDICTION_ONLY] += ctx->single_pred_diff;
cpi->rd_comp_pred_diff[COMP_PREDICTION_ONLY] += ctx->comp_pred_diff;
cpi->rd_comp_pred_diff[HYBRID_PREDICTION] += ctx->hybrid_pred_diff;
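// Return the smallest segment id found in the per-MB segment map over the
// area covered by this block, clamped to the frame dimensions; seg_id starts
// at UINT_MAX, so any valid map entry will replace it.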
static unsigned find_seg_id(uint8_t *buf, BLOCK_SIZE_TYPE bsize,
int start_y, int height, int start_x, int width) {
const int bw = 1 << mb_width_log2(bsize), bh = 1 << mb_height_log2(bsize);
const int end_x = MIN(start_x + bw, width);
const int end_y = MIN(start_y + bh, height);
int x, y;
unsigned seg_id = -1;
buf += width * start_y;
for (y = start_y; y < end_y; y++, buf += width) {
for (x = start_x; x < end_x; x++) {
seg_id = MIN(seg_id, buf[x]);
}
}
return seg_id;
}
static void set_offsets(VP9_COMP *cpi,
int mb_row, int mb_col, BLOCK_SIZE_TYPE bsize) {
MACROBLOCK *const x = &cpi->mb;
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
const int dst_fb_idx = cm->new_fb_idx;
const int idx_map = mb_row * cm->mb_cols + mb_col;
const int idx_str = xd->mode_info_stride * mb_row + mb_col;
const int bw = 1 << mb_width_log2(bsize), bh = 1 << mb_height_log2(bsize);
// entropy context structures
xd->above_context = cm->above_context + mb_col;
xd->left_context = cm->left_context + (mb_row & 3);
// Activity map pointer
x->mb_activity_ptr = &cpi->mb_activity_map[idx_map];
x->active_ptr = cpi->active_map + idx_map;
/* pointers to mode info contexts */
x->partition_info = x->pi + idx_str;
xd->mode_info_context = cm->mi + idx_str;
mbmi = &xd->mode_info_context->mbmi;
xd->prev_mode_info_context = cm->prev_mi + idx_str;
// Set up destination pointers
setup_dst_planes(xd, &cm->yv12_fb[dst_fb_idx], mb_row, mb_col);
/* Set up limit values for MV components to prevent them from
* extending beyond the UMV borders assuming 16x16 block size */
x->mv_row_min = -((mb_row * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_col_min = -((mb_col * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_row_max = ((cm->mb_rows - mb_row) * 16 +
(VP9BORDERINPIXELS - 16 * bh - VP9_INTERP_EXTEND));
x->mv_col_max = ((cm->mb_cols - mb_col) * 16 +
(VP9BORDERINPIXELS - 16 * bw - VP9_INTERP_EXTEND));
// Set up distance of MB to edge of frame in 1/8th pel units
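  // A block must start at a position that is a multiple of its own size.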
assert(!(mb_col & (bw - 1)) && !(mb_row & (bh - 1)));
set_mb_row(cm, xd, mb_row, bh);
set_mb_col(cm, xd, mb_col, bw);
setup_pred_block(&x->src, cpi->Source, mb_row, mb_col, NULL, NULL);
/* R/D setup */
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
/* segment ID */
if (xd->segmentation_enabled) {
if (xd->update_mb_segmentation_map) {
mbmi->segment_id = find_seg_id(cpi->segmentation_map, bsize,
mb_row, cm->mb_rows, mb_col, cm->mb_cols);
} else {
mbmi->segment_id = find_seg_id(cm->last_frame_seg_map, bsize,
mb_row, cm->mb_rows, mb_col, cm->mb_cols);
}
assert(mbmi->segment_id <= (MAX_MB_SEGMENTS-1));
vp9_mb_init_quantizer(cpi, x);
if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
!vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME) &&
vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME) &&
vp9_check_segref(xd, 1, INTRA_FRAME) +
vp9_check_segref(xd, 1, LAST_FRAME) +
vp9_check_segref(xd, 1, GOLDEN_FRAME) +
vp9_check_segref(xd, 1, ALTREF_FRAME) == 1) {
cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
} else {
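    // Otherwise estimate progress from the MB's raster position within the
    // current tile: (x, y) is the origin of the enclosing 64x64 SB in MB
    // units, and p32/p16 locate the MB within its 64x64 and 32x32 parents.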
const int y = mb_row & ~3;
const int x = mb_col & ~3;
const int p16 = ((mb_row & 1) << 1) + (mb_col & 1);
const int p32 = ((mb_row & 2) << 2) + ((mb_col & 2) << 1);
const int tile_progress = cm->cur_tile_mb_col_start * cm->mb_rows;
const int mb_cols = cm->cur_tile_mb_col_end - cm->cur_tile_mb_col_start;
      cpi->seg0_progress =
          ((y * mb_cols + x * 4 + p32 + p16 + tile_progress) << 16) / cm->MBs;
}
} else {
mbmi->segment_id = 0;
}
}
static int pick_mb_mode(VP9_COMP *cpi,
int mb_row,
int mb_col,
TOKENEXTRA **tp,
int *totalrate,
int *totaldist) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
set_offsets(cpi, mb_row, mb_col, BLOCK_SIZE_MB16X16);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
mbmi = &xd->mode_info_context->mbmi;
mbmi->sb_type = BLOCK_SIZE_MB16X16;
// Find best coding mode & reconstruct the MB so it is available
// as a predictor for MBs that follow in the SB
if (cm->frame_type == KEY_FRAME) {
vp9_rd_pick_intra_mode(cpi, x, totalrate, totaldist);
// Save the coding context
vpx_memcpy(&x->mb_context[xd->sb_index][xd->mb_index].mic,
xd->mode_info_context, sizeof(MODE_INFO));
} else {
vp9_pick_mode_inter_macroblock(cpi, x, mb_row, mb_col,
totalrate, totaldist);
splitmodes_used += (mbmi->mode == SPLITMV);
if (cpi->mb.e_mbd.segmentation_enabled && mbmi->segment_id == 0) {
cpi->seg0_idx++;
static void pick_sb_modes(VP9_COMP *cpi, int mb_row, int mb_col,
TOKENEXTRA **tp, int *totalrate, int *totaldist,
BLOCK_SIZE_TYPE bsize, PICK_MODE_CONTEXT *ctx) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
set_offsets(cpi, mb_row, mb_col, bsize);
xd->mode_info_context->mbmi.sb_type = bsize;
vp9_activity_masking(cpi, x);
/* Find best coding mode & reconstruct the MB so it is available
* as a predictor for MBs that follow in the SB */
vp9_rd_pick_intra_mode_sb(cpi, x, totalrate, totaldist, bsize, ctx);
    vp9_rd_pick_inter_mode_sb(cpi, x, mb_row, mb_col, totalrate, totaldist,
                              bsize, ctx);
static void update_stats(VP9_COMP *cpi, int mb_row, int mb_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MODE_INFO *mi = xd->mode_info_context;
MB_MODE_INFO *const mbmi = &mi->mbmi;
if (cm->frame_type == KEY_FRAME) {
#ifdef MODE_STATS
y_modes[mbmi->mode]++;
} else {
int segment_id, seg_ref_active;
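    // For inter-coded MBs, count how often single vs. compound prediction is
    // used, keyed on the prediction context.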
if (mbmi->ref_frame) {
int pred_context = vp9_get_pred_context(cm, xd, PRED_COMP);
if (mbmi->second_ref_frame <= INTRA_FRAME)
cpi->single_pred_count[pred_context]++;
else
cpi->comp_pred_count[pred_context]++;
#ifdef MODE_STATS
inter_y_modes[mbmi->mode]++;
for (b = 0; b < x->partition_info->count; b++) {
inter_b_modes[x->partition_info->bmi[b].mode]++;
}
}
#endif
// If we have just a single reference frame coded for a segment then
// exclude from the reference frame counts used to work out
    // probabilities. NOTE: At the moment we don't support custom trees
// for the reference frame coding for each segment but this is a
// possible future action.
segment_id = mbmi->segment_id;
seg_ref_active = vp9_segfeature_active(xd, segment_id,
SEG_LVL_REF_FRAME);
if (!seg_ref_active ||
((vp9_check_segref(xd, segment_id, INTRA_FRAME) +
vp9_check_segref(xd, segment_id, LAST_FRAME) +
vp9_check_segref(xd, segment_id, GOLDEN_FRAME) +
vp9_check_segref(xd, segment_id, ALTREF_FRAME)) > 1)) {
cpi->count_mb_ref_frame_usage[mbmi->ref_frame]++;
// Count of last ref frame 0,0 usage
if ((mbmi->mode == ZEROMV) && (mbmi->ref_frame == LAST_FRAME))
cpi->inter_zz_count++;
}
}
static void encode_sb(VP9_COMP *cpi,
int mb_row,
int mb_col,
int output_enabled,
TOKENEXTRA **tp, BLOCK_SIZE_TYPE is_sb) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
BLOCK_SIZE_TYPE bsize = BLOCK_SIZE_SB32X32;
if (is_sb == BLOCK_SIZE_SB32X32) {
set_offsets(cpi, mb_row, mb_col, bsize);
    update_state(cpi, &x->sb32_context[xd->sb_index], bsize, output_enabled);
    encode_superblock(cpi, tp, output_enabled, mb_row, mb_col, bsize);
if (output_enabled) {
update_stats(cpi, mb_row, mb_col);
cpi->partition_count[partition_plane(bsize)][PARTITION_NONE]++;
(*tp)->token = EOSB_TOKEN;
#if CONFIG_SBSEGMENT
} else if (is_sb == BLOCK_SIZE_SB16X32) {
int i;
if (output_enabled)
cpi->partition_count[partition_plane(bsize)][PARTITION_VERT]++;
for (i = 0; i < 2 && mb_col + i != cm->mb_cols; i++) {
set_offsets(cpi, mb_row, mb_col + i, BLOCK_SIZE_SB16X32);
update_state(cpi, &x->sb16x32_context[xd->sb_index][i],
BLOCK_SIZE_SB16X32, output_enabled);
encode_superblock(cpi, tp,
output_enabled, mb_row, mb_col + i, BLOCK_SIZE_SB16X32);
if (output_enabled) {
update_stats(cpi, mb_row, mb_col + i);
(*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
} else if (is_sb == BLOCK_SIZE_SB32X16) {
int i;
if (output_enabled)
cpi->partition_count[partition_plane(bsize)][PARTITION_HORZ]++;
for (i = 0; i < 2 && mb_row + i != cm->mb_rows; i++) {
set_offsets(cpi, mb_row + i, mb_col, BLOCK_SIZE_SB32X16);
update_state(cpi, &x->sb32x16_context[xd->sb_index][i],
BLOCK_SIZE_SB32X16, output_enabled);
encode_superblock(cpi, tp,
output_enabled, mb_row + i, mb_col, BLOCK_SIZE_SB32X16);
if (output_enabled) {
update_stats(cpi, mb_row + i, mb_col);
(*tp)->token = EOSB_TOKEN;
(*tp)++;
}
}
#endif
if (output_enabled)
cpi->partition_count[partition_plane(bsize)][PARTITION_SPLIT]++;
for (i = 0; i < 4; i++) {
const int x_idx = i & 1, y_idx = i >> 1;
if ((mb_row + y_idx >= cm->mb_rows) || (mb_col + x_idx >= cm->mb_cols)) {
// MB lies outside frame, move on
continue;
set_offsets(cpi, mb_row + y_idx, mb_col + x_idx, BLOCK_SIZE_MB16X16);
update_state(cpi, &x->mb_context[xd->sb_index][i],
BLOCK_SIZE_MB16X16, output_enabled);
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
encode_macroblock(cpi, tp,
output_enabled, mb_row + y_idx, mb_col + x_idx);
if (output_enabled) {
update_stats(cpi, mb_row + y_idx, mb_col + x_idx);
(*tp)->token = EOSB_TOKEN;
(*tp)++;
{
FILE *statsfile;
statsfile = fopen("segmap2.stt", "a");
fprintf(statsfile, "\n");
fclose(statsfile);
}
#endif
}
static void encode_sb64(VP9_COMP *cpi,
int mb_row,
int mb_col,
TOKENEXTRA **tp, BLOCK_SIZE_TYPE is_sb[4]) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
BLOCK_SIZE_TYPE bsize = BLOCK_SIZE_SB64X64;
if (is_sb[0] == BLOCK_SIZE_SB64X64) {
set_offsets(cpi, mb_row, mb_col, bsize);
update_state(cpi, &x->sb64_context, bsize, 1);
update_stats(cpi, mb_row, mb_col);
(*tp)->token = EOSB_TOKEN;
cpi->partition_count[partition_plane(bsize)][PARTITION_NONE]++;
#if CONFIG_SBSEGMENT
} else if (is_sb[0] == BLOCK_SIZE_SB32X64) {
int i;
cpi->partition_count[partition_plane(bsize)][PARTITION_VERT]++;
for (i = 0; i < 2 && mb_col + i * 2 != cm->mb_cols; i++) {
set_offsets(cpi, mb_row, mb_col + i * 2, BLOCK_SIZE_SB32X64);
update_state(cpi, &x->sb32x64_context[i], BLOCK_SIZE_SB32X64, 1);
encode_superblock(cpi, tp,
1, mb_row, mb_col + i * 2, BLOCK_SIZE_SB32X64);
update_stats(cpi, mb_row, mb_col + i * 2);
(*tp)->token = EOSB_TOKEN;
(*tp)++;
}
} else if (is_sb[0] == BLOCK_SIZE_SB64X32) {
int i;
cpi->partition_count[partition_plane(bsize)][PARTITION_HORZ]++;
for (i = 0; i < 2 && mb_row + i * 2 != cm->mb_rows; i++) {
set_offsets(cpi, mb_row + i * 2, mb_col, BLOCK_SIZE_SB64X32);
update_state(cpi, &x->sb64x32_context[i], BLOCK_SIZE_SB64X32, 1);
encode_superblock(cpi, tp,
1, mb_row + i * 2, mb_col, BLOCK_SIZE_SB64X32);
update_stats(cpi, mb_row + i * 2, mb_col);
(*tp)->token = EOSB_TOKEN;
(*tp)++;
}
#endif
cpi->partition_count[partition_plane(bsize)][PARTITION_SPLIT]++;
for (i = 0; i < 4; i++) {
const int x_idx = i & 1, y_idx = i >> 1;
if (mb_row + y_idx * 2 >= cm->mb_rows ||
mb_col + x_idx * 2 >= cm->mb_cols) {
// MB lies outside frame, move on
continue;
}
xd->sb_index = i;
    encode_sb(cpi, mb_row + 2 * y_idx, mb_col + 2 * x_idx, 1, tp, is_sb[i]);
static void encode_sb_row(VP9_COMP *cpi,
int mb_row,
TOKENEXTRA **tp,
int *totalrate) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
int mb_col;
// Initialize the left context for the new SB row
vpx_memset(cm->left_context, 0, sizeof(cm->left_context));
for (mb_col = cm->cur_tile_mb_col_start;
mb_col < cm->cur_tile_mb_col_end; mb_col += 4) {
BLOCK_SIZE_TYPE sb_partitioning[4];
int sb64_rate = 0, sb64_dist = 0;
ENTROPY_CONTEXT_PLANES l[4], a[4];
TOKENEXTRA *tp_orig = *tp;
memcpy(&a, cm->above_context + mb_col, sizeof(a));
memcpy(&l, cm->left_context, sizeof(l));
for (i = 0; i < 4; i++) {
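      // i indexes the four 32x32 blocks inside the 64x64 SB; convert it to
      // MB offsets (0 or 2) from the SB origin.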
const int x_idx = (i & 1) << 1, y_idx = i & 2;
int sb32_rate = 0, sb32_dist = 0;
int splitmodes_used = 0;
int sb32_skip = 0;
int j;
ENTROPY_CONTEXT_PLANES l2[2], a2[2];
if (mb_row + y_idx >= cm->mb_rows || mb_col + x_idx >= cm->mb_cols)
continue;
xd->sb_index = i;
/* Function should not modify L & A contexts; save and restore on exit */
vpx_memcpy(l2, cm->left_context + y_idx, sizeof(l2));
vpx_memcpy(a2, cm->above_context + mb_col + x_idx, sizeof(a2));
/* Encode MBs in raster order within the SB */
sb_partitioning[i] = BLOCK_SIZE_MB16X16;
for (j = 0; j < 4; j++) {
const int x_idx_m = x_idx + (j & 1), y_idx_m = y_idx + (j >> 1);
int r, d;
if (mb_row + y_idx_m >= cm->mb_rows ||
mb_col + x_idx_m >= cm->mb_cols) {
// MB lies outside frame, move on
continue;
}
// Index of the MB in the SB 0..3
xd->mb_index = j;
splitmodes_used += pick_mb_mode(cpi, mb_row + y_idx_m,
mb_col + x_idx_m, tp, &r, &d);
sb32_rate += r;
sb32_dist += d;
// Dummy encode, do not do the tokenization
encode_macroblock(cpi, tp, 0, mb_row + y_idx_m,
mb_col + x_idx_m);
}
/* Restore L & A coding context to those in place on entry */
vpx_memcpy(cm->left_context + y_idx, l2, sizeof(l2));
vpx_memcpy(cm->above_context + mb_col + x_idx, a2, sizeof(a2));
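      // Add the rate cost of signalling that this 32x32 is split into 16x16s.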
sb32_rate += x->partition_cost[partition_plane(BLOCK_SIZE_SB32X32)]
[PARTITION_SPLIT];
if (cpi->sf.splitmode_breakout) {
sb32_skip = splitmodes_used;
sb64_skip += splitmodes_used;
}
#if CONFIG_SBSEGMENT
// check 32x16
if (mb_col + x_idx + 1 < cm->mb_cols) {
int r, d;
xd->mb_index = 0;
pick_sb_modes(cpi, mb_row + y_idx, mb_col + x_idx,
tp, &r, &d, BLOCK_SIZE_SB32X16,