/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vp9/encoder/encodeframe.h"
#include "vp9/common/extend.h"
#include "vp9/common/entropymode.h"
#include "vp9/common/quant_common.h"
#include "vp9/common/setupintrarecon.h"
#include "vp9/common/reconintra4x4.h"
#include "vp9/common/reconinter.h"
#include "vp9/common/invtrans.h"
#include "vp9/common/findnearmv.h"
#include "vp9/common/reconintra.h"
#include "vp9/common/seg_common.h"
#include "vp9/encoder/tokenize.h"
#include "vp9/common/pred_common.h"
#include "vp9/common/mvref_common.h"
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD(x) &cpi->common.rtcd.x
#define IF_RTCD(x) (x)
#else
#define RTCD(x) NULL
#define IF_RTCD(x) NULL
#endif
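// With runtime CPU detection enabled, RTCD(x) resolves through the function
// table selected at startup; otherwise the calls are bound at build time and
// the NULL table pointer is unused.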
#ifdef ENC_DEBUG
int enc_debug = 0;
int mb_row_debug, mb_col_debug;
#endif
static void encode_inter_macroblock(VP9_COMP *cpi, MACROBLOCK *x,
TOKENEXTRA **t, int recon_yoffset,
int recon_uvoffset, int output_enabled);
static void encode_inter_superblock(VP9_COMP *cpi, MACROBLOCK *x,
TOKENEXTRA **t, int recon_yoffset,
int recon_uvoffset, int mb_col, int mb_row);
static void encode_intra_macro_block(VP9_COMP *cpi, MACROBLOCK *x,
TOKENEXTRA **t, int output_enabled);
static void encode_intra_super_block(VP9_COMP *cpi, MACROBLOCK *x,
TOKENEXTRA **t, int mb_col);
static void adjust_act_zbin(VP9_COMP *cpi, MACROBLOCK *x);
unsigned int inter_y_modes[MB_MODE_COUNT];
unsigned int inter_uv_modes[VP9_UV_MODES];
unsigned int inter_b_modes[B_MODE_COUNT];
unsigned int y_modes[VP9_YMODES];
unsigned int i8x8_modes[VP9_I8X8_MODES];
unsigned int uv_modes[VP9_UV_MODES];
unsigned int uv_modes_y[VP9_YMODES][VP9_UV_MODES];
unsigned int b_modes[B_MODE_COUNT];
/* activity_avg must be positive, or flat regions could get a zero weight
* (infinite lambda), which confounds analysis.
* This also avoids the need for divide by zero checks in
* vp9_activity_masking().
*/
#define VP9_ACTIVITY_AVG_MIN (64)

#define ALT_ACT_MEASURE 1
#define ACT_MEDIAN 0
#define USE_ACT_INDEX 0
#define OUTPUT_NORM_ACT_STATS 0
/* This is used as a reference when computing the source variance for the
* purposes of activity masking.
* Eventually this should be replaced by custom no-reference routines,
* which will be faster.
*/
static const unsigned char VP9_VAR_OFFS[16] = {
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
};
// Original activity measure from Tim T's code.
static unsigned int tt_activity_measure(VP9_COMP *cpi, MACROBLOCK *x) {
unsigned int act;
unsigned int sse;
/* TODO: This could also be done over smaller areas (8x8), but that would
* require extensive changes elsewhere, as lambda is assumed to be fixed
* over an entire MB in most of the code.
* Another option is to compute four 8x8 variances, and pick a single
* lambda using a non-linear combination (e.g., the smallest, or second
* smallest, etc.).
*/
act = vp9_variance16x16(x->src.y_buffer, x->src.y_stride, VP9_VAR_OFFS, 0,
&sse);
act = act << 4;
/* If the region is flat, lower the activity some more. */
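/* Values in [5 << 12, 8 << 12) are pulled down to 5 << 12; values already
 * below 5 << 12 pass through unchanged. */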
if (act < 8 << 12)
act = act < 5 << 12 ? act : 5 << 12;
return act;
}
// Stub for alternative experimental activity measures.
static unsigned int alt_activity_measure(VP9_COMP *cpi,
MACROBLOCK *x, int use_dc_pred) {
return vp9_encode_intra(cpi, x, use_dc_pred);
}
// Measure the activity of the current macroblock
// What we measure here is TBD so abstracted to this function
static unsigned int mb_activity_measure(VP9_COMP *cpi, MACROBLOCK *x,
int mb_row, int mb_col) {
unsigned int mb_activity;

if (ALT_ACT_MEASURE) {
int use_dc_pred = (mb_col || mb_row) && (!mb_col || !mb_row);
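// use_dc_pred is true only along the top row or left column, excluding the
// corner MB at (0, 0): at least one of mb_row/mb_col is zero, but not both.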
// Or use an alternative.
mb_activity = alt_activity_measure(cpi, x, use_dc_pred);
} else {
// Original activity measure from Tim T's code.
mb_activity = tt_activity_measure(cpi, x);
}
if (mb_activity < VP9_ACTIVITY_AVG_MIN)
mb_activity = VP9_ACTIVITY_AVG_MIN;
return mb_activity;
}
// Calculate an "average" mb activity value for the frame
static void calc_av_activity(VP9_COMP *cpi, int64_t activity_sum) {
#if ACT_MEDIAN
// Find median: Simple n^2 algorithm for experimentation
{
unsigned int median;
unsigned int i, j;
unsigned int *sortlist;
unsigned int tmp;
// Create a list to sort to
CHECK_MEM_ERROR(sortlist,
vpx_calloc(sizeof(unsigned int),
cpi->common.MBs));
// Copy map to sort list
vpx_memcpy(sortlist, cpi->mb_activity_map,
sizeof(unsigned int) * cpi->common.MBs);
// Ripple each value down to its correct position
for (i = 1; i < cpi->common.MBs; i ++) {
for (j = i; j > 0; j --) {
if (sortlist[j] < sortlist[j - 1]) {
// Swap values
tmp = sortlist[j - 1];
sortlist[j - 1] = sortlist[j];
sortlist[j] = tmp;
} else
break;
}
}
// Even number MBs so estimate median as mean of two either side.
median = (1 + sortlist[cpi->common.MBs >> 1] +
sortlist[(cpi->common.MBs >> 1) + 1]) >> 1;

cpi->activity_avg = median;

vpx_free(sortlist);
}
#else
// Simple mean for now
cpi->activity_avg = (unsigned int)(activity_sum / cpi->common.MBs);
#endif

if (cpi->activity_avg < VP9_ACTIVITY_AVG_MIN)
cpi->activity_avg = VP9_ACTIVITY_AVG_MIN;
// Experimental code: return fixed value normalized for several clips
if (ALT_ACT_MEASURE)
cpi->activity_avg = 100000;
}

#if USE_ACT_INDEX
// Calculate an activity index for each mb
static void calc_activity_index(VP9_COMP *cpi, MACROBLOCK *x) {
VP9_COMMON *const cm = &cpi->common;
int mb_row, mb_col;

int64_t act;
int64_t a;
int64_t b;

#if OUTPUT_NORM_ACT_STATS
FILE *f = fopen("norm_act.stt", "a");
fprintf(f, "\n%12d\n", cpi->activity_avg);
#endif
// Reset pointers to start of activity map
x->mb_activity_ptr = cpi->mb_activity_map;
// Calculate normalized mb activity number.
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
// Read activity from the map
act = *(x->mb_activity_ptr);
// Calculate a normalized activity number
a = act + 4 * cpi->activity_avg;
b = 4 * act + cpi->activity_avg;
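// b / a = (4 * act + avg) / (act + 4 * avg) is a damped act / avg ratio
// bounded to (1/4, 4), so the signed index below stays small; it is >= 0
// exactly when act >= avg.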
if (b >= a)
*(x->activity_ptr) = (int)((b + (a >> 1)) / a) - 1;
else
*(x->activity_ptr) = 1 - (int)((a + (b >> 1)) / b);
// Increment activity map pointers
x->mb_activity_ptr++;
}
}

#if OUTPUT_NORM_ACT_STATS
fclose(f);
#endif
}
#endif  // USE_ACT_INDEX
// Loop through all MBs. Note activity of each, average activity and
// calculate a normalized activity for each
static void build_activity_map(VP9_COMP *cpi) {
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *xd = &x->e_mbd;
VP9_COMMON *const cm = &cpi->common;
YV12_BUFFER_CONFIG *new_yv12 = &cm->yv12_fb[cm->new_fb_idx];
int recon_yoffset;
int recon_y_stride = new_yv12->y_stride;
int mb_row, mb_col;
unsigned int mb_activity;
int64_t activity_sum = 0;
// for each macroblock row in image
for (mb_row = 0; mb_row < cm->mb_rows; mb_row++) {
// reset above block coeffs
xd->up_available = (mb_row != 0);
recon_yoffset = (mb_row * recon_y_stride * 16);
// for each macroblock col in image
for (mb_col = 0; mb_col < cm->mb_cols; mb_col++) {
xd->dst.y_buffer = new_yv12->y_buffer + recon_yoffset;
xd->left_available = (mb_col != 0);
recon_yoffset += 16;
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
// measure activity
mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);

// Keep frame sum
activity_sum += mb_activity;
// Store MB level activity details.
*x->mb_activity_ptr = mb_activity;
// Increment activity map pointer
x->mb_activity_ptr++;
// adjust to the next column of source macroblocks
x->src.y_buffer += 16;
}
// adjust to the next row of mbs
x->src.y_buffer += 16 * x->src.y_stride - 16 * cm->mb_cols;
vp9_extend_mb_row(new_yv12, xd->dst.y_buffer + 16,
xd->dst.u_buffer + 8, xd->dst.v_buffer + 8);
}

// Calculate an "average" MB activity
calc_av_activity(cpi, activity_sum);
#if USE_ACT_INDEX
// Calculate an activity index number for each mb
calc_activity_index(cpi, x);
#endif
}

// Macroblock activity masking
void vp9_activity_masking(VP9_COMP *cpi, MACROBLOCK *x) {
#if USE_ACT_INDEX
x->rdmult += *(x->mb_activity_ptr) * (x->rdmult >> 2);
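// Note: errorperbit is rdmult / rddiv scaled down by 100 / 110; the
// "+= (x->errorperbit == 0)" idiom below clamps it to at least 1 without
// a branch.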
x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
x->errorperbit += (x->errorperbit == 0);
#else
int64_t a;
int64_t b;
int64_t act = *(x->mb_activity_ptr);
// Apply the masking to the RD multiplier.
a = act + (2 * cpi->activity_avg);
b = (2 * act) + cpi->activity_avg;
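// b / a is a damped act / activity_avg ratio: it tends to 1/2 as act goes
// to zero and to 2 as act grows, so masking scales rdmult by at most 2x in
// either direction.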
x->rdmult = (unsigned int)(((int64_t)x->rdmult * b + (a >> 1)) / a);
x->errorperbit = x->rdmult * 100 / (110 * x->rddiv);
x->errorperbit += (x->errorperbit == 0);
#endif
// Activity based Zbin adjustment
adjust_act_zbin(cpi, x);
}

static void update_state(VP9_COMP *cpi, MACROBLOCK *x, PICK_MODE_CONTEXT *ctx) {
int i;
MACROBLOCKD *xd = &x->e_mbd;
MODE_INFO *mi = &ctx->mic;
MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
int mb_mode = mi->mbmi.mode;
int mb_mode_index = ctx->best_mode_index;
assert(mb_mode < MB_MODE_COUNT);
assert(mb_mode_index < MAX_MODES);
assert(mi->mbmi.ref_frame < MAX_REF_FRAMES);
// Restore the coding context of the MB to the state that was in place
// when the mode was picked for it
vpx_memcpy(xd->mode_info_context, mi, sizeof(MODE_INFO));
#if CONFIG_SUPERBLOCKS
if (mi->mbmi.encoded_as_sb) {
const int mis = cpi->common.mode_info_stride;
if (xd->mb_to_right_edge > 0)
vpx_memcpy(xd->mode_info_context + 1, mi, sizeof(MODE_INFO));
if (xd->mb_to_bottom_edge > 0) {
vpx_memcpy(xd->mode_info_context + mis, mi, sizeof(MODE_INFO));
if (xd->mb_to_right_edge > 0)
vpx_memcpy(xd->mode_info_context + mis + 1, mi, sizeof(MODE_INFO));
}
}
#endif

if (mb_mode == B_PRED) {
for (i = 0; i < 16; i++) {
xd->block[i].bmi.as_mode = xd->mode_info_context->bmi[i].as_mode;
assert(xd->block[i].bmi.as_mode.first < MB_MODE_COUNT);
}
} else if (mb_mode == I8X8_PRED) {
for (i = 0; i < 16; i++) {
xd->block[i].bmi = xd->mode_info_context->bmi[i];
}
} else if (mb_mode == SPLITMV) {
vpx_memcpy(x->partition_info, &ctx->partition_info,
sizeof(PARTITION_INFO));
mbmi->mv[0].as_int = x->partition_info->bmi[15].mv.as_int;
mbmi->mv[1].as_int = x->partition_info->bmi[15].second_mv.as_int;
}

{
int segment_id = mbmi->segment_id;
if (!vp9_segfeature_active(xd, segment_id, SEG_LVL_EOB) ||
vp9_get_segdata(xd, segment_id, SEG_LVL_EOB)) {
for (i = 0; i < NB_TXFM_MODES; i++) {
cpi->rd_tx_select_diff[i] += ctx->txfm_rd_diff[i];
}
}
}
if (cpi->common.frame_type == KEY_FRAME) {
// Restore the coding modes to that held in the coding context
// if (mb_mode == B_PRED)
// for (i = 0; i < 16; i++)
// {
// xd->block[i].bmi.as_mode =
// xd->mode_info_context->bmi[i].as_mode;
// assert(xd->mode_info_context->bmi[i].as_mode < MB_MODE_COUNT);
// }
#if CONFIG_INTERNAL_STATS
static const int kf_mode_index[] = {
THR_DC /*DC_PRED*/,
THR_V_PRED /*V_PRED*/,
THR_H_PRED /*H_PRED*/,
THR_D45_PRED /*D45_PRED*/,
THR_D135_PRED /*D135_PRED*/,
THR_D117_PRED /*D117_PRED*/,
THR_D153_PRED /*D153_PRED*/,
THR_D27_PRED /*D27_PRED*/,
THR_D63_PRED /*D63_PRED*/,
THR_TM /*TM_PRED*/,
THR_I8X8_PRED /*I8X8_PRED*/,
THR_B_PRED /*B_PRED*/,
};
cpi->mode_chosen_counts[kf_mode_index[mb_mode]]++;
#endif
} else {
/*
// Reduce the activation RD thresholds for the best choice mode
if ((cpi->rd_baseline_thresh[mb_mode_index] > 0) &&
(cpi->rd_baseline_thresh[mb_mode_index] < (INT_MAX >> 2)))
{
int best_adjustment = (cpi->rd_thresh_mult[mb_mode_index] >> 2);
cpi->rd_thresh_mult[mb_mode_index] =
(cpi->rd_thresh_mult[mb_mode_index]
>= (MIN_THRESHMULT + best_adjustment)) ?
cpi->rd_thresh_mult[mb_mode_index] - best_adjustment :
MIN_THRESHMULT;
cpi->rd_threshes[mb_mode_index] =
(cpi->rd_baseline_thresh[mb_mode_index] >> 7)
* cpi->rd_thresh_mult[mb_mode_index];
}
*/
// Note how often each mode chosen as best
cpi->mode_chosen_counts[mb_mode_index]++;
cpi->prediction_error += ctx->distortion;
cpi->intra_error += ctx->intra_error;
cpi->rd_comp_pred_diff[0] += ctx->single_pred_diff;
cpi->rd_comp_pred_diff[1] += ctx->comp_pred_diff;
cpi->rd_comp_pred_diff[2] += ctx->hybrid_pred_diff;
}
}

static void pick_mb_modes(VP9_COMP *cpi,
VP9_COMMON *cm,
int mb_row,
int mb_col,
MACROBLOCK *x,
MACROBLOCKD *xd,
TOKENEXTRA **tp,
int *totalrate,
int *totaldist) {
int i;
int map_index;
int recon_yoffset, recon_uvoffset;
int ref_fb_idx = cm->lst_fb_idx;
int dst_fb_idx = cm->new_fb_idx;
int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
ENTROPY_CONTEXT_PLANES left_context[2];
ENTROPY_CONTEXT_PLANES above_context[2];
ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
+ mb_col;
// Offsets to move pointers from MB to MB within a SB in raster order
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = { +1, -1, +1, +1};
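// Applied after each MB, these deltas walk the 2x2 SB in raster order:
// right, then down-left, then right; the final step (up and right) lands
// on the top-left MB of the next SB.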
/* Function should not modify L & A contexts; save and restore on exit */
vpx_memcpy(left_context,
cm->left_context,
sizeof(left_context));
vpx_memcpy(above_context,
initial_above_context_ptr,
sizeof(above_context));
/* Encode MBs in raster order within the SB */
for (i = 0; i < 4; i++) {
int dy = row_delta[i];
int dx = col_delta[i];
int offset_unextended = dy * cm->mb_cols + dx;
int offset_extended = dy * xd->mode_info_stride + dx;
MB_MODE_INFO *mbmi;
// TODO Many of the index items here can be computed more efficiently!
if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
// MB lies outside frame, move on
mb_row += dy;
mb_col += dx;
// Update pointers
x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
x->gf_active_ptr += offset_unextended;
x->partition_info += offset_extended;
xd->mode_info_context += offset_extended;
xd->prev_mode_info_context += offset_extended;
assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
(xd->mode_info_context - cpi->common.mip));
continue;
}
// Index of the MB in the SB 0..3
xd->mb_index = i;
mbmi = &xd->mode_info_context->mbmi;
map_index = (mb_row * cpi->common.mb_cols) + mb_col;
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
// set above context pointer
xd->above_context = cm->above_context + mb_col;
// Restore the appropriate left context depending on which
// row in the SB the MB is situated
xd->left_context = cm->left_context + (i >> 1);

// Set up distance of MB to edge of frame in 1/8th pel units
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
// Set up limit values for MV components to prevent them from
// extending beyond the UMV borders assuming 16x16 block size
x->mv_row_min = -((mb_row * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_col_min = -((mb_col * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16 +
(VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16 +
(VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND));
xd->up_available = (mb_row != 0);
xd->left_available = (mb_col != 0);
recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
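// Luma advances 16 pels per MB; chroma planes advance 8 (4:2:0 subsampling).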
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
// Copy current MB to a work buffer
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
// Is segmentation enabled
if (xd->segmentation_enabled) {
// Code to set segment id in xd->mbmi.segment_id
mbmi->segment_id = cm->last_frame_seg_map[map_index];
if (mbmi->segment_id > 3)
mbmi->segment_id = 0;

vp9_mb_init_quantizer(cpi, x);
} else
// Set to Segment 0 by default
mbmi->segment_id = 0;
#if CONFIG_SUPERBLOCKS
xd->mode_info_context->mbmi.encoded_as_sb = 0;
#endif
vp9_intra_prediction_down_copy(xd);
// Find best coding mode & reconstruct the MB so it is available
// as a predictor for MBs that follow in the SB
if (cm->frame_type == KEY_FRAME) {
int r, d;

vp9_rd_pick_intra_mode(cpi, x, &r, &d);
*totalrate += r;
*totaldist += d;

// Dummy encode, do not do the tokenization
encode_intra_macro_block(cpi, x, tp, 0);
// Save the coding context
vpx_memcpy(&x->mb_context[i].mic, xd->mode_info_context,
sizeof(MODE_INFO));
} else {
int seg_id, r, d;

if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
!vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME) &&
vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME) &&
vp9_check_segref(xd, 1, INTRA_FRAME) +
vp9_check_segref(xd, 1, LAST_FRAME) +
vp9_check_segref(xd, 1, GOLDEN_FRAME) +
vp9_check_segref(xd, 1, ALTREF_FRAME) == 1) {
cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
} else {
cpi->seg0_progress =
(((mb_col & ~1) * 2 + (mb_row & ~1) * cm->mb_cols + i) << 16) / cm->MBs;
}
vp9_pick_mode_inter_macroblock(cpi, x, recon_yoffset,
recon_uvoffset, &r, &d);
*totalrate += r;
*totaldist += d;

// Dummy encode, do not do the tokenization
encode_inter_macroblock(cpi, x, tp,
recon_yoffset, recon_uvoffset, 0);

seg_id = mbmi->segment_id;
if (cpi->mb.e_mbd.segmentation_enabled && seg_id == 0) {
cpi->seg0_idx++;
}
if (!xd->segmentation_enabled ||
!vp9_segfeature_active(xd, seg_id, SEG_LVL_REF_FRAME) ||
vp9_check_segref(xd, seg_id, INTRA_FRAME) +
vp9_check_segref(xd, seg_id, LAST_FRAME) +
vp9_check_segref(xd, seg_id, GOLDEN_FRAME) +
vp9_check_segref(xd, seg_id, ALTREF_FRAME) > 1) {
int pred_flag = vp9_get_pred_flag(xd, PRED_REF);
int pred_context = vp9_get_pred_context(cm, xd, PRED_REF);
// Count prediction success
cpi->ref_pred_count[pred_context][pred_flag]++;
}
}
// Next MB
mb_row += dy;
mb_col += dx;

x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
x->gf_active_ptr += offset_unextended;
x->partition_info += offset_extended;
xd->mode_info_context += offset_extended;
xd->prev_mode_info_context += offset_extended;
assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
(xd->mode_info_context - cpi->common.mip));
}
/* Restore L & A coding context to those in place on entry */
vpx_memcpy(cm->left_context,
left_context,
sizeof(left_context));
vpx_memcpy(initial_above_context_ptr,
above_context,
sizeof(above_context));
}

#if CONFIG_SUPERBLOCKS
static void pick_sb_modes (VP9_COMP *cpi,
VP9_COMMON *cm,
int mb_row,
int mb_col,
MACROBLOCK *x,
MACROBLOCKD *xd,
TOKENEXTRA **tp,
int *totalrate,
int *totaldist)
{
int map_index;
int recon_yoffset, recon_uvoffset;
int ref_fb_idx = cm->lst_fb_idx;
int dst_fb_idx = cm->new_fb_idx;
int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
ENTROPY_CONTEXT_PLANES left_context[2];
ENTROPY_CONTEXT_PLANES above_context[2];
ENTROPY_CONTEXT_PLANES *initial_above_context_ptr = cm->above_context
+ mb_col;
/* Function should not modify L & A contexts; save and restore on exit */
vpx_memcpy (left_context,
cm->left_context,
sizeof(left_context));
vpx_memcpy (above_context,
initial_above_context_ptr,
sizeof(above_context));
map_index = (mb_row * cpi->common.mb_cols) + mb_col;
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
/* set above context pointer */
xd->above_context = cm->above_context + mb_col;
/* Restore the appropriate left context depending on which
* row in the SB the MB is situated */
xd->left_context = cm->left_context;
// Set up distance of MB to edge of frame in 1/8th pel units
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
/* Set up limit values for MV components to prevent them from
* extending beyond the UMV borders assuming 16x16 block size */
x->mv_row_min = -((mb_row * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_col_min = -((mb_col * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16 +
(VP9BORDERINPIXELS - 32 - VP9_INTERP_EXTEND));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16 +
(VP9BORDERINPIXELS - 32 - VP9_INTERP_EXTEND));
xd->up_available = (mb_row != 0);
xd->left_available = (mb_col != 0);
recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
#if 0 // FIXME
/* Copy current MB to a work buffer */
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT;
if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
/* Is segmentation enabled */
if (xd->segmentation_enabled)
{
/* Code to set segment id in xd->mbmi.segment_id */
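/* Note: the '&&' chains below collapse the four constituent MB segment ids
 * to 0 or 1, so the SB inherits segment 1 only when all four MBs carry a
 * nonzero id. */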
if (xd->update_mb_segmentation_map)
xd->mode_info_context->mbmi.segment_id =
cpi->segmentation_map[map_index] &&
cpi->segmentation_map[map_index + 1] &&
cpi->segmentation_map[map_index + cm->mb_cols] &&
cpi->segmentation_map[map_index + cm->mb_cols + 1];
else
xd->mode_info_context->mbmi.segment_id =
cm->last_frame_seg_map[map_index] &&
cm->last_frame_seg_map[map_index + 1] &&
cm->last_frame_seg_map[map_index + cm->mb_cols] &&
cm->last_frame_seg_map[map_index + cm->mb_cols + 1];
if (xd->mode_info_context->mbmi.segment_id > 3)
xd->mode_info_context->mbmi.segment_id = 0;
vp9_mb_init_quantizer(cpi, x);
}
else
/* Set to Segment 0 by default */
xd->mode_info_context->mbmi.segment_id = 0;
x->active_ptr = cpi->active_map + map_index;
cpi->update_context = 0; // TODO Do we need this now??
/* Find best coding mode & reconstruct the MB so it is available
* as a predictor for MBs that follow in the SB */
if (cm->frame_type == KEY_FRAME)
{
vp9_rd_pick_intra_mode_sb(cpi, x,
totalrate,
totaldist);
/* Save the coding context */
vpx_memcpy(&x->sb_context[0].mic, xd->mode_info_context,
sizeof(MODE_INFO));
} else {
if (xd->segmentation_enabled && cpi->seg0_cnt > 0 &&
!vp9_segfeature_active(xd, 0, SEG_LVL_REF_FRAME) &&
vp9_segfeature_active(xd, 1, SEG_LVL_REF_FRAME) &&
vp9_check_segref(xd, 1, INTRA_FRAME) +
vp9_check_segref(xd, 1, LAST_FRAME) +
vp9_check_segref(xd, 1, GOLDEN_FRAME) +
vp9_check_segref(xd, 1, ALTREF_FRAME) == 1) {
cpi->seg0_progress = (cpi->seg0_idx << 16) / cpi->seg0_cnt;
} else {
cpi->seg0_progress =
(((mb_col & ~1) * 2 + (mb_row & ~1) * cm->mb_cols) << 16) / cm->MBs;
}
vp9_rd_pick_inter_mode_sb(cpi, x,
recon_yoffset,
recon_uvoffset,
totalrate,
totaldist);
}
/* Restore L & A coding context to those in place on entry */
vpx_memcpy (cm->left_context,
left_context,
sizeof(left_context));
vpx_memcpy (initial_above_context_ptr,
above_context,
sizeof(above_context));
}
#endif
static void encode_sb(VP9_COMP *cpi,
VP9_COMMON *cm,
int mbrow,
int mbcol,
MACROBLOCK *x,
MACROBLOCKD *xd,
TOKENEXTRA **tp) {
int i;
int map_index;
int mb_row, mb_col;
int recon_yoffset, recon_uvoffset;
int ref_fb_idx = cm->lst_fb_idx;
int dst_fb_idx = cm->new_fb_idx;
int recon_y_stride = cm->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = cm->yv12_fb[ref_fb_idx].uv_stride;
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = { +1, -1, +1, +1};
mb_row = mbrow;
mb_col = mbcol;
/* Encode MBs in raster order within the SB */
for (i = 0; i < 4; i++) {
int dy = row_delta[i];
int dx = col_delta[i];
int offset_extended = dy * xd->mode_info_stride + dx;
int offset_unextended = dy * cm->mb_cols + dx;
if ((mb_row >= cm->mb_rows) || (mb_col >= cm->mb_cols)) {
// MB lies outside frame, move on
mb_row += dy;
mb_col += dx;
x->src.y_buffer += 16 * (dx + dy * x->src.y_stride);
x->src.u_buffer += 8 * (dx + dy * x->src.uv_stride);
x->src.v_buffer += 8 * (dx + dy * x->src.uv_stride);
x->gf_active_ptr += offset_unextended;
x->partition_info += offset_extended;
xd->mode_info_context += offset_extended;
xd->prev_mode_info_context += offset_extended;
assert((xd->prev_mode_info_context - cpi->common.prev_mip) ==
(xd->mode_info_context - cpi->common.mip));
continue;
}
#ifdef ENC_DEBUG
enc_debug = (cpi->common.current_video_frame == 0 &&
mb_row == 0 && mb_col == 0);
mb_col_debug = mb_col;
mb_row_debug = mb_row;
#endif
update_state(cpi, x, &x->mb_context[i]);
map_index = (mb_row * cpi->common.mb_cols) + mb_col;
x->mb_activity_ptr = &cpi->mb_activity_map[map_index];
// reset above block coeffs
xd->above_context = cm->above_context + mb_col;
// Set up distance of MB to edge of the frame in 1/8th pel units
xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_bottom_edge = ((cm->mb_rows - 1 - mb_row) * 16) << 3;
xd->mb_to_right_edge = ((cm->mb_cols - 1 - mb_col) * 16) << 3;
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
// Set up limit values for MV components to prevent them from
// extending beyond the UMV borders assuming 32x32 block size
x->mv_row_min = -((mb_row * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_col_min = -((mb_col * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16 +
(VP9BORDERINPIXELS - 32 - VP9_INTERP_EXTEND));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16 +
(VP9BORDERINPIXELS - 32 - VP9_INTERP_EXTEND));
} else {
#endif
// Set up limit values for MV components to prevent them from
// extending beyond the UMV borders assuming 16x16 block size
x->mv_row_min = -((mb_row * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_col_min = -((mb_col * 16) + VP9BORDERINPIXELS - VP9_INTERP_EXTEND);
x->mv_row_max = ((cm->mb_rows - 1 - mb_row) * 16 +
(VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND));
x->mv_col_max = ((cm->mb_cols - 1 - mb_col) * 16 +
(VP9BORDERINPIXELS - 16 - VP9_INTERP_EXTEND));
#if CONFIG_SUPERBLOCKS
}
#endif
xd->up_available = (mb_row != 0);
xd->left_available = (mb_col != 0);
recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
xd->dst.y_buffer = cm->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
// Copy current MB to a work buffer
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);

if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x);
// Is segmentation enabled
if (xd->segmentation_enabled) {
vp9_mb_init_quantizer(cpi, x);
}
#if CONFIG_SUPERBLOCKS
if (!xd->mode_info_context->mbmi.encoded_as_sb)
#endif
vp9_intra_prediction_down_copy(xd);
if (cm->frame_type == KEY_FRAME) {
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb)
encode_intra_super_block(cpi, x, tp, mb_col);
else
#endif
encode_intra_macro_block(cpi, x, tp, 1);
// Note the encoder may have changed the segment_id
} else {
unsigned char *segment_id;
int seg_ref_active;
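// Record how often single vs. compound prediction is chosen in each
// prediction context; these counts are used later to adapt the PRED_COMP
// probabilities.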
if (xd->mode_info_context->mbmi.ref_frame) {
unsigned char pred_context;
pred_context = vp9_get_pred_context(cm, xd, PRED_COMP);
if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME)
cpi->single_pred_count[pred_context]++;
else
cpi->comp_pred_count[pred_context]++;
}