/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include "./vp9_rtcd.h"
#include "./vpx_config.h"
#include "vpx_ports/vpx_timer.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_idct.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/encoder/vp9_aq_complexity.h"
#include "vp9/encoder/vp9_aq_cyclicrefresh.h"
#include "vp9/encoder/vp9_aq_variance.h"
#include "vp9/encoder/vp9_encodeframe.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/encoder/vp9_extend.h"
#include "vp9/encoder/vp9_pickmode.h"
#include "vp9/encoder/vp9_rd.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_segmentation.h"
#include "vp9/encoder/vp9_tokenize.h"
#define GF_ZEROMV_ZBIN_BOOST 0
#define LF_ZEROMV_ZBIN_BOOST 0
#define MV_ZBIN_BOOST 0
#define SPLIT_MV_ZBIN_BOOST 0
#define INTRA_ZBIN_BOOST 0
static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t, int output_enabled,
int mi_row, int mi_col, BLOCK_SIZE bsize,
PICK_MODE_CONTEXT *ctx);
// Motion vector component magnitude threshold for defining fast motion.
#define FAST_MOTION_MV_THRESH 24
// This is used as a reference when computing the source variance for the
// purposes of activity masking.
// Eventually this should be replaced by custom no-reference routines,
// which will be faster.
static const uint8_t VP9_VAR_OFFS[64] = {
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128
};
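// Returns the average per-pixel variance of the source block, measured
// against the flat all-128 reference above and normalized by the pixel
// count of the block.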
static unsigned int get_sby_perpixel_variance(VP9_COMP *cpi,
const struct buf_2d *ref,
BLOCK_SIZE bs) {
unsigned int sse;
const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
VP9_VAR_OFFS, 0, &sse);
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
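// As above, but measures the source block against the co-located block
// in the last coded frame instead of a flat reference.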
static unsigned int get_sby_perpixel_diff_variance(VP9_COMP *cpi,
const struct buf_2d *ref,
int mi_row, int mi_col,
BLOCK_SIZE bs) {
const YV12_BUFFER_CONFIG *last = get_ref_frame_buffer(cpi, LAST_FRAME);
const uint8_t* last_y = &last->y_buffer[mi_row * MI_SIZE * last->y_stride +
mi_col * MI_SIZE];
unsigned int sse;
const unsigned int var = cpi->fn_ptr[bs].vf(ref->buf, ref->stride,
last_y, last->y_stride, &sse);
return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
}
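// Maps the 64x64 source/last-frame difference variance to a fixed
// partition size for RD encoding: the lower the variance, the larger
// the block size chosen.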
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi,
int mi_row,
int mi_col) {
unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src,
mi_row, mi_col,
BLOCK_64X64);
if (var < 8)
return BLOCK_64X64;
else if (var < 128)
return BLOCK_32X32;
else if (var < 2048)
return BLOCK_16X16;
else
return BLOCK_8X8;
}
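// Non-RD variant of the mapping above, using tighter thresholds and
// never going below 16x16.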
static BLOCK_SIZE get_nonrd_var_based_fixed_partition(VP9_COMP *cpi,
int mi_row,
int mi_col) {
unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src,
mi_row, mi_col,
BLOCK_64X64);
if (var < 4)
return BLOCK_64X64;
else if (var < 10)
return BLOCK_32X32;
else
return BLOCK_16X16;
}
// Lighter version of set_offsets that only sets the mode info
// pointers.
static INLINE void set_modeinfo_offsets(VP9_COMMON *const cm,
MACROBLOCKD *const xd,
int mi_row,
int mi_col) {
const int idx_str = xd->mi_stride * mi_row + mi_col;
xd->mi = cm->mi_grid_visible + idx_str;
xd->mi[0] = cm->mi + idx_str;
}
static void set_offsets(VP9_COMP *cpi, const TileInfo *const tile,
int mi_row, int mi_col, BLOCK_SIZE bsize) {
MACROBLOCK *const x = &cpi->mb;
VP9_COMMON *const cm = &cpi->common;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
const int mi_height = num_8x8_blocks_high_lookup[bsize];
const struct segmentation *const seg = &cm->seg;
set_skip_context(xd, mi_row, mi_col);
set_modeinfo_offsets(cm, xd, mi_row, mi_col);
mbmi = &xd->mi[0]->mbmi;
// Set up destination pointers.
vp9_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
// Set up limit values for MV components.
// MVs beyond this range do not produce a new or different prediction block.
x->mv_row_min = -(((mi_row + mi_height) * MI_SIZE) + VP9_INTERP_EXTEND);
x->mv_col_min = -(((mi_col + mi_width) * MI_SIZE) + VP9_INTERP_EXTEND);
x->mv_row_max = (cm->mi_rows - mi_row) * MI_SIZE + VP9_INTERP_EXTEND;
x->mv_col_max = (cm->mi_cols - mi_col) * MI_SIZE + VP9_INTERP_EXTEND;
// Set up distance of MB to edge of frame in 1/8th pel units.
assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1)));
set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width,
cm->mi_rows, cm->mi_cols);
// Set up source buffers.
vp9_setup_src_planes(x, cpi->Source, mi_row, mi_col);
// R/D setup.
x->rddiv = cpi->rd.RDDIV;
x->rdmult = cpi->rd.RDMULT;
// Setup segment ID.
if (seg->enabled) {
if (cpi->oxcf.aq_mode != VARIANCE_AQ) {
const uint8_t *const map = seg->update_map ? cpi->segmentation_map
: cm->last_frame_seg_map;
mbmi->segment_id = vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
}
vp9_init_plane_quantizers(cpi, x);
x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
} else {
mbmi->segment_id = 0;
x->encode_breakout = cpi->encode_breakout;
}
}
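// Points every mode-info entry covered by the block at its top-left
// entry, so all of them share a single mode info.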
static void duplicate_mode_info_in_sb(VP9_COMMON *cm, MACROBLOCKD *xd,
int mi_row, int mi_col,
BLOCK_SIZE bsize) {
const int block_width = num_8x8_blocks_wide_lookup[bsize];
const int block_height = num_8x8_blocks_high_lookup[bsize];
int i, j;
for (j = 0; j < block_height; ++j)
for (i = 0; i < block_width; ++i) {
if (mi_row + j < cm->mi_rows && mi_col + i < cm->mi_cols)
xd->mi[j * xd->mi_stride + i] = xd->mi[0];
}
}
static void set_block_size(VP9_COMP *const cpi,
int mi_row, int mi_col,
BLOCK_SIZE bsize) {
if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
MACROBLOCKD *const xd = &cpi->mb.e_mbd;
set_modeinfo_offsets(&cpi->common, xd, mi_row, mi_col);
xd->mi[0]->mbmi.sb_type = bsize;
duplicate_mode_info_in_sb(&cpi->common, xd, mi_row, mi_col, bsize);
}
}
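// Variance tree data structures. Each node stores the variance of the
// whole block (none), of its two horizontal and two vertical halves, and
// links to four child nodes one level down, from 64x64 down to 8x8.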
typedef struct {
int64_t sum_square_error;
int64_t sum_error;
int count;
int variance;
} var;
typedef struct {
var none;
var horz[2];
var vert[2];
} partition_variance;
typedef struct {
partition_variance part_variances;
var split[4];
} v8x8;
typedef struct {
partition_variance part_variances;
v8x8 split[4];
} v16x16;
typedef struct {
partition_variance part_variances;
v16x16 split[4];
} v32x32;
typedef struct {
partition_variance part_variances;
v32x32 split[4];
} v64x64;
typedef struct {
partition_variance *part_variances;
var *split[4];
} variance_node;
typedef enum {
V16X16,
V32X32,
V64X64,
} TREE_LEVEL;
static void tree_to_node(void *data, BLOCK_SIZE bsize, variance_node *node) {
int i;
node->part_variances = NULL;
vpx_memset(node->split, 0, sizeof(node->split));
switch (bsize) {
case BLOCK_64X64: {
v64x64 *vt = (v64x64 *) data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
break;
}
case BLOCK_32X32: {
v32x32 *vt = (v32x32 *) data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
break;
}
case BLOCK_16X16: {
v16x16 *vt = (v16x16 *) data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i].part_variances.none;
break;
}
case BLOCK_8X8: {
v8x8 *vt = (v8x8 *) data;
node->part_variances = &vt->part_variances;
for (i = 0; i < 4; i++)
node->split[i] = &vt->split[i];
break;
}
default: {
assert(0);
break;
}
}
}
// Set variance values given sum square error, sum error, count.
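// The stored value is the population variance scaled by 256:
//   variance = 256 * (sum_square_error - sum_error^2 / count) / count
// The scaling presumably keeps precision in the integer arithmetic.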
static void fill_variance(int64_t s2, int64_t s, int c, var *v) {
v->sum_square_error = s2;
v->sum_error = s;
v->count = c;
if (c > 0)
v->variance = (int)(256 *
(v->sum_square_error - v->sum_error * v->sum_error /
v->count) / v->count);
else
v->variance = 0;
}
static void sum_2_variances(const var *a, const var *b, var *r) {
fill_variance(a->sum_square_error + b->sum_square_error,
a->sum_error + b->sum_error, a->count + b->count, r);
}
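// Combines the four child variances of a block into the variances of
// its two horizontal halves, its two vertical halves, and the block as
// a whole.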
static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
variance_node node;
tree_to_node(data, bsize, &node);
sum_2_variances(node.split[0], node.split[1], &node.part_variances->horz[0]);
sum_2_variances(node.split[2], node.split[3], &node.part_variances->horz[1]);
sum_2_variances(node.split[0], node.split[2], &node.part_variances->vert[0]);
sum_2_variances(node.split[1], node.split[3], &node.part_variances->vert[1]);
sum_2_variances(&node.part_variances->vert[0], &node.part_variances->vert[1],
&node.part_variances->none);
}
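// Tries to assign a partitioning to the block from its variance tree:
// first no split, then a vertical split, then a horizontal split, each
// tested against a threshold scaled by the base q index. Returns 1 if a
// partitioning was set, 0 if the caller must split further.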
static int set_vt_partitioning(VP9_COMP *cpi,
void *data,
BLOCK_SIZE bsize,
int mi_row,
int mi_col) {
VP9_COMMON *const cm = &cpi->common;
variance_node vt;
const int block_width = num_8x8_blocks_wide_lookup[bsize];
const int block_height = num_8x8_blocks_high_lookup[bsize];
// TODO(debargha): Choose this more intelligently.
const int64_t threshold_multiplier = 25;
int64_t threshold = threshold_multiplier * cpi->common.base_qindex;
assert(block_height == block_width);
tree_to_node(data, bsize, &vt);
// Split none is available only if more than half the block width and
// height lie inside the visible image.
if (mi_col + block_width / 2 < cm->mi_cols &&
mi_row + block_height / 2 < cm->mi_rows &&
vt.part_variances->none.variance < threshold) {
set_block_size(cpi, mi_row, mi_col, bsize);
return 1;
}
// Vertical split is available on all but the bottom border.
if (mi_row + block_height / 2 < cm->mi_rows &&
vt.part_variances->vert[0].variance < threshold &&
vt.part_variances->vert[1].variance < threshold) {
BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_VERT);
set_block_size(cpi, mi_row, mi_col, subsize);
set_block_size(cpi, mi_row, mi_col + block_width / 2, subsize);
return 1;
}
// Horizontal split is available on all but the right border.
if (mi_col + block_width / 2 < cm->mi_cols &&
vt.part_variances->horz[0].variance < threshold &&
vt.part_variances->horz[1].variance < threshold) {
BLOCK_SIZE subsize = get_subsize(bsize, PARTITION_HORZ);
set_block_size(cpi, mi_row, mi_col, subsize);
set_block_size(cpi, mi_row + block_height / 2, mi_col, subsize);
return 1;
}
return 0;
}
// TODO(debargha): Fix this function and make it work as expected.
static void choose_partitioning(VP9_COMP *cpi,
const TileInfo *const tile,
int mi_row, int mi_col) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *x = &cpi->mb;
MACROBLOCKD *xd = &cpi->mb.e_mbd;
int i, j, k;
v64x64 vt;
uint8_t *s;
const uint8_t *d;
int sp;
int dp;
int pixels_wide = 64, pixels_high = 64;
int_mv nearest_mv, near_mv;
const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, LAST_FRAME);
const struct scale_factors *const sf = &cm->frame_refs[LAST_FRAME - 1].sf;
vp9_zero(vt);
set_offsets(cpi, tile, mi_row, mi_col, BLOCK_64X64);
if (xd->mb_to_right_edge < 0)
pixels_wide += (xd->mb_to_right_edge >> 3);
if (xd->mb_to_bottom_edge < 0)
pixels_high += (xd->mb_to_bottom_edge >> 3);
s = x->plane[0].src.buf;
sp = x->plane[0].src.stride;
if (cm->frame_type != KEY_FRAME) {
vp9_setup_pre_planes(xd, 0, yv12, mi_row, mi_col, sf);
xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME;
xd->mi[0]->mbmi.sb_type = BLOCK_64X64;
vp9_find_best_ref_mvs(xd, cm->allow_high_precision_mv,
xd->mi[0]->mbmi.ref_mvs[LAST_FRAME],
&nearest_mv, &near_mv);
xd->mi[0]->mbmi.mv[0] = nearest_mv;
vp9_build_inter_predictors_sby(xd, mi_row, mi_col, BLOCK_64X64);
d = xd->plane[0].dst.buf;
dp = xd->plane[0].dst.stride;
} else {
d = VP9_VAR_OFFS;
dp = 0;
}
// Fill in the entire tree of 8x8 variances for splits.
for (i = 0; i < 4; i++) {
const int x32_idx = ((i & 1) << 5);
const int y32_idx = ((i >> 1) << 5);
for (j = 0; j < 4; j++) {
const int x16_idx = x32_idx + ((j & 1) << 4);
const int y16_idx = y32_idx + ((j >> 1) << 4);
v16x16 *vst = &vt.split[i].split[j];
for (k = 0; k < 4; k++) {
int x_idx = x16_idx + ((k & 1) << 3);
int y_idx = y16_idx + ((k >> 1) << 3);
unsigned int sse = 0;
int sum = 0;
if (x_idx < pixels_wide && y_idx < pixels_high)
vp9_get8x8var(s + y_idx * sp + x_idx, sp,
d + y_idx * dp + x_idx, dp, &sse, &sum);
fill_variance(sse, sum, 64, &vst->split[k].part_variances.none);
}
}
}
// Fill the rest of the variance tree by summing split partition values.
for (i = 0; i < 4; i++) {
for (j = 0; j < 4; j++) {
fill_variance_tree(&vt.split[i].split[j], BLOCK_16X16);
}
fill_variance_tree(&vt.split[i], BLOCK_32X32);
}
fill_variance_tree(&vt, BLOCK_64X64);
// Now go through the entire structure, splitting every block until we
// reach one whose variance is lower than our threshold, or we hit the
// 8x8 level.
if (!set_vt_partitioning(cpi, &vt, BLOCK_64X64,
mi_row, mi_col)) {
for (i = 0; i < 4; ++i) {
const int x32_idx = ((i & 1) << 2);
const int y32_idx = ((i >> 1) << 2);
if (!set_vt_partitioning(cpi, &vt.split[i], BLOCK_32X32,
(mi_row + y32_idx), (mi_col + x32_idx))) {
for (j = 0; j < 4; ++j) {
const int x16_idx = ((j & 1) << 1);
const int y16_idx = ((j >> 1) << 1);
// NOTE: This is a temporary hack to disable 8x8 partitions, since they
// work really badly, possibly due to a bug.
#define DISABLE_8X8_VAR_BASED_PARTITION
#ifdef DISABLE_8X8_VAR_BASED_PARTITION
if (mi_row + y32_idx + y16_idx + 1 < cm->mi_rows &&
mi_col + x32_idx + x16_idx + 1 < cm->mi_cols) {
set_block_size(cpi,
(mi_row + y32_idx + y16_idx),
(mi_col + x32_idx + x16_idx),
BLOCK_16X16);
} else {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
set_block_size(cpi,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx),
BLOCK_8X8);
}
}
#else
if (!set_vt_partitioning(cpi, &vt.split[i].split[j], tile,
BLOCK_16X16,
(mi_row + y32_idx + y16_idx),
(mi_col + x32_idx + x16_idx), 2)) {
for (k = 0; k < 4; ++k) {
const int x8_idx = (k & 1);
const int y8_idx = (k >> 1);
set_block_size(cpi,
(mi_row + y32_idx + y16_idx + y8_idx),
(mi_col + x32_idx + x16_idx + x8_idx),
BLOCK_8X8);
}
}
#endif
}
}
}
}
}
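// Copies the mode decision held in ctx back into the frame's mode-info
// grid and, when output is enabled, updates segmentation, mode counts
// and rate-distortion statistics to match.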
static void update_state(VP9_COMP *cpi, PICK_MODE_CONTEXT *ctx,
int mi_row, int mi_col, BLOCK_SIZE bsize,
int output_enabled) {
int i, x_idx, y;
VP9_COMMON *const cm = &cpi->common;
RD_OPT *const rd_opt = &cpi->rd;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
struct macroblock_plane *const p = x->plane;
struct macroblockd_plane *const pd = xd->plane;
MODE_INFO *mi = &ctx->mic;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
MODE_INFO *mi_addr = xd->mi[0];
const struct segmentation *const seg = &cm->seg;
const int mis = cm->mi_stride;
const int mi_width = num_8x8_blocks_wide_lookup[bsize];
const int mi_height = num_8x8_blocks_high_lookup[bsize];
int max_plane;
assert(mi->mbmi.sb_type == bsize);
*mi_addr = *mi;
// If segmentation is in use.
if (seg->enabled && output_enabled) {
// For in-frame complexity AQ, copy the segment id from the segment map.
if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
const uint8_t *const map = seg->update_map ? cpi->segmentation_map
: cm->last_frame_seg_map;
mi_addr->mbmi.segment_id =
vp9_get_segment_id(cm, map, bsize, mi_row, mi_col);
}
// Else, for cyclic refresh mode, update the segment map, set the
// segment id, and then update the quantizer.
if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
vp9_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi,
mi_row, mi_col, bsize, 1);
}
}
max_plane = is_inter_block(mbmi) ? MAX_MB_PLANE : 1;
for (i = 0; i < max_plane; ++i) {
p[i].coeff = ctx->coeff_pbuf[i][1];
p[i].qcoeff = ctx->qcoeff_pbuf[i][1];
pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][1];
p[i].eobs = ctx->eobs_pbuf[i][1];
}
for (i = max_plane; i < MAX_MB_PLANE; ++i) {
p[i].coeff = ctx->coeff_pbuf[i][2];
p[i].qcoeff = ctx->qcoeff_pbuf[i][2];
pd[i].dqcoeff = ctx->dqcoeff_pbuf[i][2];
p[i].eobs = ctx->eobs_pbuf[i][2];
}
// Restore the coding context of the MB to the one that was in place
// when the mode was picked for it.
for (y = 0; y < mi_height; y++)
for (x_idx = 0; x_idx < mi_width; x_idx++)
if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx
&& (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) {
xd->mi[x_idx + y * mis] = mi_addr;
}
if (cpi->oxcf.aq_mode)
vp9_init_plane_quantizers(cpi, x);
// FIXME(rbultje) I'm pretty sure this should go to the end of this block
// (i.e. after the output_enabled)
if (bsize < BLOCK_32X32) {
if (bsize < BLOCK_16X16)
ctx->tx_rd_diff[ALLOW_16X16] = ctx->tx_rd_diff[ALLOW_8X8];
ctx->tx_rd_diff[ALLOW_32X32] = ctx->tx_rd_diff[ALLOW_16X16];
}
if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int;
}
x->skip = ctx->skip;
vpx_memcpy(x->zcoeff_blk[mbmi->tx_size], ctx->zcoeff_blk,
sizeof(uint8_t) * ctx->num_4x4_blk);
if (!output_enabled)
return;
if (!vp9_segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
for (i = 0; i < TX_MODES; i++)
rd_opt->tx_select_diff[i] += ctx->tx_rd_diff[i];
}
#if CONFIG_INTERNAL_STATS
if (frame_is_intra_only(cm)) {
static const int kf_mode_index[] = {
THR_DC /*DC_PRED*/,
THR_V_PRED /*V_PRED*/,
THR_H_PRED /*H_PRED*/,
THR_D45_PRED /*D45_PRED*/,
THR_D135_PRED /*D135_PRED*/,
THR_D117_PRED /*D117_PRED*/,
THR_D153_PRED /*D153_PRED*/,
THR_D207_PRED /*D207_PRED*/,
THR_D63_PRED /*D63_PRED*/,
THR_TM /*TM_PRED*/,
};
++cpi->mode_chosen_counts[kf_mode_index[mbmi->mode]];
} else {
// Note how often each mode is chosen as best.
++cpi->mode_chosen_counts[ctx->best_mode_index];
}
#endif
if (!frame_is_intra_only(cm)) {
if (is_inter_block(mbmi)) {
vp9_update_mv_count(cm, xd);
if (cm->interp_filter == SWITCHABLE) {
const int ctx = vp9_get_pred_context_switchable_interp(xd);
++cm->counts.switchable_interp[ctx][mbmi->interp_filter];
}
}
rd_opt->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff;
rd_opt->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff;
rd_opt->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff;
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
rd_opt->filter_diff[i] += ctx->best_filter_diff[i];
}
}
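// Sets the per-plane source buffer pointers to the portion of the
// source frame covered by the block at (mi_row, mi_col).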
void vp9_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
int mi_row, int mi_col) {
uint8_t *const buffers[3] = {src->y_buffer, src->u_buffer, src->v_buffer };
const int strides[3] = {src->y_stride, src->uv_stride, src->uv_stride };
int i;
// Set current frame pointer.
x->e_mbd.cur_buf = src;
for (i = 0; i < MAX_MB_PLANE; i++)
setup_pred_plane(&x->plane[i].src, buffers[i], strides[i], mi_row, mi_col,
NULL, x->e_mbd.plane[i].subsampling_x,
x->e_mbd.plane[i].subsampling_y);
}
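// Forces the block to a skipped ZEROMV prediction from the last frame,
// inheriting the interpolation filter from the above or left neighbor
// when one is available.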
static void set_mode_info_seg_skip(MACROBLOCK *x, TX_MODE tx_mode, int *rate,
int64_t *dist, BLOCK_SIZE bsize) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
INTERP_FILTER filter_ref;
if (xd->up_available)
filter_ref = xd->mi[-xd->mi_stride]->mbmi.interp_filter;
else if (xd->left_available)
filter_ref = xd->mi[-1]->mbmi.interp_filter;
else
filter_ref = EIGHTTAP;
mbmi->sb_type = bsize;
mbmi->mode = ZEROMV;
mbmi->tx_size = MIN(max_txsize_lookup[bsize],
tx_mode_to_biggest_tx_size[tx_mode]);
mbmi->skip = 1;
mbmi->uv_mode = DC_PRED;
mbmi->ref_frame[0] = LAST_FRAME;
mbmi->ref_frame[1] = NONE;
mbmi->mv[0].as_int = 0;
mbmi->interp_filter = filter_ref;
xd->mi[0]->bmi[0].as_mv[0].as_int = 0;
x->skip = 1;
*rate = 0;
*dist = 0;
}
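// Runs the rate-distortion mode search for a single block and returns
// the chosen mode's rate and distortion through totalrate/totaldist.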
static void rd_pick_sb_modes(VP9_COMP *cpi, const TileInfo *const tile,
int mi_row, int mi_col,
int *totalrate, int64_t *totaldist,
BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
int64_t best_rd, int block) {
VP9_COMMON *const cm = &cpi->common;
MACROBLOCK *const x = &cpi->mb;
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *mbmi;
struct macroblock_plane *const p = x->plane;
struct macroblockd_plane *const pd = xd->plane;
const AQ_MODE aq_mode = cpi->oxcf.aq_mode;
int i, orig_rdmult;
double rdmult_ratio;
vp9_clear_system_state();