/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <stdio.h>
#include <math.h>
#include <limits.h>
#include <assert.h>
#include "vp9/common/vp9_pragmas.h"
#include "vp9/encoder/vp9_tokenize.h"
#include "vp9/encoder/vp9_treewriter.h"
#include "vp9/encoder/vp9_onyx_int.h"
#include "vp9/encoder/vp9_modecosts.h"
#include "vp9/encoder/vp9_encodeintra.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_findnearmv.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/encoder/vp9_encodemb.h"
#include "vp9/encoder/vp9_quantize.h"
#include "vp9/encoder/vp9_variance.h"
#include "vp9/encoder/vp9_mcomp.h"
#include "vp9/encoder/vp9_rdopt.h"
#include "vp9/encoder/vp9_ratectrl.h"
#include "vpx_mem/vpx_mem.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/encoder/vp9_encodemv.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9_rtcd.h"
#include "vp9/common/vp9_mvref_common.h"
#include "vp9/common/vp9_common.h"
#define INVALID_MV 0x80008000
/* Factor to weigh the rate for switchable interp filters */
#define SWITCHABLE_INTERP_RATE_FACTOR 1
DECLARE_ALIGNED(16, extern const uint8_t,
                vp9_pt_energy_class[MAX_ENTROPY_TOKENS]);
#define I4X4_PRED 0x8000
#define SPLITMV 0x10000
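// Note: I4X4_PRED and SPLITMV are not real MB_PREDICTION_MODE enum values;
// they are out-of-range markers (0x8000 / 0x10000) that let vp9_mode_order
// below name the 4x4-split intra and inter searches alongside ordinary modes.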
const MODE_DEFINITION vp9_mode_order[MAX_MODES] = {
  {NEARESTMV, LAST_FRAME,   NONE},
  {NEARESTMV, ALTREF_FRAME, NONE},
  {NEARESTMV, GOLDEN_FRAME, NONE},
  {NEWMV,     LAST_FRAME,   NONE},
  {NEARESTMV, LAST_FRAME,   ALTREF_FRAME},
  {NEARMV,    LAST_FRAME,   NONE},
  {NEARESTMV, GOLDEN_FRAME, ALTREF_FRAME},
  {DC_PRED,   INTRA_FRAME,  NONE},
  {NEWMV,     GOLDEN_FRAME, NONE},
  {NEWMV,     ALTREF_FRAME, NONE},
  {NEARMV,    ALTREF_FRAME, NONE},
  {TM_PRED,   INTRA_FRAME,  NONE},
  {NEARMV,    LAST_FRAME,   ALTREF_FRAME},
  {NEWMV,     LAST_FRAME,   ALTREF_FRAME},

  {NEARMV,    GOLDEN_FRAME, NONE},
  {NEARMV,    GOLDEN_FRAME, ALTREF_FRAME},
  {NEWMV,     GOLDEN_FRAME, ALTREF_FRAME},

  {SPLITMV,   LAST_FRAME,   NONE},
  {SPLITMV,   GOLDEN_FRAME, NONE},
  {SPLITMV,   ALTREF_FRAME, NONE},
  {SPLITMV,   LAST_FRAME,   ALTREF_FRAME},
  {SPLITMV,   GOLDEN_FRAME, ALTREF_FRAME},

  {ZEROMV,    LAST_FRAME,   NONE},
  {ZEROMV,    GOLDEN_FRAME, NONE},
  {ZEROMV,    ALTREF_FRAME, NONE},
  {ZEROMV,    LAST_FRAME,   ALTREF_FRAME},
  {ZEROMV,    GOLDEN_FRAME, ALTREF_FRAME},

  {I4X4_PRED, INTRA_FRAME,  NONE},
  {H_PRED,    INTRA_FRAME,  NONE},
  {V_PRED,    INTRA_FRAME,  NONE},
  {D135_PRED, INTRA_FRAME,  NONE},
  {D27_PRED,  INTRA_FRAME,  NONE},
  {D153_PRED, INTRA_FRAME,  NONE},
  {D63_PRED,  INTRA_FRAME,  NONE},
  {D117_PRED, INTRA_FRAME,  NONE},
  {D45_PRED,  INTRA_FRAME,  NONE},
};

// The baseline rd thresholds for breaking out of the rd loop for
// certain modes are assumed to be based on 8x8 blocks.
// This table is used to correct for block size.
// The factors here are << 2 (2 = x0.5, 32 = x8 etc).
static int rd_thresh_block_size_factor[BLOCK_SIZE_TYPES] =
  {2, 3, 3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32};

#define BASE_RD_THRESH_FREQ_FACT 16
#define MAX_RD_THRESH_FREQ_FACT 32
#define MAX_RD_THRESH_FREQ_INC 1

static void fill_token_costs(vp9_coeff_cost *c,
                             vp9_coeff_probs_model (*p)[BLOCK_TYPES]) {
  int i, j, k, l;
  TX_SIZE t;
  for (t = TX_4X4; t <= TX_32X32; t++)
    for (i = 0; i < BLOCK_TYPES; i++)
      for (j = 0; j < REF_TYPES; j++)
        for (k = 0; k < COEF_BANDS; k++)
          for (l = 0; l < PREV_COEF_CONTEXTS; l++) {
            vp9_prob probs[ENTROPY_NODES];
            vp9_model_to_full_probs(p[t][i][j][k][l], probs);
            vp9_cost_tokens((int *)c[t][i][j][k][0][l], probs,
                            vp9_coef_tree);
            vp9_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
                                 vp9_coef_tree);
            assert(c[t][i][j][k][0][l][DCT_EOB_TOKEN] ==
                   c[t][i][j][k][1][l][DCT_EOB_TOKEN]);
          }
}

static const int rd_iifactor[32] = {
  4, 4, 3, 2, 1, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0, 0, 0,
};

// 3* dc_qlookup[Q]*dc_qlookup[Q];
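// Worked example (illustrative, not from this file): at a qindex where
// vp9_dc_quant() returns 40, compute_rd_mult() below gives
// (11 * 40 * 40) >> 2 = 4400, i.e. the lambda used by RDCOST() grows with
// the square of the DC quantizer step -- exactly 2.75 * q * q, close to
// the 3 * dc_qlookup[Q] * dc_qlookup[Q] noted above.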
/* values are now correlated to quantizer */
static int sad_per_bit16lut[QINDEX_RANGE];
static int sad_per_bit4lut[QINDEX_RANGE];

void vp9_init_me_luts() {
  int i;

  // Initialize the sad lut tables using a formulaic calculation for now.
  // This is to make it easier to resolve the impact of experimental changes
  // to the quantizer tables.
  for (i = 0; i < QINDEX_RANGE; i++) {
    sad_per_bit16lut[i] =
        (int)((0.0418 * vp9_convert_qindex_to_q(i)) + 2.4107);
    sad_per_bit4lut[i] = (int)(0.063 * vp9_convert_qindex_to_q(i) + 2.742);
  }
}

static int compute_rd_mult(int qindex) {
  const int q = vp9_dc_quant(qindex, 0);
  return (11 * q * q) >> 2;
}

void vp9_initialize_me_consts(VP9_COMP *cpi, int qindex) {
  cpi->mb.sadperbit16 = sad_per_bit16lut[qindex];
  cpi->mb.sadperbit4 = sad_per_bit4lut[qindex];
}

void vp9_initialize_rd_consts(VP9_COMP *cpi, int qindex) {
  int q, i, bsize;

  vp9_clear_system_state();  // __asm emms;

  // Further tests required to see if optimum is different
  // for key frames, golden frames and arf frames.
  // if (cpi->common.refresh_golden_frame ||
  //     cpi->common.refresh_alt_ref_frame)
  qindex = clamp(qindex, 0, MAXQ);

  cpi->RDMULT = compute_rd_mult(qindex);
  if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
    if (cpi->twopass.next_iiratio > 31)
      cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
    else
      cpi->RDMULT +=
          (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
  }
  cpi->mb.errorperbit = cpi->RDMULT >> 6;
  cpi->mb.errorperbit += (cpi->mb.errorperbit == 0);

  vp9_set_speed_features(cpi);

  q = (int)pow(vp9_dc_quant(qindex, 0) >> 2, 1.25);
  q <<= 2;
  if (q < 8)
    q = 8;

  if (cpi->RDMULT > 1000) {
    cpi->RDDIV = 1;
    cpi->RDMULT /= 100;

    for (bsize = 0; bsize < BLOCK_SIZE_TYPES; ++bsize) {
      for (i = 0; i < MAX_MODES; ++i) {
        // Thresholds here seem unnecessarily harsh but fine given actual
        // range of values used for cpi->sf.thresh_mult[].
        int thresh_max = INT_MAX / (q * rd_thresh_block_size_factor[bsize]);

        // *4 relates to the scaling of rd_thresh_block_size_factor[]
        if ((int64_t)cpi->sf.thresh_mult[i] < thresh_max) {
          cpi->rd_threshes[bsize][i] =
              cpi->sf.thresh_mult[i] * q *
              rd_thresh_block_size_factor[bsize] / (4 * 100);
        } else {
          cpi->rd_threshes[bsize][i] = INT_MAX;
        }
        cpi->rd_baseline_thresh[bsize][i] = cpi->rd_threshes[bsize][i];

        if (cpi->sf.adaptive_rd_thresh)
          cpi->rd_thresh_freq_fact[bsize][i] = MAX_RD_THRESH_FREQ_FACT;
        else
          cpi->rd_thresh_freq_fact[bsize][i] = BASE_RD_THRESH_FREQ_FACT;
      }
    }
  } else {
    cpi->RDDIV = 100;

    for (bsize = 0; bsize < BLOCK_SIZE_TYPES; ++bsize) {
      for (i = 0; i < MAX_MODES; i++) {
        // Thresholds here seem unnecessarily harsh but fine given actual
        // range of values used for cpi->sf.thresh_mult[].
        int thresh_max = INT_MAX / (q * rd_thresh_block_size_factor[bsize]);

        if (cpi->sf.thresh_mult[i] < thresh_max) {
          cpi->rd_threshes[bsize][i] =
              cpi->sf.thresh_mult[i] * q *
              rd_thresh_block_size_factor[bsize] / 4;
        } else {
          cpi->rd_threshes[bsize][i] = INT_MAX;
        }
        cpi->rd_baseline_thresh[bsize][i] = cpi->rd_threshes[bsize][i];

        if (cpi->sf.adaptive_rd_thresh)
          cpi->rd_thresh_freq_fact[bsize][i] = MAX_RD_THRESH_FREQ_FACT;
        else
          cpi->rd_thresh_freq_fact[bsize][i] = BASE_RD_THRESH_FREQ_FACT;
      }
    }
  }

  fill_token_costs(cpi->mb.token_costs, cpi->common.fc.coef_probs);

  for (i = 0; i < NUM_PARTITION_CONTEXTS; i++)
    vp9_cost_tokens(cpi->mb.partition_cost[i],
                    cpi->common.fc.partition_prob[cpi->common.frame_type][i],
                    vp9_partition_tree);

  /* rough estimate for costing */
  vp9_init_mode_costs(cpi);

  if (cpi->common.frame_type != KEY_FRAME) {
    vp9_build_nmv_cost_table(
        cpi->mb.nmvjointcost,
        cpi->mb.e_mbd.allow_high_precision_mv ?
            cpi->mb.nmvcost_hp : cpi->mb.nmvcost,
        &cpi->common.fc.nmvc,
        cpi->mb.e_mbd.allow_high_precision_mv, 1, 1);

    for (i = 0; i < INTER_MODE_CONTEXTS; i++) {
      MB_PREDICTION_MODE m;

      for (m = NEARESTMV; m < MB_MODE_COUNT; m++)
        cpi->mb.inter_mode_cost[i][m - NEARESTMV] =
            cost_token(vp9_inter_mode_tree,
                       cpi->common.fc.inter_mode_probs[i],
                       vp9_inter_mode_encodings - NEARESTMV + m);
    }
  }
}

static INLINE BLOCK_SIZE_TYPE get_block_size(int bwl, int bhl) {
  return bsize_from_dim_lookup[bwl][bhl];
}

static BLOCK_SIZE_TYPE get_plane_block_size(BLOCK_SIZE_TYPE bsize,
                                            struct macroblockd_plane *pd) {
  return get_block_size(plane_block_width_log2by4(bsize, pd),
                        plane_block_height_log2by4(bsize, pd));
}

static INLINE void linear_interpolate2(double x, int ntab, int inv_step,
                                       const double *tab1, const double *tab2,
                                       double *v1, double *v2) {
  double y = x * inv_step;
  int d = (int) y;
  if (d >= ntab - 1) {
    *v1 = tab1[ntab - 1];
    *v2 = tab2[ntab - 1];
  } else {
    double a = y - d;
    *v1 = tab1[d] * (1 - a) + tab1[d + 1] * a;
    *v2 = tab2[d] * (1 - a) + tab2[d + 1] * a;
  }
}

static void model_rd_norm(double x, double *R, double *D) {
  static const int inv_tab_step = 8;
  static const int tab_size = 120;
  // NOTE: The tables below must be of the same size.
  //
  // Normalized rate:
  // This table models the rate for a Laplacian source with given variance
  // when quantized with a uniform quantizer with given stepsize. The
  // closed form expression is:
  // Rn(x) = H(sqrt(r)) + sqrt(r)*[1 + H(r)/(1 - r)],
  // where r = exp(-sqrt(2) * x) and x = qpstep / sqrt(variance),
  // and H(x) is the binary entropy function.
  static const double rate_tab[] = {
    64.00, 4.944, 3.949, 3.372, 2.966, 2.655, 2.403, 2.194,
    2.014, 1.858, 1.720, 1.596, 1.485, 1.384, 1.291, 1.206,
    1.127, 1.054, 0.986, 0.923, 0.863, 0.808, 0.756, 0.708,
    0.662, 0.619, 0.579, 0.541, 0.506, 0.473, 0.442, 0.412,
    0.385, 0.359, 0.335, 0.313, 0.291, 0.272, 0.253, 0.236,
    0.220, 0.204, 0.190, 0.177, 0.165, 0.153, 0.142, 0.132,
    0.123, 0.114, 0.106, 0.099, 0.091, 0.085, 0.079, 0.073,
    0.068, 0.063, 0.058, 0.054, 0.050, 0.047, 0.043, 0.040,
    0.037, 0.034, 0.032, 0.029, 0.027, 0.025, 0.023, 0.022,
    0.020, 0.019, 0.017, 0.016, 0.015, 0.014, 0.013, 0.012,
    0.011, 0.010, 0.009, 0.008, 0.008, 0.007, 0.007, 0.006,
    0.006, 0.005, 0.005, 0.005, 0.004, 0.004, 0.004, 0.003,
    0.003, 0.003, 0.003, 0.002, 0.002, 0.002, 0.002, 0.002,
    0.002, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
    0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.000,
  };
  // Normalized distortion:
  // This table models the normalized distortion for a Laplacian source
  // with given variance when quantized with a uniform quantizer with
  // given stepsize. The closed form expression is:
  // Dn(x) = 1 - 1/sqrt(2) * x / sinh(x/sqrt(2))
  // where x = qpstep / sqrt(variance).
  // Note the actual distortion is Dn * variance.
  static const double dist_tab[] = {
    0.000, 0.001, 0.005, 0.012, 0.021, 0.032, 0.045, 0.061,
    0.079, 0.098, 0.119, 0.142, 0.166, 0.190, 0.216, 0.242,
    0.269, 0.296, 0.324, 0.351, 0.378, 0.405, 0.432, 0.458,
    0.484, 0.509, 0.534, 0.557, 0.580, 0.603, 0.624, 0.645,
    0.664, 0.683, 0.702, 0.719, 0.735, 0.751, 0.766, 0.780,
    0.794, 0.807, 0.819, 0.830, 0.841, 0.851, 0.861, 0.870,
    0.878, 0.886, 0.894, 0.901, 0.907, 0.913, 0.919, 0.925,
    0.930, 0.935, 0.939, 0.943, 0.947, 0.951, 0.954, 0.957,
    0.960, 0.963, 0.966, 0.968, 0.971, 0.973, 0.975, 0.976,
    0.978, 0.980, 0.981, 0.982, 0.984, 0.985, 0.986, 0.987,
    0.988, 0.989, 0.990, 0.990, 0.991, 0.992, 0.992, 0.993,
    0.993, 0.994, 0.994, 0.995, 0.995, 0.996, 0.996, 0.996,
    0.996, 0.997, 0.997, 0.997, 0.997, 0.998, 0.998, 0.998,
    0.998, 0.998, 0.998, 0.999, 0.999, 0.999, 0.999, 0.999,
    0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 1.000,
  };
  /*
  assert(sizeof(rate_tab) == tab_size * sizeof(rate_tab[0]));
  assert(sizeof(dist_tab) == tab_size * sizeof(dist_tab[0]));
  assert(sizeof(rate_tab) == sizeof(dist_tab));
  */
  assert(x >= 0.0);
  linear_interpolate2(x, tab_size, inv_tab_step,
                      rate_tab, dist_tab, R, D);
}

static void model_rd_from_var_lapndz(int var, int n, int qstep,
                                     int *rate, int64_t *dist) {
  // This function models the rate and distortion for a Laplacian
  // source with given variance when quantized with a uniform quantizer
  // with given stepsize. The closed form expressions are in:
  // Hang and Chen, "Source Model for transform video coder and its
  // application - Part I: Fundamental Theory", IEEE Trans. Circ.
  // Sys. for Video Tech., April 1997.
  vp9_clear_system_state();
  if (var == 0 || n == 0) {
    *rate = 0;
    *dist = 0;
  } else {
    double D, R;
    double s2 = (double) var / n;
    double x = qstep / sqrt(s2);
    model_rd_norm(x, &R, &D);
    *rate = (int)((n << 8) * R + 0.5);
    *dist = (int64_t)(var * D + 0.5);
  }
  vp9_clear_system_state();
}

static void model_rd_for_sb(VP9_COMP *cpi, BLOCK_SIZE_TYPE bsize,
                            MACROBLOCK *x, MACROBLOCKD *xd,
                            int *out_rate_sum, int64_t *out_dist_sum) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  int i, rate_sum = 0;
  int64_t dist_sum = 0;

  for (i = 0; i < MAX_MB_PLANE; ++i) {
    struct macroblock_plane *const p = &x->plane[i];
    struct macroblockd_plane *const pd = &xd->plane[i];

    // TODO(dkovalev) the same code in get_plane_block_size
    const int bwl = plane_block_width_log2by4(bsize, pd);
    const int bhl = plane_block_height_log2by4(bsize, pd);
    const BLOCK_SIZE_TYPE bs = get_block_size(bwl, bhl);
    unsigned int sse;
    int rate;
    int64_t dist;
    (void) cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride,
                              pd->dst.buf, pd->dst.stride, &sse);
    // sse works better than var, since there is no dc prediction used
    model_rd_from_var_lapndz(sse, 16 << (bwl + bhl),
                             pd->dequant[1] >> 3, &rate, &dist);

    rate_sum += rate;
    dist_sum += dist;
  }

  *out_rate_sum = rate_sum;
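  // The << 4 below appears to put the modeled (pixel-domain) distortion on
  // the same 16x scale as the transform-domain block error used elsewhere:
  // per the note above, coeffs are 8x an orthogonal transform, so squared
  // errors come out 64x, and dist_block() shifts those down by 2.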
  *out_dist_sum = dist_sum << 4;
}

static void model_rd_for_sb_y(VP9_COMP *cpi, BLOCK_SIZE_TYPE bsize,
                              MACROBLOCK *x, MACROBLOCKD *xd,
                              int *out_rate_sum, int64_t *out_dist_sum) {
  // Note our transform coeffs are 8 times an orthogonal transform.
  // Hence quantizer step is also 8 times. To get effective quantizer
  // we need to divide by 8 before sending to modeling function.
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];

  // TODO(dkovalev) the same code in get_plane_block_size
  const int bwl = plane_block_width_log2by4(bsize, pd);
  const int bhl = plane_block_height_log2by4(bsize, pd);
  const BLOCK_SIZE_TYPE bs = get_block_size(bwl, bhl);
  unsigned int sse;
  int rate;
  int64_t dist;
  (void) cpi->fn_ptr[bs].vf(p->src.buf, p->src.stride,
                            pd->dst.buf, pd->dst.stride, &sse);
  // sse works better than var, since there is no dc prediction used
  model_rd_from_var_lapndz(sse, 16 << (bwl + bhl),
                           pd->dequant[1] >> 3, &rate, &dist);

  *out_rate_sum = rate;
  *out_dist_sum = dist << 4;
}

static void model_rd_for_sb_y_tx(VP9_COMP *cpi, BLOCK_SIZE_TYPE bsize,
                                 TX_SIZE tx_size,
                                 MACROBLOCK *x, MACROBLOCKD *xd,
                                 int *out_rate_sum, int64_t *out_dist_sum,
                                 int *out_skip) {
  int t = 4, j, k;
  BLOCK_SIZE_TYPE bs = BLOCK_SIZE_AB4X4;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  const int width = plane_block_width(bsize, pd);
  const int height = plane_block_height(bsize, pd);
  int rate_sum = 0;
  int64_t dist_sum = 0;

  if (tx_size == TX_4X4) {
    bs = BLOCK_4X4;
    t = 4;
  } else if (tx_size == TX_8X8) {
    bs = BLOCK_8X8;
    t = 8;
  } else if (tx_size == TX_16X16) {
    bs = BLOCK_16X16;
    t = 16;
  } else if (tx_size == TX_32X32) {
    bs = BLOCK_32X32;
    t = 32;
  } else {
    assert(0);
  }
  *out_skip = 1;
  for (j = 0; j < height; j += t) {
    for (k = 0; k < width; k += t) {
      int rate;
      int64_t dist;
      unsigned int sse;
      (void) cpi->fn_ptr[bs].vf(p->src.buf + j * p->src.stride + k,
                                p->src.stride,
                                pd->dst.buf + j * pd->dst.stride + k,
                                pd->dst.stride, &sse);
      // sse works better than var, since there is no dc prediction used
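      // Model each t x t tile independently; the block is predicted
      // skippable only if every tile's modeled rate stays below 1024
      // (see the *out_skip accumulation below).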
      model_rd_from_var_lapndz(sse, t * t, pd->dequant[1] >> 3,
                               &rate, &dist);
      rate_sum += rate;
      dist_sum += dist;
      *out_skip &= (rate < 1024);
    }
  }
  *out_rate_sum = rate_sum;
  *out_dist_sum = (dist_sum << 4);
}

int64_t vp9_block_error_c(int16_t *coeff, int16_t *dqcoeff,
                          intptr_t block_size, int64_t *ssz) {
  int i;
  int64_t error = 0, sqcoeff = 0;

  for (i = 0; i < block_size; i++) {
    int this_diff = coeff[i] - dqcoeff[i];
    error += (unsigned)this_diff * this_diff;
    sqcoeff += (unsigned) coeff[i] * coeff[i];
  }

  *ssz = sqcoeff;
  return error;
}

/* The trailing '0' is a terminator which is used inside cost_coeffs() to
 * decide whether to include cost of a trailing EOB node or not (i.e. we
 * can skip this if the last coefficient in this transform block, e.g. the
 * 16th coefficient in a 4x4 block or the 64th coefficient in an 8x8 block,
 * were non-zero). */
static const int16_t band_counts[TX_SIZES][8] = {
  { 1, 2, 3, 4,  3,   16 - 13, 0 },
  { 1, 2, 3, 4, 11,   64 - 21, 0 },
  { 1, 2, 3, 4, 11,  256 - 21, 0 },
  { 1, 2, 3, 4, 11, 1024 - 21, 0 },
};

static INLINE int cost_coeffs(MACROBLOCK *mb,
                              int plane, int block, PLANE_TYPE type,
                              ENTROPY_CONTEXT *A, ENTROPY_CONTEXT *L,
                              TX_SIZE tx_size,
                              const int16_t *scan, const int16_t *nb) {
  MACROBLOCKD *const xd = &mb->e_mbd;
  MB_MODE_INFO *mbmi = &xd->mode_info_context->mbmi;
  int pt, c, cost;
  const int16_t *band_count = &band_counts[tx_size][1];
  const int eob = xd->plane[plane].eobs[block];
  const int16_t *qcoeff_ptr = BLOCK_OFFSET(xd->plane[plane].qcoeff,
                                           block, 16);
  const int ref = mbmi->ref_frame[0] != INTRA_FRAME;
  unsigned int (*token_costs)[2][PREV_COEF_CONTEXTS]
                    [MAX_ENTROPY_TOKENS] = mb->token_costs[tx_size][type][ref];
  ENTROPY_CONTEXT above_ec = !!*A, left_ec = !!*L;
  uint8_t token_cache[1024];

  // Check for consistency of tx_size with mode info
  assert((!type && !plane) || (type && plane));
  if (type == PLANE_TYPE_Y_WITH_DC) {
    assert(xd->mode_info_context->mbmi.txfm_size == tx_size);
  } else {
    assert(tx_size == get_uv_tx_size(mbmi));
  }

  pt = combine_entropy_contexts(above_ec, left_ec);

  if (eob == 0) {
    // single eob token
    cost = token_costs[0][0][pt][DCT_EOB_TOKEN];
    c = 0;
  } else {
    int v, prev_t, band_left = *band_count++;
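    // Walk the coefficients in scan order: the DC token first, then AC
    // tokens, stepping token_costs to the next band whenever band_left
    // runs out. token_cache records each token's energy class so
    // get_coef_context() can derive later contexts from already-coded
    // neighbors (nb).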
    // dc token
    v = qcoeff_ptr[0];
    prev_t = vp9_dct_value_tokens_ptr[v].token;
    cost = (*token_costs)[0][pt][prev_t] + vp9_dct_value_cost_ptr[v];
    token_cache[0] = vp9_pt_energy_class[prev_t];
    ++token_costs;

    // ac tokens
    for (c = 1; c < eob; c++) {
      const int rc = scan[c];
      int t;

      v = qcoeff_ptr[rc];
      t = vp9_dct_value_tokens_ptr[v].token;
      pt = get_coef_context(nb, token_cache, c);
      cost += (*token_costs)[!prev_t][pt][t] + vp9_dct_value_cost_ptr[v];
      token_cache[rc] = vp9_pt_energy_class[t];
      prev_t = t;
      if (!--band_left) {
        band_left = *band_count++;
        ++token_costs;
      }
    }

    // eob token
    if (band_left) {
      pt = get_coef_context(nb, token_cache, c);
      cost += (*token_costs)[0][pt][DCT_EOB_TOKEN];
    }
  }

  // is eob first coefficient
  *A = *L = c > 0;

  return cost;
}

struct rdcost_block_args {
  VP9_COMMON *cm;
  MACROBLOCK *x;
  ENTROPY_CONTEXT t_above[16];
  ENTROPY_CONTEXT t_left[16];
  TX_SIZE tx_size;
  int bw;
  int bh;
  int rate;
  int64_t dist;
  int64_t sse;
  int64_t best_rd;
  int skip;
  const int16_t *scan, *nb;
};

static void dist_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
                       int ss_txfrm_size, void *arg) {
  struct rdcost_block_args* args = arg;
  MACROBLOCK* const x = args->x;
  MACROBLOCKD* const xd = &x->e_mbd;
  struct macroblock_plane *const p = &x->plane[0];
  struct macroblockd_plane *const pd = &xd->plane[0];
  int64_t this_sse;
  int shift = args->tx_size == TX_32X32 ? 0 : 2;
  int16_t *const coeff = BLOCK_OFFSET(p->coeff, block, 16);
  int16_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block, 16);
  args->dist += vp9_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
                                &this_sse) >> shift;
  args->sse += this_sse >> shift;

  if (x->skip_encode &&
      xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME) {
    // TODO(jingning): tune the model to better capture the distortion.
    int64_t p = (pd->dequant[1] * pd->dequant[1] *
                 (1 << ss_txfrm_size)) >> shift;
    args->dist += p;
    args->sse += p;
  }
}

static void rate_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
                       int ss_txfrm_size, void *arg) {
  struct rdcost_block_args* args = arg;
  int x_idx, y_idx;
  MACROBLOCKD * const xd = &args->x->e_mbd;

  txfrm_block_to_raster_xy(xd, bsize, plane, block, args->tx_size * 2,
                           &x_idx, &y_idx);

  args->rate += cost_coeffs(args->x, plane, block,
                            xd->plane[plane].plane_type,
                            args->t_above + x_idx, args->t_left + y_idx,
                            args->tx_size, args->scan, args->nb);
}

// FIXME(jingning): need to make the rd test of chroma components consistent
// with that of luma component. This function should be deprecated afterwards.
static int rdcost_plane(VP9_COMMON * const cm, MACROBLOCK *x, int plane,
                        BLOCK_SIZE_TYPE bsize, TX_SIZE tx_size) {
  MACROBLOCKD * const xd = &x->e_mbd;
  const int bwl = plane_block_width_log2by4(bsize, &xd->plane[plane]);
  const int bhl = plane_block_height_log2by4(bsize, &xd->plane[plane]);
  const int bw = 1 << bwl, bh = 1 << bhl;
  int i;
  struct rdcost_block_args args = { cm, x, { 0 }, { 0 }, tx_size,
                                    bw, bh, 0, 0, 0, INT64_MAX, 0 };

  switch (tx_size) {
    case TX_4X4:
      vpx_memcpy(&args.t_above, xd->plane[plane].above_context,
                 sizeof(ENTROPY_CONTEXT) * bw);
      vpx_memcpy(&args.t_left, xd->plane[plane].left_context,
                 sizeof(ENTROPY_CONTEXT) * bh);
      args.scan = vp9_default_scan_4x4;
      args.nb = vp9_default_scan_4x4_neighbors;
      break;
    case TX_8X8:
      for (i = 0; i < bw; i += 2)
        args.t_above[i] = !!*(uint16_t *)&xd->plane[plane].above_context[i];
      for (i = 0; i < bh; i += 2)
        args.t_left[i] = !!*(uint16_t *)&xd->plane[plane].left_context[i];
      args.scan = vp9_default_scan_8x8;
      args.nb = vp9_default_scan_8x8_neighbors;
      break;
    case TX_16X16:
      for (i = 0; i < bw; i += 4)
        args.t_above[i] = !!*(uint32_t *)&xd->plane[plane].above_context[i];
      for (i = 0; i < bh; i += 4)
        args.t_left[i] = !!*(uint32_t *)&xd->plane[plane].left_context[i];
      args.scan = vp9_default_scan_16x16;
      args.nb = vp9_default_scan_16x16_neighbors;
      break;
    case TX_32X32:
      for (i = 0; i < bw; i += 8)
        args.t_above[i] = !!*(uint64_t *)&xd->plane[plane].above_context[i];
      for (i = 0; i < bh; i += 8)
        args.t_left[i] = !!*(uint64_t *)&xd->plane[plane].left_context[i];
      args.scan = vp9_default_scan_32x32;
      args.nb = vp9_default_scan_32x32_neighbors;
      break;
    default:
      assert(0);
  }

  foreach_transformed_block_in_plane(xd, bsize, plane, rate_block, &args);
  return args.rate;
}

static int rdcost_uv(VP9_COMMON *const cm, MACROBLOCK *x,
                     BLOCK_SIZE_TYPE bsize, TX_SIZE tx_size) {
  int cost = 0, plane;

  for (plane = 1; plane < MAX_MB_PLANE; plane++) {
    cost += rdcost_plane(cm, x, plane, bsize, tx_size);
  }
  return cost;
}

static int64_t block_error_sby(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize,
                               int shift, int64_t *sse) {
  struct macroblockd_plane *p = &x->e_mbd.plane[0];
  const int bwl = plane_block_width_log2by4(bsize, p);
  const int bhl = plane_block_height_log2by4(bsize, p);
  int64_t e = vp9_block_error(x->plane[0].coeff, x->e_mbd.plane[0].dqcoeff,
                              16 << (bwl + bhl), sse) >> shift;
  *sse >>= shift;
  return e;
}

static int64_t block_error_sbuv(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize,
                                int shift, int64_t *sse) {
  int64_t sum = 0, this_sse;
  int plane;

  *sse = 0;
  for (plane = 1; plane < MAX_MB_PLANE; plane++) {
    struct macroblockd_plane *p = &x->e_mbd.plane[plane];
    const int bwl = plane_block_width_log2by4(bsize, p);
    const int bhl = plane_block_height_log2by4(bsize, p);
    sum += vp9_block_error(x->plane[plane].coeff,
                           x->e_mbd.plane[plane].dqcoeff,
                           16 << (bwl + bhl), &this_sse);
    *sse += this_sse;
  }
  *sse >>= shift;
  return sum >> shift;
}

static void block_yrd_txfm(int plane, int block, BLOCK_SIZE_TYPE bsize,
                           int ss_txfrm_size, void *arg) {
  struct rdcost_block_args *args = arg;
  MACROBLOCK *const x = args->x;
  MACROBLOCKD *const xd = &x->e_mbd;
  struct encode_b_args encode_args = {args->cm, x, NULL};
  int64_t rd1, rd2, rd;

  if (args->skip)
    return;
  // rd1 is the cost of coding the block as accumulated so far; rd2 is the
  // cost of skipping it outright (zero rate, distortion equal to sse).
  rd1 = RDCOST(x->rdmult, x->rddiv, args->rate, args->dist);
  rd2 = RDCOST(x->rdmult, x->rddiv, 0, args->sse);
  rd = MIN(rd1, rd2);
  if (rd > args->best_rd) {
    args->skip = 1;
    args->rate = INT_MAX;
    args->dist = INT64_MAX;
    args->sse = INT64_MAX;
    return;
  }
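  // Encode the block for real (intra predicts from reconstructed pixels
  // and codes the residual; inter only transforms and quantizes the
  // residual), then measure its actual distortion and rate below.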
  if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME)
    encode_block_intra(plane, block, bsize, ss_txfrm_size, &encode_args);
  else
    xform_quant(plane, block, bsize, ss_txfrm_size, &encode_args);

  dist_block(plane, block, bsize, ss_txfrm_size, args);
  rate_block(plane, block, bsize, ss_txfrm_size, args);
}

static void super_block_yrd_for_txfm(VP9_COMMON *const cm, MACROBLOCK *x,
                                     int *rate, int64_t *distortion,
                                     int *skippable, int64_t *sse,
                                     int64_t ref_best_rd,
                                     BLOCK_SIZE_TYPE bsize, TX_SIZE tx_size) {
  MACROBLOCKD *const xd = &x->e_mbd;
  struct macroblockd_plane *const pd = &xd->plane[0];
  const int bwl = plane_block_width_log2by4(bsize, pd);
  const int bhl = plane_block_height_log2by4(bsize, pd);
  const int bw = 1 << bwl, bh = 1 << bhl;
  int i;
  struct rdcost_block_args args = { cm, x, { 0 }, { 0 }, tx_size,
                                    bw, bh, 0, 0, 0, ref_best_rd, 0 };
  xd->mode_info_context->mbmi.txfm_size = tx_size;
  switch (tx_size) {
    case TX_4X4:
      vpx_memcpy(&args.t_above, pd->above_context,
                 sizeof(ENTROPY_CONTEXT) * bw);
      vpx_memcpy(&args.t_left, pd->left_context,
                 sizeof(ENTROPY_CONTEXT) * bh);
      get_scan_nb_4x4(get_tx_type_4x4(PLANE_TYPE_Y_WITH_DC, xd, 0),
                      &args.scan, &args.nb);
      break;
    case TX_8X8:
      for (i = 0; i < bw; i += 2)
        args.t_above[i] = !!*(uint16_t *)&pd->above_context[i];
      for (i = 0; i < bh; i += 2)
        args.t_left[i] = !!*(uint16_t *)&pd->left_context[i];
      get_scan_nb_8x8(get_tx_type_8x8(PLANE_TYPE_Y_WITH_DC, xd),
                      &args.scan, &args.nb);
      break;
    case TX_16X16:
      for (i = 0; i < bw; i += 4)
        args.t_above[i] = !!*(uint32_t *)&pd->above_context[i];
      for (i = 0; i < bh; i += 4)
        args.t_left[i] = !!*(uint32_t *)&pd->left_context[i];
      get_scan_nb_16x16(get_tx_type_16x16(PLANE_TYPE_Y_WITH_DC, xd),
                        &args.scan, &args.nb);
      break;
    case TX_32X32:
      for (i = 0; i < bw; i += 8)
        args.t_above[i] = !!*(uint64_t *)&pd->above_context[i];
      for (i = 0; i < bh; i += 8)
        args.t_left[i] = !!*(uint64_t *)&pd->left_context[i];
      args.scan = vp9_default_scan_32x32;
      args.nb = vp9_default_scan_32x32_neighbors;
      break;
    default:
      assert(0);
  }

  foreach_transformed_block_in_plane(xd, bsize, 0, block_yrd_txfm, &args);
  *distortion = args.dist;
  *rate = args.rate;
  *sse = args.sse;
  *skippable = vp9_sby_is_skippable(xd, bsize) && (!args.skip);
}
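// Transform size selection. In each chooser below, max_txfm_size works out
// to the largest transform that fits the block: start from TX_32X32 and
// step down once if the block is smaller than 32x32, and once more if it
// is smaller than 16x16.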
static void choose_largest_txfm_size(VP9_COMP *cpi, MACROBLOCK *x,
                                     int *rate, int64_t *distortion,
                                     int *skip, int64_t *sse,
                                     int64_t ref_best_rd,
                                     BLOCK_SIZE_TYPE bs) {
  const TX_SIZE max_txfm_size = TX_32X32
      - (bs < BLOCK_SIZE_SB32X32) - (bs < BLOCK_SIZE_MB16X16);
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
  if (max_txfm_size == TX_32X32 &&
      (cm->tx_mode == ALLOW_32X32 ||
       cm->tx_mode == TX_MODE_SELECT)) {
    mbmi->txfm_size = TX_32X32;
  } else if (max_txfm_size >= TX_16X16 &&
             (cm->tx_mode == ALLOW_16X16 ||
              cm->tx_mode == ALLOW_32X32 ||
              cm->tx_mode == TX_MODE_SELECT)) {
    mbmi->txfm_size = TX_16X16;
  } else if (cm->tx_mode != ONLY_4X4) {
    mbmi->txfm_size = TX_8X8;
  } else {
    mbmi->txfm_size = TX_4X4;
  }
  super_block_yrd_for_txfm(cm, x, rate, distortion, skip,
                           &sse[mbmi->txfm_size], ref_best_rd, bs,
                           mbmi->txfm_size);
  cpi->txfm_stepdown_count[0]++;
}

static void choose_txfm_size_from_rd(VP9_COMP *cpi, MACROBLOCK *x,
                                     int (*r)[2], int *rate,
                                     int64_t *d, int64_t *distortion,
                                     int *s, int *skip,
                                     int64_t txfm_cache[TX_MODES],
                                     BLOCK_SIZE_TYPE bs) {
  const TX_SIZE max_txfm_size = TX_32X32
      - (bs < BLOCK_SIZE_SB32X32) - (bs < BLOCK_SIZE_MB16X16);
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
  vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd);
  int64_t rd[TX_SIZES][2];
  int n, m;
  int s0, s1;
  const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs);

  for (n = TX_4X4; n <= max_txfm_size; n++) {
    r[n][1] = r[n][0];
    if (r[n][0] == INT_MAX)
      continue;
    for (m = 0; m <= n - (n == max_txfm_size); m++) {
      if (m == n)
        r[n][1] += vp9_cost_zero(tx_probs[m]);
      else
        r[n][1] += vp9_cost_one(tx_probs[m]);
    }
  }

  assert(skip_prob > 0);
  s0 = vp9_cost_bit(skip_prob, 0);
  s1 = vp9_cost_bit(skip_prob, 1);

  for (n = TX_4X4; n <= max_txfm_size; n++) {
    if (d[n] == INT64_MAX) {
      rd[n][0] = rd[n][1] = INT64_MAX;
      continue;
    }
    if (s[n]) {
      rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, d[n]);
    } else {
      rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
      rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
    }
  }
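  // rd[n][0] costs transform size n with the size signalled for free (it
  // feeds txfm_cache[] for the fixed tx modes); rd[n][1] also charges the
  // tx_size bits and drives the TX_MODE_SELECT comparisons below. s0/s1
  // are the costs of coding the skip flag off/on.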
  if (max_txfm_size == TX_32X32 &&
      (cm->tx_mode == ALLOW_32X32 ||
       (cm->tx_mode == TX_MODE_SELECT &&
        rd[TX_32X32][1] < rd[TX_16X16][1] &&
        rd[TX_32X32][1] < rd[TX_8X8][1] &&
        rd[TX_32X32][1] < rd[TX_4X4][1]))) {
    mbmi->txfm_size = TX_32X32;
  } else if (max_txfm_size >= TX_16X16 &&
             (cm->tx_mode == ALLOW_16X16 ||
              cm->tx_mode == ALLOW_32X32 ||
              (cm->tx_mode == TX_MODE_SELECT &&
               rd[TX_16X16][1] < rd[TX_8X8][1] &&
               rd[TX_16X16][1] < rd[TX_4X4][1]))) {
    mbmi->txfm_size = TX_16X16;
  } else if (cm->tx_mode == ALLOW_8X8 ||
             cm->tx_mode == ALLOW_16X16 ||
             cm->tx_mode == ALLOW_32X32 ||
             (cm->tx_mode == TX_MODE_SELECT &&
              rd[TX_8X8][1] < rd[TX_4X4][1])) {
    mbmi->txfm_size = TX_8X8;
  } else {
    mbmi->txfm_size = TX_4X4;
  }

  *distortion = d[mbmi->txfm_size];
  *rate = r[mbmi->txfm_size][cm->tx_mode == TX_MODE_SELECT];
  *skip = s[mbmi->txfm_size];

  txfm_cache[ONLY_4X4] = rd[TX_4X4][0];
  txfm_cache[ALLOW_8X8] = rd[TX_8X8][0];
  txfm_cache[ALLOW_16X16] = rd[MIN(max_txfm_size, TX_16X16)][0];
  txfm_cache[ALLOW_32X32] = rd[MIN(max_txfm_size, TX_32X32)][0];
  if (max_txfm_size == TX_32X32 &&
      rd[TX_32X32][1] < rd[TX_16X16][1] &&
      rd[TX_32X32][1] < rd[TX_8X8][1] &&
      rd[TX_32X32][1] < rd[TX_4X4][1])
    txfm_cache[TX_MODE_SELECT] = rd[TX_32X32][1];
  else if (max_txfm_size >= TX_16X16 &&
           rd[TX_16X16][1] < rd[TX_8X8][1] &&
           rd[TX_16X16][1] < rd[TX_4X4][1])
    txfm_cache[TX_MODE_SELECT] = rd[TX_16X16][1];
  else
    txfm_cache[TX_MODE_SELECT] = rd[TX_4X4][1] < rd[TX_8X8][1] ?
        rd[TX_4X4][1] : rd[TX_8X8][1];

  if (max_txfm_size == TX_32X32 &&
      rd[TX_32X32][1] < rd[TX_16X16][1] &&
      rd[TX_32X32][1] < rd[TX_8X8][1] &&
      rd[TX_32X32][1] < rd[TX_4X4][1]) {
    cpi->txfm_stepdown_count[0]++;
  } else if (max_txfm_size >= TX_16X16 &&
             rd[TX_16X16][1] < rd[TX_8X8][1] &&
             rd[TX_16X16][1] < rd[TX_4X4][1]) {
    cpi->txfm_stepdown_count[max_txfm_size - TX_16X16]++;
  } else if (rd[TX_8X8][1] < rd[TX_4X4][1]) {
    cpi->txfm_stepdown_count[max_txfm_size - TX_8X8]++;
  } else {
    cpi->txfm_stepdown_count[max_txfm_size - TX_4X4]++;
  }
}

static void choose_txfm_size_from_modelrd(VP9_COMP *cpi, MACROBLOCK *x,
                                          int (*r)[2], int *rate,
                                          int64_t *d, int64_t *distortion,
                                          int *s, int *skip, int64_t *sse,
                                          int64_t ref_best_rd,
                                          BLOCK_SIZE_TYPE bs,
                                          int *model_used) {
  const TX_SIZE max_txfm_size = TX_32X32
      - (bs < BLOCK_SIZE_SB32X32) - (bs < BLOCK_SIZE_MB16X16);
  VP9_COMMON *const cm = &cpi->common;
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
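  // The scale_rd[] factors declared below inflate the model-based RD
  // estimates of the smaller transform sizes (1.73x for 4x4 down to 1.00x
  // for 32x32), biasing the decision toward larger transforms; presumably
  // this offsets the model's optimism for small sizes.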
  vp9_prob skip_prob = vp9_get_pred_prob_mbskip(cm, xd);
  int64_t rd[TX_SIZES][2];
  int n, m;
  int s0, s1;
  double scale_rd[TX_SIZES] = {1.73, 1.44, 1.20, 1.00};
  // double scale_r[TX_SIZES] = {2.82, 2.00, 1.41, 1.00};
  const vp9_prob *tx_probs = get_tx_probs2(xd, &cm->fc.tx_probs);

  // for (n = TX_4X4; n <= max_txfm_size; n++)
  //   r[n][0] = (r[n][0] * scale_r[n]);

  for (n = TX_4X4; n <= max_txfm_size; n++) {
    r[n][1] = r[n][0];
    for (m = 0; m <= n - (n == max_txfm_size); m++) {
      if (m == n)
        r[n][1] += vp9_cost_zero(tx_probs[m]);
      else
        r[n][1] += vp9_cost_one(tx_probs[m]);
    }
  }

  assert(skip_prob > 0);
  s0 = vp9_cost_bit(skip_prob, 0);
  s1 = vp9_cost_bit(skip_prob, 1);

  for (n = TX_4X4; n <= max_txfm_size; n++) {
    if (s[n]) {
      rd[n][0] = rd[n][1] = RDCOST(x->rdmult, x->rddiv, s1, d[n]);
    } else {
      rd[n][0] = RDCOST(x->rdmult, x->rddiv, r[n][0] + s0, d[n]);
      rd[n][1] = RDCOST(x->rdmult, x->rddiv, r[n][1] + s0, d[n]);
    }
  }

  for (n = TX_4X4; n <= max_txfm_size; n++) {
    rd[n][0] = (scale_rd[n] * rd[n][0]);
    rd[n][1] = (scale_rd[n] * rd[n][1]);
  }

  if (max_txfm_size == TX_32X32 &&
      (cm->tx_mode == ALLOW_32X32 ||
       (cm->tx_mode == TX_MODE_SELECT &&
        rd[TX_32X32][1] <= rd[TX_16X16][1] &&
        rd[TX_32X32][1] <= rd[TX_8X8][1] &&
        rd[TX_32X32][1] <= rd[TX_4X4][1]))) {
    mbmi->txfm_size = TX_32X32;
  } else if (max_txfm_size >= TX_16X16 &&
             (cm->tx_mode == ALLOW_16X16 ||
              cm->tx_mode == ALLOW_32X32 ||
              (cm->tx_mode == TX_MODE_SELECT &&
               rd[TX_16X16][1] <= rd[TX_8X8][1] &&
               rd[TX_16X16][1] <= rd[TX_4X4][1]))) {
    mbmi->txfm_size = TX_16X16;
  } else if (cm->tx_mode == ALLOW_8X8 ||
             cm->tx_mode == ALLOW_16X16 ||
             cm->tx_mode == ALLOW_32X32 ||
             (cm->tx_mode == TX_MODE_SELECT &&
              rd[TX_8X8][1] <= rd[TX_4X4][1])) {
    mbmi->txfm_size = TX_8X8;
  } else {
    mbmi->txfm_size = TX_4X4;
  }

  if (model_used[mbmi->txfm_size]) {
    // Actually encode using the chosen mode if a model was used, but do not
    // update the r, d costs
    super_block_yrd_for_txfm(cm, x, rate, distortion, skip,
                             &sse[mbmi->txfm_size], ref_best_rd, bs,
                             mbmi->txfm_size);
  } else {
    *distortion = d[mbmi->txfm_size];
    *rate = r[mbmi->txfm_size][cm->tx_mode == TX_MODE_SELECT];
    *skip = s[mbmi->txfm_size];
  }

  if (max_txfm_size == TX_32X32 &&
      rd[TX_32X32][1] <= rd[TX_16X16][1] &&
      rd[TX_32X32][1] <= rd[TX_8X8][1] &&
      rd[TX_32X32][1] <= rd[TX_4X4][1]) {
    cpi->txfm_stepdown_count[0]++;
  } else if (max_txfm_size >= TX_16X16 &&
             rd[TX_16X16][1] <= rd[TX_8X8][1] &&
             rd[TX_16X16][1] <= rd[TX_4X4][1]) {
    cpi->txfm_stepdown_count[max_txfm_size - TX_16X16]++;
  } else if (rd[TX_8X8][1] <= rd[TX_4X4][1]) {
    cpi->txfm_stepdown_count[max_txfm_size - TX_8X8]++;
  } else {
    cpi->txfm_stepdown_count[max_txfm_size - TX_4X4]++;
  }
}

static void super_block_yrd(VP9_COMP *cpi,
                            MACROBLOCK *x, int *rate, int64_t *distortion,
                            int *skip, int64_t *psse, BLOCK_SIZE_TYPE bs,
                            int64_t txfm_cache[TX_MODES],
                            int64_t ref_best_rd) {
  VP9_COMMON *const cm = &cpi->common;
  int r[TX_SIZES][2], s[TX_SIZES];
  int64_t d[TX_SIZES], sse[TX_SIZES];
  MACROBLOCKD *xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;

  assert(bs == mbmi->sb_type);
  if (mbmi->ref_frame[0] > INTRA_FRAME)
    vp9_subtract_sby(x, bs);

  if (cpi->sf.tx_size_search_method == USE_LARGESTALL ||
      (cpi->sf.tx_size_search_method != USE_FULL_RD &&
       mbmi->ref_frame[0] == INTRA_FRAME)) {
    vpx_memset(txfm_cache, 0, TX_MODES * sizeof(int64_t));
    choose_largest_txfm_size(cpi, x, rate, distortion, skip, sse,
                             ref_best_rd, bs);
    if (psse)
      *psse = sse[mbmi->txfm_size];
    return;
  }

  if (cpi->sf.tx_size_search_method == USE_LARGESTINTRA_MODELINTER &&
      mbmi->ref_frame[0] > INTRA_FRAME) {
    int model_used[TX_SIZES] = {1, 1, 1, 1};
    if (bs >= BLOCK_SIZE_SB32X32) {
      if (model_used[TX_32X32]) {
        model_rd_for_sb_y_tx(cpi, bs, TX_32X32, x, xd,
                             &r[TX_32X32][0], &d[TX_32X32], &s[TX_32X32]);
      } else {
        super_block_yrd_for_txfm(cm, x, &r[TX_32X32][0], &d[TX_32X32],
                                 &s[TX_32X32], &sse[TX_32X32],
                                 INT64_MAX, bs, TX_32X32);
      }
    }
    if (bs >= BLOCK_SIZE_MB16X16) {
      if (model_used[TX_16X16]) {
        model_rd_for_sb_y_tx(cpi, bs, TX_16X16, x, xd,
                             &r[TX_16X16][0], &d[TX_16X16], &s[TX_16X16]);
      } else {
        super_block_yrd_for_txfm(cm, x, &r[TX_16X16][0], &d[TX_16X16],
                                 &s[TX_16X16], &sse[TX_16X16],
                                 INT64_MAX, bs, TX_16X16);
      }
    }
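    // 8x8 and 4x4 fit any block size, so they are evaluated
    // unconditionally; choose_txfm_size_from_modelrd() then re-encodes
    // the winning size for real when only the model was run for it.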
    if (model_used[TX_8X8]) {
      model_rd_for_sb_y_tx(cpi, bs, TX_8X8, x, xd,
                           &r[TX_8X8][0], &d[TX_8X8], &s[TX_8X8]);
    } else {
      super_block_yrd_for_txfm(cm, x, &r[TX_8X8][0], &d[TX_8X8],
                               &s[TX_8X8], &sse[TX_8X8],
                               INT64_MAX, bs, TX_8X8);
    }
    if (model_used[TX_4X4]) {
      model_rd_for_sb_y_tx(cpi, bs, TX_4X4, x, xd,
                           &r[TX_4X4][0], &d[TX_4X4], &s[TX_4X4]);
    } else {
      super_block_yrd_for_txfm(cm, x, &r[TX_4X4][0], &d[TX_4X4],
                               &s[TX_4X4], &sse[TX_4X4],
                               INT64_MAX, bs, TX_4X4);
    }
    choose_txfm_size_from_modelrd(cpi, x, r, rate, d, distortion, s,
                                  skip, sse, ref_best_rd, bs, model_used);
  } else {
    if (bs >= BLOCK_SIZE_SB32X32)
      super_block_yrd_for_txfm(cm, x, &r[TX_32X32][0], &d[TX_32X32],
                               &s[TX_32X32], &sse[TX_32X32],
                               ref_best_rd, bs, TX_32X32);
    if (bs >= BLOCK_SIZE_MB16X16)
      super_block_yrd_for_txfm(cm, x, &r[TX_16X16][0], &d[TX_16X16],
                               &s[TX_16X16], &sse[TX_16X16],
                               ref_best_rd, bs, TX_16X16);
    super_block_yrd_for_txfm(cm, x, &r[TX_8X8][0], &d[TX_8X8],
                             &s[TX_8X8], &sse[TX_8X8],
                             ref_best_rd, bs, TX_8X8);
    super_block_yrd_for_txfm(cm, x, &r[TX_4X4][0], &d[TX_4X4],
                             &s[TX_4X4], &sse[TX_4X4],
                             ref_best_rd, bs, TX_4X4);
    choose_txfm_size_from_rd(cpi, x, r, rate, d, distortion, s, skip,
                             txfm_cache, bs);
  }
  if (psse)
    *psse = sse[mbmi->txfm_size];
}

static int conditional_skipintra(MB_PREDICTION_MODE mode,
                                 MB_PREDICTION_MODE best_intra_mode) {
  if (mode == D117_PRED &&
      best_intra_mode != V_PRED &&
      best_intra_mode != D135_PRED)
    return 1;
  if (mode == D63_PRED &&
      best_intra_mode != V_PRED &&
      best_intra_mode != D45_PRED)
    return 1;
  if (mode == D27_PRED &&
      best_intra_mode != H_PRED &&
      best_intra_mode != D45_PRED)
    return 1;
  if (mode == D153_PRED &&
      best_intra_mode != H_PRED &&
      best_intra_mode != D135_PRED)
    return 1;
  return 0;
}

static int64_t rd_pick_intra4x4block(VP9_COMP *cpi, MACROBLOCK *x, int ib,
                                     MB_PREDICTION_MODE *best_mode,
                                     int *bmode_costs,
                                     ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
                                     int *bestrate, int *bestratey,
                                     int64_t *bestdistortion,
                                     BLOCK_SIZE_TYPE bsize,
                                     int64_t rd_thresh) {
  MB_PREDICTION_MODE mode;
  MACROBLOCKD *xd = &x->e_mbd;
  int64_t best_rd = rd_thresh;
  int rate = 0;
  int64_t distortion;
  struct macroblock_plane *p = &x->plane[0];
  struct macroblockd_plane *pd = &xd->plane[0];
  const int src_stride = p->src.stride;
  const int dst_stride = pd->dst.stride;
  uint8_t *src_init = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, ib,
                                                p->src.buf, src_stride);
  uint8_t *dst_init = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, ib,
                                                pd->dst.buf, dst_stride);
  int16_t *src_diff, *coeff;

  ENTROPY_CONTEXT ta[2], tempa[2];
  ENTROPY_CONTEXT tl[2], templ[2];
  TX_TYPE tx_type = DCT_DCT;

  int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy, block;
  uint8_t best_dst[8 * 8];

  assert(ib < 4);

  vpx_memcpy(ta, a, sizeof(ta));
  vpx_memcpy(tl, l, sizeof(tl));
  xd->mode_info_context->mbmi.txfm_size = TX_4X4;

  for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
    int64_t this_rd;
    int ratey = 0;

    // Only do the oblique modes if the best so far is
    // one of the neighboring directional modes
    if (cpi->sf.mode_search_skip_flags & FLAG_SKIP_INTRA_DIRMISMATCH) {
      if (conditional_skipintra(mode, *best_mode))
        continue;
    }

    rate = bmode_costs[mode];
    distortion = 0;

    vpx_memcpy(tempa, ta, sizeof(ta));
    vpx_memcpy(templ, tl, sizeof(tl));

    for (idy = 0; idy < num_4x4_blocks_high; ++idy) {
      for (idx = 0; idx < num_4x4_blocks_wide; ++idx) {
        int64_t ssz;
        const int16_t *scan;
        uint8_t *src = src_init + idx * 4 + idy * 4 * src_stride;
        uint8_t *dst = dst_init + idx * 4 + idy * 4 * dst_stride;

        block = ib + idy * 2 + idx;
        xd->mode_info_context->bmi[block].as_mode = mode;
        src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, block,
                                             p->src_diff);
        coeff = BLOCK_OFFSET(x->plane[0].coeff, block, 16);
        vp9_predict_intra_block(xd, block, 1, TX_4X4, mode,
                                x->skip_encode ? src : dst,
                                x->skip_encode ? src_stride : dst_stride,
                                dst, dst_stride);
        vp9_subtract_block(4, 4, src_diff, 8,
                           src, src_stride,
                           dst, dst_stride);

        tx_type = get_tx_type_4x4(PLANE_TYPE_Y_WITH_DC, xd, block);
        if (tx_type != DCT_DCT) {
          vp9_short_fht4x4(src_diff, coeff, 8, tx_type);
          x->quantize_b_4x4(x, block, tx_type, 16);
        } else {
          x->fwd_txm4x4(src_diff, coeff, 16);
          x->quantize_b_4x4(x, block, tx_type, 16);
        }
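        // Cost the quantized coefficients and accumulate reconstruction
        // error; the >> 2 on the block error below keeps distortion on the
        // 16x coefficient-domain scale used throughout this file.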
        scan = get_scan_4x4(get_tx_type_4x4(PLANE_TYPE_Y_WITH_DC,
                                            xd, block));
        ratey += cost_coeffs(x, 0, block, PLANE_TYPE_Y_WITH_DC,
                             tempa + idx, templ + idy, TX_4X4, scan,
                             vp9_get_coef_neighbors_handle(scan));
        distortion += vp9_block_error(coeff,
                                      BLOCK_OFFSET(pd->dqcoeff, block, 16),
                                      16, &ssz) >> 2;
        if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
          goto next;

        if (tx_type != DCT_DCT)
          vp9_short_iht4x4_add(BLOCK_OFFSET(pd->dqcoeff, block, 16),
                               dst, pd->dst.stride, tx_type);
        else
          xd->inv_txm4x4_add(BLOCK_OFFSET(pd->dqcoeff, block, 16),
                             dst, pd->dst.stride);
      }
    }

    rate += ratey;
    this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);

    if (this_rd < best_rd) {
      *bestrate = rate;
      *bestratey = ratey;
      *bestdistortion = distortion;
      best_rd = this_rd;
      *best_mode = mode;
      vpx_memcpy(a, tempa, sizeof(tempa));
      vpx_memcpy(l, templ, sizeof(templ));
      for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
        vpx_memcpy(best_dst + idy * 8, dst_init + idy * dst_stride,
                   num_4x4_blocks_wide * 4);
    }
  next:
    {}
  }

  if (best_rd >= rd_thresh || x->skip_encode)
    return best_rd;

  for (idy = 0; idy < num_4x4_blocks_high * 4; ++idy)
    vpx_memcpy(dst_init + idy * dst_stride, best_dst + idy * 8,
               num_4x4_blocks_wide * 4);

  return best_rd;
}

static int64_t rd_pick_intra4x4mby_modes(VP9_COMP *cpi, MACROBLOCK *mb,
                                         int *Rate, int *rate_y,
                                         int64_t *Distortion,
                                         int64_t best_rd) {
  int i, j;
  MACROBLOCKD *const xd = &mb->e_mbd;
  BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
  int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[bsize];
  int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
  int idx, idy;
  int cost = 0;
  int64_t distortion = 0;
  int tot_rate_y = 0;
  int64_t total_rd = 0;
  ENTROPY_CONTEXT t_above[4], t_left[4];
  int *bmode_costs;
  MODE_INFO *const mic = xd->mode_info_context;

  vpx_memcpy(t_above, xd->plane[0].above_context, sizeof(t_above));
  vpx_memcpy(t_left, xd->plane[0].left_context, sizeof(t_left));

  bmode_costs = mb->mbmode_cost;
  // Pick modes for each sub-block (of size 4x4, 4x8, or 8x4) in an 8x8 block.
  for (idy = 0; idy < 2; idy += num_4x4_blocks_high) {
    for (idx = 0; idx < 2; idx += num_4x4_blocks_wide) {
      const int mis = xd->mode_info_stride;
      MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
      int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry);
      int64_t UNINITIALIZED_IS_SAFE(d), this_rd;
      i = idy * 2 + idx;

      if (cpi->common.frame_type == KEY_FRAME) {
        const MB_PREDICTION_MODE A = above_block_mode(mic, i, mis);
        const MB_PREDICTION_MODE L = (xd->left_available || idx) ?
                                     left_block_mode(mic, i) : DC_PRED;

        bmode_costs = mb->y_mode_costs[A][L];
      }

      this_rd = rd_pick_intra4x4block(cpi, mb, i, &best_mode, bmode_costs,
                                      t_above + idx, t_left + idy,
                                      &r, &ry, &d, bsize,
                                      best_rd - total_rd);
      if (this_rd >= best_rd - total_rd)
        return INT64_MAX;

      total_rd += this_rd;
      cost += r;
      distortion += d;
      tot_rate_y += ry;

      mic->bmi[i].as_mode = best_mode;
      for (j = 1; j < num_4x4_blocks_high; ++j)
        mic->bmi[i + j * 2].as_mode = best_mode;
      for (j = 1; j < num_4x4_blocks_wide; ++j)
        mic->bmi[i + j].as_mode = best_mode;

      if (total_rd >= best_rd)
        return INT64_MAX;
    }
  }

  *Rate = cost;
  *rate_y = tot_rate_y;
  *Distortion = distortion;
  xd->mode_info_context->mbmi.mode = mic->bmi[3].as_mode;

  return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
}

static int64_t rd_pick_intra_sby_mode(VP9_COMP *cpi, MACROBLOCK *x,
                                      int *rate, int *rate_tokenonly,
                                      int64_t *distortion, int *skippable,
                                      BLOCK_SIZE_TYPE bsize,
                                      int64_t txfm_cache[TX_MODES],
                                      int64_t best_rd) {
  MB_PREDICTION_MODE mode;
  MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
  MACROBLOCKD *const xd = &x->e_mbd;
  int this_rate, this_rate_tokenonly, s;
  int64_t this_distortion, this_rd;
  TX_SIZE UNINITIALIZED_IS_SAFE(best_tx);
  int i;
  int *bmode_costs = x->mbmode_cost;

  if (cpi->sf.tx_size_search_method == USE_FULL_RD) {
    for (i = 0; i < TX_MODES; i++)
      txfm_cache[i] = INT64_MAX;
  }

  /* Y Search for intra prediction mode */
  for (mode = DC_PRED; mode <= TM_PRED; mode++) {
    int64_t local_txfm_cache[TX_MODES];
    MODE_INFO *const mic = xd->mode_info_context;
    const int mis = xd->mode_info_stride;

    if (cpi->common.frame_type == KEY_FRAME) {
      const MB_PREDICTION_MODE A = above_block_mode(mic, 0, mis);
      const MB_PREDICTION_MODE L = xd->left_available ?
                                   left_block_mode(mic, 0) : DC_PRED;

      bmode_costs = x->y_mode_costs[A][L];
    }
    x->e_mbd.mode_info_context->mbmi.mode = mode;

    super_block_yrd(cpi, x, &this_rate_tokenonly, &this_distortion, &s, NULL,
                    bsize, local_txfm_cache, best_rd);

    if (this_rate_tokenonly == INT_MAX)
      continue;

    this_rate = this_rate_tokenonly + bmode_costs[mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      best_rd = this_rd;
      best_tx = x->e_mbd.mode_info_context->mbmi.txfm_size;
      *rate = this_rate;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion = this_distortion;
      *skippable = s;
    }

    if (cpi->sf.tx_size_search_method == USE_FULL_RD && this_rd < INT64_MAX) {
      for (i = 0; i < TX_MODES; i++) {
        int64_t adj_rd = this_rd + local_txfm_cache[i] -
                         local_txfm_cache[cpi->common.tx_mode];
        if (adj_rd < txfm_cache[i]) {
          txfm_cache[i] = adj_rd;
        }
      }
    }
  }

  x->e_mbd.mode_info_context->mbmi.mode = mode_selected;
  x->e_mbd.mode_info_context->mbmi.txfm_size = best_tx;

  return best_rd;
}

static void super_block_uvrd_for_txfm(VP9_COMMON *const cm, MACROBLOCK *x,
                                      int *rate, int64_t *distortion,
                                      int *skippable, int64_t *sse,
                                      BLOCK_SIZE_TYPE bsize,
                                      TX_SIZE uv_tx_size) {
  MACROBLOCKD *const xd = &x->e_mbd;
  int64_t dummy;
  if (xd->mode_info_context->mbmi.ref_frame[0] == INTRA_FRAME)
    vp9_encode_intra_block_uv(cm, x, bsize);
  else
    vp9_xform_quant_sbuv(cm, x, bsize);

  *distortion = block_error_sbuv(x, bsize, uv_tx_size == TX_32X32 ? 0 : 2,
                                 sse ? sse : &dummy);
  *rate = rdcost_uv(cm, x, bsize, uv_tx_size);
  *skippable = vp9_sbuv_is_skippable(xd, bsize);
}

static void super_block_uvrd(VP9_COMMON *const cm, MACROBLOCK *x,
                             int *rate, int64_t *distortion, int *skippable,
                             int64_t *sse, BLOCK_SIZE_TYPE bsize) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
  TX_SIZE uv_txfm_size = get_uv_tx_size(mbmi);

  if (mbmi->ref_frame[0] > INTRA_FRAME)
    vp9_subtract_sbuv(x, bsize);

  super_block_uvrd_for_txfm(cm, x, rate, distortion, skippable, sse, bsize,
                            uv_txfm_size);
}

static int64_t rd_pick_intra_sbuv_mode(VP9_COMP *cpi, MACROBLOCK *x,
                                       int *rate, int *rate_tokenonly,
                                       int64_t *distortion, int *skippable,
                                       BLOCK_SIZE_TYPE bsize) {
  MB_PREDICTION_MODE mode;
  MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
  int64_t best_rd = INT64_MAX, this_rd;
  int this_rate_tokenonly, this_rate, s;
  int64_t this_distortion;
  MB_PREDICTION_MODE last_mode = bsize <= BLOCK_SIZE_SB8X8 ?
                                 TM_PRED : cpi->sf.last_chroma_intra_mode;

  for (mode = DC_PRED; mode <= last_mode; mode++) {
    x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
    super_block_uvrd(&cpi->common, x, &this_rate_tokenonly,
                     &this_distortion, &s, NULL, bsize);
    this_rate = this_rate_tokenonly +
                x->intra_uv_mode_cost[cpi->common.frame_type][mode];
    this_rd = RDCOST(x->rdmult, x->rddiv, this_rate, this_distortion);

    if (this_rd < best_rd) {
      mode_selected = mode;
      best_rd = this_rd;
      *rate = this_rate;
      *rate_tokenonly = this_rate_tokenonly;
      *distortion = this_distortion;
      *skippable = s;
    }
  }

  x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;

  return best_rd;
}

static int64_t rd_sbuv_dcpred(VP9_COMP *cpi, MACROBLOCK *x,
                              int *rate, int *rate_tokenonly,
                              int64_t *distortion, int *skippable,
                              BLOCK_SIZE_TYPE bsize) {
  int64_t this_rd;

  x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
  super_block_uvrd(&cpi->common, x, rate_tokenonly,
                   distortion, skippable, NULL, bsize);
  *rate = *rate_tokenonly +
          x->intra_uv_mode_cost[cpi->common.frame_type][DC_PRED];
  this_rd = RDCOST(x->rdmult, x->rddiv, *rate, *distortion);

  return this_rd;
}

static void choose_intra_uv_mode(VP9_COMP *cpi, BLOCK_SIZE_TYPE bsize,
                                 int *rate_uv, int *rate_uv_tokenonly,
                                 int64_t *dist_uv, int *skip_uv,
                                 MB_PREDICTION_MODE *mode_uv) {
  MACROBLOCK *const x = &cpi->mb;

  // Use an estimated rd for uv_intra based on DC_PRED if the
  // appropriate speed flag is set.
  if (cpi->sf.use_uv_intra_rd_estimate) {
    rd_sbuv_dcpred(cpi, x, rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
                   (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8 : bsize);
  // Else do a proper rd search for each possible transform size that may
  // be considered in the main rd loop.
  } else {
    rd_pick_intra_sbuv_mode(cpi, x,
                            rate_uv, rate_uv_tokenonly, dist_uv, skip_uv,
                            (bsize < BLOCK_SIZE_SB8X8) ? BLOCK_SIZE_SB8X8
                                                       : bsize);
  }
  *mode_uv = x->e_mbd.mode_info_context->mbmi.uv_mode;
}

static int cost_mv_ref(VP9_COMP *cpi, MB_PREDICTION_MODE mode,
                       int mode_context) {
  MACROBLOCK *const x = &cpi->mb;
  MACROBLOCKD *const xd = &x->e_mbd;
  const int segment_id = xd->mode_info_context->mbmi.segment_id;

  // Don't account for mode here if segment skip is enabled.
  if (!vp9_segfeature_active(&xd->seg, segment_id, SEG_LVL_SKIP)) {
    assert(is_inter_mode(mode));
    return x->inter_mode_cost[mode_context][mode - NEARESTMV];
  } else {
    return 0;
  }
}

void vp9_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb,
                            int_mv *mv) {
  x->e_mbd.mode_info_context->mbmi.mode = mb;
  x->e_mbd.mode_info_context->mbmi.mv[0].as_int = mv->as_int;
}

static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                                BLOCK_SIZE_TYPE bsize,
                                int_mv *frame_mv,
                                int mi_row, int mi_col,
                                int_mv single_newmv[MAX_REF_FRAMES],
                                int *rate_mv);

static void single_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
                                 BLOCK_SIZE_TYPE bsize,
                                 int mi_row, int mi_col,
                                 int_mv *tmp_mv, int *rate_mv);

static int labels2mode(MACROBLOCK *x, int i,
                       MB_PREDICTION_MODE this_mode,
                       int_mv *this_mv, int_mv *this_second_mv,
                       int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                       int_mv seg_mvs[MAX_REF_FRAMES],
                       int_mv *best_ref_mv,
                       int_mv *second_best_ref_mv,
                       int *mvjcost, int *mvcost[2], VP9_COMP *cpi) {
  MACROBLOCKD *const xd = &x->e_mbd;
  MODE_INFO *const mic = xd->mode_info_context;
  MB_MODE_INFO * mbmi = &mic->mbmi;
  int cost = 0, thismvcost = 0;
  int idx, idy;
  int num_4x4_blocks_wide = num_4x4_blocks_wide_lookup[mbmi->sb_type];
  int num_4x4_blocks_high = num_4x4_blocks_high_lookup[mbmi->sb_type];

  /* We have to be careful retrieving previously-encoded motion vectors.
     Ones from this macroblock have to be pulled from the BLOCKD array
     as they have not yet made it to the bmi array in our MB_MODE_INFO. */
  MB_PREDICTION_MODE m;

  // the only time we should do costing for new motion vector or mode
  // is when we are on a new label  (jbb May 08, 2007)
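  // Only NEWMV pays an explicit motion vector cost (via vp9_mv_bit_cost);
  // NEARESTMV/NEARMV/ZEROMV reuse predicted or zero vectors, so the switch
  // below just fills in the vectors for those modes.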
  switch (m = this_mode) {
    case NEWMV:
      this_mv->as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
      thismvcost = vp9_mv_bit_cost(this_mv, best_ref_mv, mvjcost, mvcost,
                                   102);
      if (mbmi->ref_frame[1] > 0) {
        this_second_mv->as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
        thismvcost += vp9_mv_bit_cost(this_second_mv, second_best_ref_mv,
                                      mvjcost, mvcost, 102);
      }
      break;
    case NEARESTMV:
      this_mv->as_int = frame_mv[NEARESTMV][mbmi->ref_frame[0]].as_int;
      if (mbmi->ref_frame[1] > 0)
        this_second_mv->as_int =
            frame_mv[NEARESTMV][mbmi->ref_frame[1]].as_int;
      break;
    case NEARMV:
      this_mv->as_int = frame_mv[NEARMV][mbmi->ref_frame[0]].as_int;
      if (mbmi->ref_frame[1] > 0)
        this_second_mv->as_int =
            frame_mv[NEARMV][mbmi->ref_frame[1]].as_int;
      break;
    case ZEROMV:
      this_mv->as_int = 0;
      if (mbmi->ref_frame[1] > 0)
        this_second_mv->as_int = 0;
      break;
    default:
      break;
  }

  cost = cost_mv_ref(cpi, this_mode,
                     mbmi->mb_mode_context[mbmi->ref_frame[0]]);

  mic->bmi[i].as_mv[0].as_int = this_mv->as_int;
  if (mbmi->ref_frame[1] > 0)
    mic->bmi[i].as_mv[1].as_int = this_second_mv->as_int;

  x->partition_info->bmi[i].mode = m;
  for (idy = 0; idy < num_4x4_blocks_high; ++idy)
    for (idx = 0; idx < num_4x4_blocks_wide; ++idx)
      vpx_memcpy(&mic->bmi[i + idy * 2 + idx],
                 &mic->bmi[i], sizeof(mic->bmi[i]));

  cost += thismvcost;
  return cost;
}

static int64_t encode_inter_mb_segment(VP9_COMP *cpi,
                                       MACROBLOCK *x,
                                       int64_t best_yrd,
                                       int i,
                                       int *labelyrate,
                                       int64_t *distortion, int64_t *sse,
                                       ENTROPY_CONTEXT *ta,
                                       ENTROPY_CONTEXT *tl) {
  int k;
  MACROBLOCKD *xd = &x->e_mbd;
  BLOCK_SIZE_TYPE bsize = xd->mode_info_context->mbmi.sb_type;
  const int width = plane_block_width(bsize, &xd->plane[0]);
  const int height = plane_block_height(bsize, &xd->plane[0]);
  int idx, idy;
  const int src_stride = x->plane[0].src.stride;
  uint8_t* const src = raster_block_offset_uint8(xd, BLOCK_SIZE_SB8X8, 0, i,
                                                 x->plane[0].src.buf,
                                                 src_stride);
  int16_t* src_diff = raster_block_offset_int16(xd, BLOCK_SIZE_SB8X8, 0, i,
                                                x->plane[0].src_diff);
  int16_t* coeff = BLOCK_OFFSET(x->plane[0].coeff, 16, i);