/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "vp8/common/header.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/reconintra4x4.h"
#include "vp8/common/recon.h"
#include "vp8/common/reconinter.h"
#include "vp8/common/invtrans.h"
#include "vp8/common/alloccommon.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/quant_common.h"
#include "vpx_scale/vpxscale.h"
#include "vpx_scale/yv12extend.h"
#include "dequantize.h"
#include "dboolhuff.h"
#include "vp8/common/entropy.h"
#define COEFCOUNT_TESTING
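/* merge_index() is the decoder-side inverse of the encoder's index merging
 * for coded probability updates: it interleaves the modulus-spaced "coarse"
 * values back with the remaining fine-grained values, so that together with
 * inv_remap_prob() below the original probability can be recovered.
 */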
static int merge_index(int v, int n, int modulus) {
  int max1 = (n - 1 - modulus / 2) / modulus + 1;
  if (v < max1) v = v * modulus + modulus / 2;
  else {
    int w;
    v -= max1;
    w = v;
    v += (v + modulus - modulus / 2) / modulus;
    while (v % modulus == modulus / 2 ||
           w != v - (v + modulus - modulus / 2) / modulus) v++;
  }
  return v;
}
static int inv_remap_prob(int v, int m) {
  const int n = 256;
  const int modulus = MODULUS_PARAM;
  int i;
  v = merge_index(v, n - 1, modulus);
  if ((m << 1) <= n) {
    i = inv_recenter_nonneg(v + 1, m);
  } else {
    i = n - 1 - inv_recenter_nonneg(v + 1, n - 1 - m);
  }
  return i;
}
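/* Read a probability-delta coded as a terminated subexponential value and
 * map it back to an absolute probability, relative to the old value oldp.
 */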
static vp8_prob read_prob_diff_update(vp8_reader *const bc, int oldp) {
  int delp = vp8_decode_term_subexp(bc, SUBEXP_PARAM, 255);
  return (vp8_prob)inv_remap_prob(delp, oldp);
}
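/* Build the dequantization tables for every base Q index. Entry 0 of each
 * table is the DC step; entries 1..15 (addressed in zig-zag order) are the
 * AC steps, each adjusted by its per-plane delta-q.
 */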
void vp8cx_init_de_quantizer(VP8D_COMP *pbi) {
  int i;
  int Q;
  VP8_COMMON *const pc = &pbi->common;

  for (Q = 0; Q < QINDEX_RANGE; Q++) {
    pc->Y1dequant[Q][0] = (short)vp8_dc_quant(Q, pc->y1dc_delta_q);
    pc->Y2dequant[Q][0] = (short)vp8_dc2quant(Q, pc->y2dc_delta_q);
    pc->UVdequant[Q][0] = (short)vp8_dc_uv_quant(Q, pc->uvdc_delta_q);

    /* all the AC values */
    for (i = 1; i < 16; i++) {
      int rc = vp8_default_zig_zag1d[i];

      pc->Y1dequant[Q][rc] = (short)vp8_ac_yquant(Q);
      pc->Y2dequant[Q][rc] = (short)vp8_ac2quant(Q, pc->y2ac_delta_q);
      pc->UVdequant[Q][rc] = (short)vp8_ac_uv_quant(Q, pc->uvac_delta_q);
    }
  }
}
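/* Select the dequantizer tables for the current macroblock, allowing for a
 * segment-level Q adjustment (absolute or delta) when segmentation is active.
 */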
void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd) {
  int i;
  int QIndex;
  VP8_COMMON *const pc = &pbi->common;
  int segment_id = xd->mode_info_context->mbmi.segment_id;

  // Set the Q baseline allowing for any segment level adjustment
  if (segfeature_active(xd, segment_id, SEG_LVL_ALT_Q)) {
    /* Abs Value */
    if (xd->mb_segment_abs_delta == SEGMENT_ABSDATA)
      QIndex = get_segdata(xd, segment_id, SEG_LVL_ALT_Q);
    /* Delta Value */
    else {
      QIndex = pc->base_qindex +
               get_segdata(xd, segment_id, SEG_LVL_ALT_Q);
      QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; /* Clamp to valid range */
    }
  } else
    QIndex = pc->base_qindex;

  /* Set up the block level dequant pointers */
  for (i = 0; i < 16; i++) {
    xd->block[i].dequant = pc->Y1dequant[QIndex];
  }
#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM8X8 || CONFIG_HYBRIDTRANSFORM16X16
if (!QIndex) {
pbi->common.rtcd.idct.idct1 = vp8_short_inv_walsh4x4_1_x8_c;
pbi->common.rtcd.idct.idct16 = vp8_short_inv_walsh4x4_x8_c;
pbi->common.rtcd.idct.idct1_scalar_add = vp8_dc_only_inv_walsh_add_c;
pbi->common.rtcd.idct.iwalsh1 = vp8_short_inv_walsh4x4_1_lossless_c;
pbi->common.rtcd.idct.iwalsh16 = vp8_short_inv_walsh4x4_lossless_c;
pbi->dequant.idct_add = vp8_dequant_idct_add_lossless_c;
pbi->dequant.dc_idct_add = vp8_dequant_dc_idct_add_lossless_c;
pbi->dequant.dc_idct_add_y_block = vp8_dequant_dc_idct_add_y_block_lossless_c;
pbi->dequant.idct_add_y_block = vp8_dequant_idct_add_y_block_lossless_c;
pbi->dequant.idct_add_uv_block = vp8_dequant_idct_add_uv_block_lossless_c;
} else {
pbi->common.rtcd.idct.idct1 = vp8_short_idct4x4llm_1_c;
pbi->common.rtcd.idct.idct16 = vp8_short_idct4x4llm_c;
pbi->common.rtcd.idct.idct1_scalar_add = vp8_dc_only_idct_add_c;
pbi->common.rtcd.idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
pbi->common.rtcd.idct.iwalsh16 = vp8_short_inv_walsh4x4_c;
pbi->dequant.idct_add = vp8_dequant_idct_add_c;
pbi->dequant.dc_idct_add = vp8_dequant_dc_idct_add_c;
pbi->dequant.dc_idct_add_y_block = vp8_dequant_dc_idct_add_y_block_c;
pbi->dequant.idct_add_y_block = vp8_dequant_idct_add_y_block_c;
pbi->dequant.idct_add_uv_block = vp8_dequant_idct_add_uv_block_c;
  }
#endif
for (i = 16; i < 24; i++) {
xd->block[i].dequant = pc->UVdequant[QIndex];
}
}
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD_VTABLE(x) (&(pbi)->common.rtcd.x)
#else
#define RTCD_VTABLE(x) NULL
#endif
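/* With runtime CPU detection the *_INVOKE macros dispatch through the
 * decoder's vtable; e.g. IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh16)(...) loads
 * the iwalsh16 pointer from pbi->common.rtcd.idct. Without it the vtable
 * argument is NULL and the macros resolve to direct calls at compile time.
 */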
/* skip_recon_mb(): instead of writing the prediction to the predictor buffer
 * and then copying it to the dst buffer, write it directly to the dst buffer.
 * This eliminates an unnecessary copy.
 */
static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd) {
  if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS
    if (xd->mode_info_context->mbmi.encoded_as_sb) {
      RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sbuv_s)(xd);
      RECON_INVOKE(&pbi->common.rtcd.recon,
                   build_intra_predictors_sby_s)(xd);
    } else {
#endif
      RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv_s)(xd);
      RECON_INVOKE(&pbi->common.rtcd.recon,
                   build_intra_predictors_mby_s)(xd);
#if CONFIG_SUPERBLOCKS
    }
#endif
  } else {
#if CONFIG_SUPERBLOCKS
    if (xd->mode_info_context->mbmi.encoded_as_sb) {
      vp8_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
                                         xd->dst.u_buffer, xd->dst.v_buffer,
                                         xd->dst.y_stride, xd->dst.uv_stride);
    } else {
#endif
      vp8_build_1st_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                             xd->dst.u_buffer, xd->dst.v_buffer,
                                             xd->dst.y_stride, xd->dst.uv_stride);
      if (xd->mode_info_context->mbmi.second_ref_frame) {
        vp8_build_2nd_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                               xd->dst.u_buffer, xd->dst.v_buffer,
                                               xd->dst.y_stride, xd->dst.uv_stride);
      }
#if CONFIG_SUPERBLOCKS
    }
#endif
  }
}
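/* Decode a single macroblock: choose the transform size, detokenize the
 * coefficients, build the intra or inter prediction, then apply the
 * dequantization and inverse transforms into the destination buffer.
 */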
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                              unsigned int mb_col) {
  int eobtotal = 0;
  MB_PREDICTION_MODE mode;
  int i;
  int tx_type;
#if CONFIG_SUPERBLOCKS
VP8_COMMON *pc = &pbi->common;
int orig_skip_flag = xd->mode_info_context->mbmi.mb_skip_coeff;
#endif
#if CONFIG_HYBRIDTRANSFORM || CONFIG_HYBRIDTRANSFORM16X16
  int QIndex;
  int active_ht;
#endif
#if CONFIG_HYBRIDTRANSFORM16X16
  int active_ht16;
#endif
// re-initialize macroblock dequantizer before detokenization
if (xd->segmentation_enabled)
mb_init_dequantizer(pbi, xd);
  if (pbi->common.frame_type == KEY_FRAME) {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
    if (xd->mode_info_context->mbmi.mode <= TM_PRED ||
        xd->mode_info_context->mbmi.mode == NEWMV ||
        xd->mode_info_context->mbmi.mode == ZEROMV ||
        xd->mode_info_context->mbmi.mode == NEARMV ||
        xd->mode_info_context->mbmi.mode == NEARESTMV)
      xd->mode_info_context->mbmi.txfm_size = TX_16X16;
    else
#endif
    if (pbi->common.txfm_mode == ALLOW_8X8 &&
        xd->mode_info_context->mbmi.mode != B_PRED)
      xd->mode_info_context->mbmi.txfm_size = TX_8X8;
    else
      xd->mode_info_context->mbmi.txfm_size = TX_4X4;
  } else {
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
    if (xd->mode_info_context->mbmi.mode <= TM_PRED ||
        xd->mode_info_context->mbmi.mode == NEWMV ||
        xd->mode_info_context->mbmi.mode == ZEROMV ||
        xd->mode_info_context->mbmi.mode == NEARMV ||
        xd->mode_info_context->mbmi.mode == NEARESTMV)
      xd->mode_info_context->mbmi.txfm_size = TX_16X16;
    else
#endif
    if (pbi->common.txfm_mode == ALLOW_8X8 &&
        xd->mode_info_context->mbmi.mode != B_PRED &&
        xd->mode_info_context->mbmi.mode != SPLITMV)
      xd->mode_info_context->mbmi.txfm_size = TX_8X8;
    else
      xd->mode_info_context->mbmi.txfm_size = TX_4X4;
  }

#if CONFIG_SUPERBLOCKS
  if (xd->mode_info_context->mbmi.encoded_as_sb) {
    xd->mode_info_context->mbmi.txfm_size = TX_8X8;
  }
#endif

  tx_type = xd->mode_info_context->mbmi.txfm_size;
  /* ... */
mode = xd->mode_info_context->mbmi.mode;
#if CONFIG_HYBRIDTRANSFORM
// parse transform types for intra 4x4 mode
QIndex = xd->q_index;
active_ht = (QIndex < ACTIVE_HT);
if (mode == B_PRED) {
for (i = 0; i < 16; i++) {
BLOCKD *b = &xd->block[i];
int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
if (active_ht)
txfm_map(b, b_mode);
} // loop over 4x4 blocks
}
#endif
#if CONFIG_HYBRIDTRANSFORM8X8
if (mode == I8X8_PRED) {
for (i = 0; i < 4; i++) {
int ib = vp8_i8x8_block[i];
BLOCKD *b = &xd->block[ib];
int i8x8mode = b->bmi.as_mode.first;
txfm_map(b, pred_mode_conv(i8x8mode));
}
}
#endif
#if CONFIG_HYBRIDTRANSFORM16X16
active_ht16 = (QIndex < ACTIVE_HT16);
if (mode < I8X8_PRED) {
BLOCKD *b = &xd->block[0];
if (active_ht16)
txfm_map(b, pred_mode_conv(mode));
}
#endif
if (xd->mode_info_context->mbmi.mb_skip_coeff) {
vp8_reset_mb_tokens_context(xd);
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
xd->above_context++;
xd->left_context++;
vp8_reset_mb_tokens_context(xd);
xd->above_context--;
xd->left_context--;
}
#endif
  } else if (!vp8dx_bool_error(xd->current_bc)) {
    for (i = 0; i < 25; i++) {
      xd->block[i].eob = 0;
      xd->eobs[i] = 0;
    }

#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
    if (tx_type == TX_16X16)
      eobtotal = vp8_decode_mb_tokens_16x16(pbi, xd);
    else
#endif
    if (tx_type == TX_8X8)
      eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
    else
      eobtotal = vp8_decode_mb_tokens(pbi, xd);
  }
//mode = xd->mode_info_context->mbmi.mode;
#if CONFIG_SWITCHABLE_INTERP
if (pbi->common.frame_type != KEY_FRAME)
vp8_setup_interp_filters(xd, xd->mode_info_context->mbmi.interp_filter,
&pbi->common);
#endif
  if (eobtotal == 0 && mode != B_PRED && mode != SPLITMV &&
      mode != I8X8_PRED && !vp8dx_bool_error(xd->current_bc)) {
    /* Special case: Force the loopfilter to skip when eobtotal and
     * mb_skip_coeff are zero.
     */
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
#if CONFIG_SUPERBLOCKS
if (!xd->mode_info_context->mbmi.encoded_as_sb || orig_skip_flag)
#endif
{
skip_recon_mb(pbi, xd);
return;
    }
  }
// moved to be performed before detokenization
// if (xd->segmentation_enabled)
// mb_init_dequantizer(pbi, xd);
/* do prediction */
  if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
#if CONFIG_SUPERBLOCKS
    if (xd->mode_info_context->mbmi.encoded_as_sb) {
      RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sby_s)(xd);
      RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_sbuv_s)(xd);
    } else
#endif
    if (mode != I8X8_PRED) {
      RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv)(xd);
      if (mode != B_PRED) {
        RECON_INVOKE(&pbi->common.rtcd.recon,
                     build_intra_predictors_mby)(xd);
      }
      // Intra-modes requiring recon data from top-right
      // MB have been temporarily disabled.
      else {
        vp8_intra_prediction_down_copy(xd);
      }
    }
  } else {
#if CONFIG_SUPERBLOCKS
    if (xd->mode_info_context->mbmi.encoded_as_sb) {
      vp8_build_inter32x32_predictors_sb(xd, xd->dst.y_buffer,
                                         xd->dst.u_buffer, xd->dst.v_buffer,
                                         xd->dst.y_stride, xd->dst.uv_stride);
    } else
#endif
      vp8_build_inter_predictors_mb(xd);
  }
  /* dequantization and idct */
  if (mode == I8X8_PRED) {
    for (i = 0; i < 4; i++) {
      int ib = vp8_i8x8_block[i];
      int idx = (ib & 0x02) ? (ib + 2) : ib;
      short *q = xd->block[idx].qcoeff;
      short *dq = xd->block[0].dequant;
      unsigned char *pre = xd->block[ib].predictor;
      unsigned char *dst = *(xd->block[ib].base_dst) + xd->block[ib].dst;
      int stride = xd->dst.y_stride;
      BLOCKD *b = &xd->block[ib];
      int i8x8mode = b->bmi.as_mode.first;
      RECON_INVOKE(RTCD_VTABLE(recon), intra8x8_predict)
        (b, i8x8mode, b->predictor);
#if CONFIG_HYBRIDTRANSFORM8X8
      vp8_ht_dequant_idct_add_8x8_c(b->bmi.as_mode.tx_type,
                                    q, dq, pre, dst, 16, stride);
#else
      vp8_dequant_idct_add_8x8_c(q, dq, pre, dst, 16, stride);
#endif
      q += 64;

      b = &xd->block[16 + i];
      RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)
        (b, i8x8mode, b->predictor);
      DEQUANT_INVOKE(&pbi->dequant, idct_add)
        (b->qcoeff, b->dequant, b->predictor,
         *(b->base_dst) + b->dst, 8, b->dst_stride);

      b = &xd->block[20 + i];
      RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)
        (b, i8x8mode, b->predictor);
      DEQUANT_INVOKE(&pbi->dequant, idct_add)
        (b->qcoeff, b->dequant, b->predictor,
         *(b->base_dst) + b->dst, 8, b->dst_stride);
    }
  } else if (mode == B_PRED) {
    for (i = 0; i < 16; i++) {
      BLOCKD *b = &xd->block[i];
      int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
#if CONFIG_COMP_INTRA_PRED
      int b_mode2 = xd->mode_info_context->bmi[i].as_mode.second;

      if (b_mode2 == (B_PREDICTION_MODE)(B_DC_PRED - 1)) {
#endif
        RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
          (b, b_mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
      } else {
        RECON_INVOKE(RTCD_VTABLE(recon), comp_intra4x4_predict)
          (b, b_mode, b_mode2, b->predictor);
      }
#endif

#if CONFIG_HYBRIDTRANSFORM
      if (active_ht)
        vp8_ht_dequant_idct_add_c((TX_TYPE)b->bmi.as_mode.tx_type, b->qcoeff,
                                  b->dequant, b->predictor,
                                  *(b->base_dst) + b->dst, 16, b->dst_stride);
      else
        vp8_dequant_idct_add_c(b->qcoeff, b->dequant, b->predictor,
                               *(b->base_dst) + b->dst, 16, b->dst_stride);
#else
      if (xd->eobs[i] > 1) {
        DEQUANT_INVOKE(&pbi->dequant, idct_add)
          (b->qcoeff, b->dequant, b->predictor,
           *(b->base_dst) + b->dst, 16, b->dst_stride);
      } else {
        IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add)
          (b->qcoeff[0] * b->dequant[0], b->predictor,
           *(b->base_dst) + b->dst, 16, b->dst_stride);
        ((int *)b->qcoeff)[0] = 0;
      }
#endif
    }
} else if (mode == SPLITMV) {
DEQUANT_INVOKE(&pbi->dequant, idct_add_y_block)
(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs);
} else {
BLOCKD *b = &xd->block[24];
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
#if CONFIG_HYBRIDTRANSFORM16X16
if (mode < I8X8_PRED && active_ht16) {
BLOCKD *bd = &xd->block[0];
TX_TYPE txfm;
txfm_map(bd, pred_mode_conv(mode));
txfm = bd->bmi.as_mode.tx_type;
vp8_ht_dequant_idct_add_16x16_c(txfm, xd->qcoeff,
xd->block[0].dequant, xd->predictor,
xd->dst.y_buffer, 16, xd->dst.y_stride);
} else {
vp8_dequant_idct_add_16x16_c(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
16, xd->dst.y_stride);
}
#else
vp8_dequant_idct_add_16x16_c(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
16, xd->dst.y_stride);
      /* ... */
#if CONFIG_SUPERBLOCKS
void *orig = xd->mode_info_context;
int n, num = xd->mode_info_context->mbmi.encoded_as_sb ? 4 : 1;
for (n = 0; n < num; n++) {
if (n != 0) {
for (i = 0; i < 25; i++) {
xd->block[i].eob = 0;
xd->eobs[i] = 0;
}
xd->above_context = pc->above_context + mb_col + (n & 1);
xd->left_context = pc->left_context + (n >> 1);
xd->mode_info_context = orig;
xd->mode_info_context += (n & 1);
xd->mode_info_context += (n >> 1) * pc->mode_info_stride;
if (!orig_skip_flag) {
eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
if (eobtotal == 0) // skip loopfilter
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
} else {
vp8_reset_mb_tokens_context(xd);
}
}
if (xd->mode_info_context->mbmi.mb_skip_coeff)
continue; // only happens for SBs, which are already in dest buffer
#endif
DEQUANT_INVOKE(&pbi->dequant, block_2x2)(b);
IDCT_INVOKE(RTCD_VTABLE(idct), ihaar2)(&b->dqcoeff[0], b->diff, 8);
      ((int *)b->qcoeff)[0] = 0;  // 2nd order block coeffs are zeroed after the inverse transform
((int *)b->qcoeff)[1] = 0;
((int *)b->qcoeff)[2] = 0;
((int *)b->qcoeff)[3] = 0;
((int *)b->qcoeff)[4] = 0;
((int *)b->qcoeff)[5] = 0;
((int *)b->qcoeff)[6] = 0;
((int *)b->qcoeff)[7] = 0;
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
vp8_dequant_dc_idct_add_y_block_8x8_inplace_c(xd->qcoeff,
xd->block[0].dequant,
xd->dst.y_buffer + (n >> 1) * 16 * xd->dst.y_stride + (n & 1) * 16,
xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd);
// do UV inline also
vp8_dequant_idct_add_uv_block_8x8_inplace_c(xd->qcoeff + 16 * 16,
xd->block[16].dequant,
xd->dst.u_buffer + (n >> 1) * 8 * xd->dst.uv_stride + (n & 1) * 8,
xd->dst.v_buffer + (n >> 1) * 8 * xd->dst.uv_stride + (n & 1) * 8,
xd->dst.uv_stride, xd->eobs + 16, xd);
} else
#endif
DEQUANT_INVOKE(&pbi->dequant, dc_idct_add_y_block_8x8)(xd->qcoeff,
xd->block[0].dequant, xd->predictor, xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd);
#if CONFIG_SUPERBLOCKS
}
xd->mode_info_context = orig;
#endif
} else {
DEQUANT_INVOKE(&pbi->dequant, block)(b);
if (xd->eobs[24] > 1) {
IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh16)(&b->dqcoeff[0], b->diff);
((int *)b->qcoeff)[0] = 0;
((int *)b->qcoeff)[1] = 0;
((int *)b->qcoeff)[2] = 0;
((int *)b->qcoeff)[3] = 0;
((int *)b->qcoeff)[4] = 0;
((int *)b->qcoeff)[5] = 0;
((int *)b->qcoeff)[6] = 0;
((int *)b->qcoeff)[7] = 0;
} else {
IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh1)(&b->dqcoeff[0], b->diff);
((int *)b->qcoeff)[0] = 0;
}
DEQUANT_INVOKE(&pbi->dequant, dc_idct_add_y_block)
(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs, xd->block[24].diff);
#if CONFIG_SUPERBLOCKS
    if (!xd->mode_info_context->mbmi.encoded_as_sb) {
#endif
      if ((tx_type == TX_8X8 &&
           xd->mode_info_context->mbmi.mode != I8X8_PRED)
#if CONFIG_TX16X16 || CONFIG_HYBRIDTRANSFORM16X16
          || tx_type == TX_16X16
#endif
         )
        DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block_8x8)
          (xd->qcoeff + 16 * 16, xd->block[16].dequant,
           xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer,
           xd->dst.uv_stride, xd->eobs + 16, xd);
      else if (xd->mode_info_context->mbmi.mode != I8X8_PRED)
        DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block)
          (xd->qcoeff + 16 * 16, xd->block[16].dequant,
           xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer,
           xd->dst.uv_stride, xd->eobs + 16);
#if CONFIG_SUPERBLOCKS
    }
#endif
  }
}
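/* Read an optional delta-q: a flag bit, then a 4-bit magnitude and a sign
 * bit. *q_update is raised when the value differs from prev so the caller
 * knows to rebuild the dequantization tables.
 */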
static int get_delta_q(vp8_reader *bc, int prev, int *q_update) {
  int ret_val = 0;

  if (vp8_read_bit(bc)) {
    ret_val = vp8_read_literal(bc, 4);

    if (vp8_read_bit(bc))
      ret_val = -ret_val;
  }

  /* Trigger a quantizer update if the delta-q value has changed */
  if (ret_val != prev)
    *q_update = 1;

  return ret_val;
}
#ifdef PACKET_TESTING
#include <stdio.h>
FILE *vpxlog = 0;
#endif
/* Decode a row of Superblocks (2x2 region of MBs) */
static void
decode_sb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mbrow, MACROBLOCKD *xd) {
int i;
int sb_col;
int mb_row, mb_col;
int recon_yoffset, recon_uvoffset;
int ref_fb_idx = pc->lst_fb_idx;
int dst_fb_idx = pc->new_fb_idx;
int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride;
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = { +1, -1, +1, +1};
int sb_cols = (pc->mb_cols + 1) >> 1;
  // For a SB there are 2 left contexts, each pertaining to one MB row within
  // the SB.
vpx_memset(pc->left_context, 0, sizeof(pc->left_context));
mb_row = mbrow;
mb_col = 0;
for (sb_col = 0; sb_col < sb_cols; sb_col++) {
MODE_INFO *mi = xd->mode_info_context;
#if CONFIG_SUPERBLOCKS
if (pbi->interleaved_decoding)
mi->mbmi.encoded_as_sb = vp8_read(&pbi->bc, pc->sb_coded);
#endif
// Process the 4 MBs within the SB in the order:
// top-left, top-right, bottom-left, bottom-right
for (i = 0; i < 4; i++) {
int dy = row_delta[i];
int dx = col_delta[i];
int offset_extended = dy * xd->mode_info_stride + dx;
      if ((mb_row >= pc->mb_rows) || (mb_col >= pc->mb_cols)) {
        // MB lies outside frame, skip on to next
        mb_row += dy;
        mb_col += dx;
        xd->mode_info_context += offset_extended;
        xd->prev_mode_info_context += offset_extended;
        continue;
      }
// Set above context pointer
xd->above_context = pc->above_context + mb_col;
/* Distance of Mb to the various image edges.
* These are specified to 8th pel as they are always compared to
* values that are in 1/8th pel units
*/
      xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
xd->up_available = (mb_row != 0);
xd->left_available = (mb_col != 0);
recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
#if CONFIG_SUPERBLOCKS
if (i)
mi->mbmi.encoded_as_sb = 0;
#endif
      if (pbi->interleaved_decoding)
vpx_decode_mb_mode_mv(pbi, xd, mb_row, mb_col);
update_blockd_bmi(xd);
      /* ... */
/* Select the appropriate reference frame for this MB */
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx;
else
ref_fb_idx = pc->alt_fb_idx;
xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
if (xd->mode_info_context->mbmi.second_ref_frame) {
int second_ref_fb_idx;
/* Select the appropriate reference frame for this MB */
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = pc->lst_fb_idx;
else if (xd->mode_info_context->mbmi.second_ref_frame ==
GOLDEN_FRAME)
second_ref_fb_idx = pc->gld_fb_idx;
else
second_ref_fb_idx = pc->alt_fb_idx;
xd->second_pre.y_buffer =
pc->yv12_fb[second_ref_fb_idx].y_buffer + recon_yoffset;
xd->second_pre.u_buffer =
pc->yv12_fb[second_ref_fb_idx].u_buffer + recon_uvoffset;
xd->second_pre.v_buffer =
pc->yv12_fb[second_ref_fb_idx].v_buffer + recon_uvoffset;
}
if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME) {
/* propagate errors from reference frames */
xd->corrupted |= pc->yv12_fb[ref_fb_idx].corrupted;
}
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
mi[1] = mi[0];
mi[pc->mode_info_stride] = mi[0];
mi[pc->mode_info_stride + 1] = mi[0];
}
#endif
decode_macroblock(pbi, xd, mb_col);
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
mi[1].mbmi.txfm_size = mi[0].mbmi.txfm_size;
mi[pc->mode_info_stride].mbmi.txfm_size = mi[0].mbmi.txfm_size;
mi[pc->mode_info_stride + 1].mbmi.txfm_size = mi[0].mbmi.txfm_size;
}
#endif
/* check if the boolean decoder has suffered an error */
xd->corrupted |= vp8dx_bool_error(xd->current_bc);
#if CONFIG_SUPERBLOCKS
if (mi->mbmi.encoded_as_sb) {
assert(!i);
mb_col += 2;
xd->mode_info_context += 2;
xd->prev_mode_info_context += 2;
break;
}
#endif
      // skip to next MB
      xd->mode_info_context += offset_extended;
      xd->prev_mode_info_context += offset_extended;
      mb_row += dy;
      mb_col += dx;
    }
  }

  /* skip prediction column */
  xd->mode_info_context += 1 - (pc->mb_cols & 0x1) + xd->mode_info_stride;
  xd->prev_mode_info_context += 1 - (pc->mb_cols & 0x1) + xd->mode_info_stride;
}
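/* Token partition sizes are stored as 3-byte little-endian values. */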
static unsigned int read_partition_size(const unsigned char *cx_size) {
  const unsigned int size =
    cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16);
  return size;
}
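/* Check that reading len bytes from start stays within the buffer; the first
 * comparison also rejects a len that would wrap the pointer around.
 */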
static int read_is_valid(const unsigned char *start,
                         size_t len,
                         const unsigned char *end) {
  return (start + len > start && start + len <= end);
}
/* ... */
static void setup_token_decoder(VP8D_COMP *pbi,
                                const unsigned char *cx_data) {
VP8_COMMON *pc = &pbi->common;
const unsigned char *user_data_end = pbi->Source + pbi->source_sz;
vp8_reader *bool_decoder;
const unsigned char *partition;
ptrdiff_t partition_size;
ptrdiff_t bytes_left;
// Dummy read for now
vp8_read_literal(&pbi->bc, 2);
// Set up pointers to token partition
partition = cx_data;
bool_decoder = &pbi->bc2;
bytes_left = user_data_end - partition;
partition_size = bytes_left;
/* Validate the calculated partition length. If the buffer
* described by the partition can't be fully read, then restrict
* it to the portion that can be (for EC mode) or throw an error.
*/
if (!read_is_valid(partition, partition_size, user_data_end)) {
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition "
"%d length", 1);
}
if (vp8dx_start_decode(bool_decoder, partition, partition_size))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder %d", 1);
static void init_frame(VP8D_COMP *pbi) {
VP8_COMMON *const pc = & pbi->common;
MACROBLOCKD *const xd = & pbi->mb;
if (pc->frame_type == KEY_FRAME) {
/* Various keyframe initializations */
vp8_default_coef_probs(pc);
vp8_kf_default_bmode_probs(pc->kf_bmode_prob);
// Reset the segment feature data to the default stats:
// Features disabled, 0, with delta coding (Default state).
clearall_segfeatures(xd);
/* reset the mode ref deltasa for loop filter */
vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
/* All buffers are implicitly updated on key frames. */
pc->refresh_golden_frame = 1;
pc->refresh_alt_ref_frame = 1;
pc->copy_buffer_to_gf = 0;
pc->copy_buffer_to_arf = 0;
/* Note that Golden and Altref modes cannot be used on a key frame so
* ref_frame_sign_bias[] is undefined and meaningless
*/
pc->ref_frame_sign_bias[GOLDEN_FRAME] = 0;
pc->ref_frame_sign_bias[ALTREF_FRAME] = 0;
vp8_init_mode_contexts(&pbi->common);
vpx_memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
vpx_memcpy(&pc->lfc_a, &pc->fc, sizeof(pc->fc));
vpx_memcpy(pbi->common.fc.vp8_mode_contexts,
pbi->common.fc.mode_context,
sizeof(pbi->common.fc.mode_context));
} else {
if (!pc->use_bilinear_mc_filter)
pc->mcomp_filter_type = EIGHTTAP;
else
pc->mcomp_filter_type = BILINEAR;
/* To enable choice of different interpolation filters */
    vp8_setup_interp_filters(xd, pc->mcomp_filter_type, pc);
  }
xd->prev_mode_info_context = pc->prev_mi;
xd->frame_type = pc->frame_type;
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->mode_info_stride = pc->mode_info_stride;
xd->corrupted = 0; /* init without corruption */
xd->fullpixel_mask = 0xffffffff;
  if (pc->full_pixel)
    xd->fullpixel_mask = 0xfffffff8;
}
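/* Grouped coefficient-probability update: for each entropy node an update
 * flag is read with probability grpupd; when set, the per-context updates
 * follow as diff-coded values (see read_prob_diff_update above).
 */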
static void read_coef_probs2(VP8D_COMP *pbi) {
const vp8_prob grpupd = 192;
int i, j, k, l;
vp8_reader *const bc = & pbi->bc;
VP8_COMMON *const pc = & pbi->common;
for (l = 0; l < ENTROPY_NODES; l++) {
if (vp8_read(bc, grpupd)) {
// printf("Decoding %d\n", l);
for (i = 0; i < BLOCK_TYPES; i++)
for (j = !i; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
if (k >= 3 && ((i == 0 && j == 1) ||
(i > 0 && j == 0)))
continue;
vp8_prob *const p = pc->fc.coef_probs [i][j][k] + l;
int u = vp8_read(bc, COEF_UPDATE_PROB);
if (u) *p = read_prob_diff_update(bc, *p);
}
}
}
  if (pbi->common.txfm_mode == ALLOW_8X8) {
    for (l = 0; l < ENTROPY_NODES; l++) {
      if (vp8_read(bc, grpupd)) {
        for (i = 0; i < BLOCK_TYPES_8X8; i++)
          for (j = !i; j < COEF_BANDS; j++)
            for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
              if (k >= 3 && ((i == 0 && j == 1) ||
                             (i > 0 && j == 0)))
                continue;
              {
                vp8_prob *const p = pc->fc.coef_probs_8x8 [i][j][k] + l;
                int u = vp8_read(bc, COEF_UPDATE_PROB_8X8);
                if (u) *p = read_prob_diff_update(bc, *p);
              }
            }
      }
    }
  }
}
static void read_coef_probs(VP8D_COMP *pbi) {
int i, j, k, l;
vp8_reader *const bc = & pbi->bc;
VP8_COMMON *const pc = & pbi->common;
{
if (vp8_read_bit(bc)) {
/* read coef probability tree */
for (i = 0; i < BLOCK_TYPES; i++)
for (j = !i; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
if (k >= 3 && ((i == 0 && j == 1) ||
(i > 0 && j == 0)))
continue;
for (l = 0; l < ENTROPY_NODES; l++) {
vp8_prob *const p = pc->fc.coef_probs [i][j][k] + l;
if (vp8_read(bc, COEF_UPDATE_PROB)) {
*p = read_prob_diff_update(bc, *p);
}
}
          }
    }
  }
#if CONFIG_HYBRIDTRANSFORM
{
if (vp8_read_bit(bc)) {
/* read coef probability tree */
for (i = 0; i < BLOCK_TYPES; i++)
for (j = !i; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++) {
if (k >= 3 && ((i == 0 && j == 1) ||
(i > 0 && j == 0)))
continue;
for (l = 0; l < ENTROPY_NODES; l++) {
vp8_prob *const p = pc->fc.hybrid_coef_probs [i][j][k] + l;
if (vp8_read(bc, COEF_UPDATE_PROB)) {