/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vp8/common/header.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/reconintra4x4.h"
#include "vp8/common/recon.h"
#include "vp8/common/reconinter.h"
#include "vp8/common/invtrans.h"
#include "vp8/common/alloccommon.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/quant_common.h"
#include "vpx_scale/vpxscale.h"
#include "vpx_scale/yv12extend.h"
#include "dequantize.h"
#include "dboolhuff.h"
#include "vp8/common/entropy.h"
#ifdef DEC_DEBUG
int dec_debug = 0;
#endif
#define COEFCOUNT_TESTING
static int merge_index(int v, int n, int modulus)
{
int max1 = (n-1 - modulus/2)/modulus + 1;
if (v < max1) v = v * modulus + modulus/2;
else
{
int w;
v -= max1;
w = v;
v += (v + modulus-modulus/2)/modulus;
while (v%modulus == modulus/2 ||
w != v - (v + modulus-modulus/2)/modulus) v++;
}
return v;
}
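/* Note: merge_index() inverts the index reordering applied on the encoder
 * side so that deltas whose remainder is modulus/2 (the "centered" values)
 * receive the shortest subexponential codes. As an illustrative example,
 * assuming modulus == 3: decoded indices 0, 1, 2, ... below max1 map back to
 * 1, 4, 7, ... (v*3 + 1), and the remaining indices fill in the values
 * skipped by that first pass.
 */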
static int inv_remap_prob(int v, int m)
{
const int n = 256;
const int modulus = MODULUS_PARAM;
int i;
v = merge_index(v, n-1, modulus);
if ((m<<1)<=n) {
i = inv_recenter_nonneg(v+1, m);
} else {
i = n-1-inv_recenter_nonneg(v+1, n-1-m);
}
return i;
}
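/* A rough sketch of the recovery above: the decoded index is first un-merged,
 * then re-centered around the previous probability m via
 * inv_recenter_nonneg(); the mirrored branch for m above n/2 keeps the
 * result inside the valid probability range.
 */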
static vp8_prob read_prob_diff_update(vp8_reader *const bc, int oldp)
{
int delp = vp8_decode_term_subexp(bc, SUBEXP_PARAM, 255);
return (vp8_prob)inv_remap_prob(delp, oldp);
}
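/* Probability updates are thus coded as a subexponential delta from the old
 * value rather than as a raw 8-bit literal, which costs fewer bits whenever
 * the new probability is close to the old one.
 */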
void vp8cx_init_de_quantizer(VP8D_COMP *pbi)
{
int i;
int Q;
VP8_COMMON *const pc = & pbi->common;
for (Q = 0; Q < QINDEX_RANGE; Q++)
{
pc->Y1dequant[Q][0] = (short)vp8_dc_quant(Q, pc->y1dc_delta_q);
pc->Y2dequant[Q][0] = (short)vp8_dc2quant(Q, pc->y2dc_delta_q);
pc->UVdequant[Q][0] = (short)vp8_dc_uv_quant(Q, pc->uvdc_delta_q);
for (i = 1; i < 16; i++)
{
int rc = vp8_default_zig_zag1d[i];
pc->Y1dequant[Q][rc] = (short)vp8_ac_yquant(Q);
pc->Y2dequant[Q][rc] = (short)vp8_ac2quant(Q, pc->y2ac_delta_q);
            pc->UVdequant[Q][rc] = (short)vp8_ac_uv_quant(Q, pc->uvac_delta_q);
        }
    }
}
void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
int i;
int QIndex;
VP8_COMMON *const pc = & pbi->common;
int segment_id = xd->mode_info_context->mbmi.segment_id;
    /* Set the Q baseline allowing for any segment level adjustment */
    if (segfeature_active(xd, segment_id, SEG_LVL_ALT_Q))
        QIndex = get_segdata(xd, segment_id, SEG_LVL_ALT_Q);
    else
        QIndex = pc->base_qindex;

    QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; /* Clamp to valid range */
/* Set up the block level dequant pointers */
for (i = 0; i < 16; i++)
{
xd->block[i].dequant = pc->Y1dequant[QIndex];
}
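    /* Note: in lossless mode (QIndex == 0) the DCT-based transforms below
     * are swapped for Walsh-Hadamard variants so that dequantization plus
     * inverse transform reconstructs the residual exactly.
     */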
#if CONFIG_LOSSLESS
if(!QIndex)
{
pbi->common.rtcd.idct.idct1 = vp8_short_inv_walsh4x4_1_x8_c;
pbi->common.rtcd.idct.idct16 = vp8_short_inv_walsh4x4_x8_c;
pbi->common.rtcd.idct.idct1_scalar_add = vp8_dc_only_inv_walsh_add_c;
pbi->common.rtcd.idct.iwalsh1 = vp8_short_inv_walsh4x4_1_lossless_c;
pbi->common.rtcd.idct.iwalsh16 = vp8_short_inv_walsh4x4_lossless_c;
pbi->dequant.idct_add = vp8_dequant_idct_add_lossless_c;
pbi->dequant.dc_idct_add = vp8_dequant_dc_idct_add_lossless_c;
pbi->dequant.dc_idct_add_y_block = vp8_dequant_dc_idct_add_y_block_lossless_c;
pbi->dequant.idct_add_y_block = vp8_dequant_idct_add_y_block_lossless_c;
pbi->dequant.idct_add_uv_block = vp8_dequant_idct_add_uv_block_lossless_c;
}
else
{
pbi->common.rtcd.idct.idct1 = vp8_short_idct4x4llm_1_c;
pbi->common.rtcd.idct.idct16 = vp8_short_idct4x4llm_c;
pbi->common.rtcd.idct.idct1_scalar_add = vp8_dc_only_idct_add_c;
pbi->common.rtcd.idct.iwalsh1 = vp8_short_inv_walsh4x4_1_c;
pbi->common.rtcd.idct.iwalsh16 = vp8_short_inv_walsh4x4_c;
pbi->dequant.idct_add = vp8_dequant_idct_add_c;
pbi->dequant.dc_idct_add = vp8_dequant_dc_idct_add_c;
pbi->dequant.dc_idct_add_y_block = vp8_dequant_dc_idct_add_y_block_c;
pbi->dequant.idct_add_y_block = vp8_dequant_idct_add_y_block_c;
pbi->dequant.idct_add_uv_block = vp8_dequant_idct_add_uv_block_c;
}
#endif
for (i = 16; i < 24; i++)
{
xd->block[i].dequant = pc->UVdequant[QIndex];
}
xd->block[24].dequant = pc->Y2dequant[QIndex];
}
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD_VTABLE(x) (&(pbi)->common.rtcd.x)
#else
#define RTCD_VTABLE(x) NULL
#endif
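/* With runtime CPU detection the invoke macros dispatch through the vtable
 * stored in the decoder instance; otherwise they resolve at compile time and
 * the vtable pointer is never dereferenced, hence the NULL above.
 */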
/* skip_recon_mb() is modified: instead of writing the result to the
 * predictor buffer and then copying it to the dst buffer, we write the
 * result directly to the dst buffer, eliminating an unnecessary copy.
 */
static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv_s)(xd);
        RECON_INVOKE(&pbi->common.rtcd.recon,
                     build_intra_predictors_mby_s)(xd);
    }
    else
    {
        vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                           xd->dst.u_buffer, xd->dst.v_buffer,
                                           xd->dst.y_stride, xd->dst.uv_stride);

        if (xd->mode_info_context->mbmi.second_ref_frame)
        {
            vp8_build_2nd_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
                                                   xd->dst.u_buffer, xd->dst.v_buffer,
                                                   xd->dst.y_stride, xd->dst.uv_stride);
        }
    }
#ifdef DEC_DEBUG
if (dec_debug) {
int i, j;
printf("Generating predictors\n");
for (i=0;i<16;i++) {
for (j=0;j<16;j++) printf("%3d ", xd->dst.y_buffer[i*xd->dst.y_stride+j]);
printf("\n");
}
}
#endif
}
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                              unsigned int mb_idx)
{
    int eobtotal = 0;
    MB_PREDICTION_MODE mode;
    int i;
    int tx_type;

    if (pbi->common.frame_type == KEY_FRAME)
    {
        if (pbi->common.txfm_mode == ALLOW_8X8 &&
            xd->mode_info_context->mbmi.mode != I8X8_PRED &&
            xd->mode_info_context->mbmi.mode != B_PRED)
            xd->mode_info_context->mbmi.txfm_size = TX_8X8;
        else
            xd->mode_info_context->mbmi.txfm_size = TX_4X4;
    }
    else
    {
        if (pbi->common.txfm_mode == ONLY_4X4)
        {
            xd->mode_info_context->mbmi.txfm_size = TX_4X4;
        }
        else if (pbi->common.txfm_mode == ALLOW_8X8)
        {
            if (xd->mode_info_context->mbmi.mode == B_PRED ||
                xd->mode_info_context->mbmi.mode == I8X8_PRED ||
                xd->mode_info_context->mbmi.mode == SPLITMV)
                xd->mode_info_context->mbmi.txfm_size = TX_4X4;
            else
                xd->mode_info_context->mbmi.txfm_size = TX_8X8;
        }
    }
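    /* In short: the 8x8 transform is used only when the frame allows it and
     * the mode is not B_PRED or I8X8_PRED (nor SPLITMV on inter frames),
     * since those modes predict and reconstruct in 4x4 units.
     */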
tx_type = xd->mode_info_context->mbmi.txfm_size;
if (xd->mode_info_context->mbmi.mb_skip_coeff)
{
vp8_reset_mb_tokens_context(xd);
}
    else if (!vp8dx_bool_error(xd->current_bc))
    {
        for (i = 0; i < 25; i++)
        {
            xd->block[i].eob = 0;
            xd->eobs[i] = 0;
        }

        if (tx_type == TX_8X8)
            eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
        else
            eobtotal = vp8_decode_mb_tokens(pbi, xd);
    }
#ifdef DEC_DEBUG
if (dec_debug) {
printf("\nTokens (%d)\n", eobtotal);
for (i =0; i<400; i++) {
printf("%3d ", xd->qcoeff[i]);
if (i%16 == 15) printf("\n");
}
printf("\n");
}
#endif
mode = xd->mode_info_context->mbmi.mode;
if (eobtotal == 0 && mode != B_PRED && mode != SPLITMV
&& mode != I8X8_PRED
&&!vp8dx_bool_error(xd->current_bc)
       )
    {
        /* Special case: Force the loopfilter to skip when eobtotal and
         * mb_skip_coeff are zero.
         */
        xd->mode_info_context->mbmi.mb_skip_coeff = 1;
skip_recon_mb(pbi, xd);
return;
}
#ifdef DEC_DEBUG
if (dec_debug) {
int i, j;
printf("Generating predictors\n");
for (i=0;i<16;i++) {
for (j=0;j<16;j++) printf("%3d ", xd->dst.y_buffer[i*xd->dst.y_stride+j]);
printf("\n");
}
}
#endif
    if (xd->segmentation_enabled)
        mb_init_dequantizer(pbi, xd);

    /* do prediction */
    if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
    {
        RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv)(xd);

        if (mode != B_PRED)
        {
            RECON_INVOKE(&pbi->common.rtcd.recon,
                         build_intra_predictors_mby)(xd);
        }
#if 0
        // Intra-modes requiring recon data from top-right
        // MB have been temporarily disabled.
#endif
    }
    else
    {
        vp8_build_inter_predictors_mb(xd);
    }
if (mode == I8X8_PRED)
{
for (i = 0; i < 4; i++)
{
int ib = vp8_i8x8_block[i];
const int iblock[4]={0,1,4,5};
int j;
int i8x8mode;
BLOCKD *b;
b = &xd->block[ib];
i8x8mode= b->bmi.as_mode.first;
RECON_INVOKE(RTCD_VTABLE(recon), intra8x8_predict)
(b, i8x8mode, b->predictor);
for(j = 0; j < 4; j++)
{
b = &xd->block[ib+iblock[j]];
if (xd->eobs[ib+iblock[j]] > 1)
{
DEQUANT_INVOKE(&pbi->dequant, idct_add)
(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride);
}
else
{
IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add)
(b->qcoeff[0] * b->dequant[0], b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride);
((int *)b->qcoeff)[0] = 0;
}
}
b = &xd->block[16+i];
RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)
(b, i8x8mode, b->predictor);
DEQUANT_INVOKE(&pbi->dequant, idct_add)
(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 8, b->dst_stride);
b = &xd->block[20+i];
RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)
(b, i8x8mode, b->predictor);
DEQUANT_INVOKE(&pbi->dequant, idct_add)
(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 8, b->dst_stride);
}
}
    else if (mode == B_PRED)
    {
        for (i = 0; i < 16; i++)
        {
            BLOCKD *b = &xd->block[i];
            int b_mode = xd->mode_info_context->bmi[i].as_mode.first;
#if CONFIG_COMP_INTRA_PRED
            int b_mode2 = xd->mode_info_context->bmi[i].as_mode.second;

            if (b_mode2 == (B_PREDICTION_MODE) (B_DC_PRED - 1))
            {
#endif
                RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
                             (b, b_mode, b->predictor);
#if CONFIG_COMP_INTRA_PRED
            }
            else
            {
                RECON_INVOKE(RTCD_VTABLE(recon), comp_intra4x4_predict)
                             (b, b_mode, b_mode2, b->predictor);
            }
#endif

            if (xd->eobs[i] > 1)
            {
                DEQUANT_INVOKE(&pbi->dequant, idct_add)
                    (b->qcoeff, b->dequant, b->predictor,
                     *(b->base_dst) + b->dst, 16, b->dst_stride);
            }
            else
            {
                IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add)
                    (b->qcoeff[0] * b->dequant[0], b->predictor,
                     *(b->base_dst) + b->dst, 16, b->dst_stride);
                ((int *)b->qcoeff)[0] = 0;
            }
        }
    }
    else if (mode == SPLITMV)
    {
        DEQUANT_INVOKE(&pbi->dequant, idct_add_y_block)
            (xd->qcoeff, xd->block[0].dequant,
             xd->predictor, xd->dst.y_buffer,
             xd->dst.y_stride, xd->eobs);
    }
    else
    {
        BLOCKD *b = &xd->block[24];

        if (tx_type == TX_8X8)
        {
            DEQUANT_INVOKE(&pbi->dequant, block_2x2)(b);
#ifdef DEC_DEBUG
if (dec_debug)
{
int j;
printf("DQcoeff Haar\n");
for (j=0;j<16;j++) {
printf("%d ", b->dqcoeff[j]);
}
printf("\n");
}
#endif
IDCT_INVOKE(RTCD_VTABLE(idct), ihaar2)(&b->dqcoeff[0], b->diff, 8);
            ((int *)b->qcoeff)[0] = 0; /* 2nd order blocks are set to 0 after the inverse transform */
((int *)b->qcoeff)[1] = 0;
((int *)b->qcoeff)[2] = 0;
((int *)b->qcoeff)[3] = 0;
((int *)b->qcoeff)[4] = 0;
((int *)b->qcoeff)[5] = 0;
((int *)b->qcoeff)[6] = 0;
((int *)b->qcoeff)[7] = 0;
            DEQUANT_INVOKE(&pbi->dequant, dc_idct_add_y_block_8x8)
                (xd->qcoeff, xd->block[0].dequant,
                 xd->predictor, xd->dst.y_buffer,
                 xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd);
        }
        else
        {
            DEQUANT_INVOKE(&pbi->dequant, block)(b);
if (xd->eobs[24] > 1)
{
IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh16)(&b->dqcoeff[0], b->diff);
((int *)b->qcoeff)[0] = 0;
((int *)b->qcoeff)[1] = 0;
((int *)b->qcoeff)[2] = 0;
((int *)b->qcoeff)[3] = 0;
((int *)b->qcoeff)[4] = 0;
((int *)b->qcoeff)[5] = 0;
((int *)b->qcoeff)[6] = 0;
((int *)b->qcoeff)[7] = 0;
}
else
{
IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh1)(&b->dqcoeff[0], b->diff);
((int *)b->qcoeff)[0] = 0;
}
            DEQUANT_INVOKE(&pbi->dequant, dc_idct_add_y_block)
                (xd->qcoeff, xd->block[0].dequant,
                 xd->predictor, xd->dst.y_buffer,
                 xd->dst.y_stride, xd->eobs, xd->block[24].diff);
        }
    }

    if (tx_type == TX_8X8)
        DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block_8x8)
            (xd->qcoeff + 16 * 16, xd->block[16].dequant,
             xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer,
             xd->dst.uv_stride, xd->eobs + 16, xd);
    else
        DEQUANT_INVOKE(&pbi->dequant, idct_add_uv_block)
            (xd->qcoeff + 16 * 16, xd->block[16].dequant,
             xd->predictor + 16 * 16, xd->dst.u_buffer, xd->dst.v_buffer,
             xd->dst.uv_stride, xd->eobs + 16);
}
static int get_delta_q(vp8_reader *bc, int prev, int *q_update)
{
int ret_val = 0;
if (vp8_read_bit(bc))
{
ret_val = vp8_read_literal(bc, 4);
if (vp8_read_bit(bc))
ret_val = -ret_val;
}
/* Trigger a quantizer update if the delta-q value has changed */
if (ret_val != prev)
*q_update = 1;
return ret_val;
}
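/* Illustrative bit layout for the delta-q syntax above: a one-bit update
 * flag, then, if set, a 4-bit magnitude followed by a one-bit sign. For
 * example the bits 1 0101 1 decode to -5, while a lone 0 bit leaves the
 * delta at zero.
 */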
#ifdef PACKET_TESTING
#include <stdio.h>
FILE *vpxlog = 0;
#endif
/* Decode a row of superblocks (each a 2x2 region of MBs) */
static void
decode_sb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mbrow, MACROBLOCKD *xd)
{
    int i;
    int sb_col;
    int mb_row, mb_col;
    int recon_yoffset, recon_uvoffset;
    int ref_fb_idx = pc->lst_fb_idx;
    int dst_fb_idx = pc->new_fb_idx;
    int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride;
    int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride;
    int row_delta[4] = { 0, +1,  0, -1};
    int col_delta[4] = {+1, -1, +1, +1};
    int sb_cols = (pc->mb_cols + 1) >> 1;
    ENTROPY_CONTEXT_PLANES left_context[2];

    /* For a SB there are 2 left contexts, each pertaining to a MB row
     * within the SB.
     */
    vpx_memset(left_context, 0, sizeof(left_context));

    mb_row = mbrow;
    mb_col = 0;

    for (sb_col = 0; sb_col < sb_cols; sb_col++)
    {
        // Process the 4 MBs within the SB in the order:
        // top-left, top-right, bottom-left, bottom-right
        for (i = 0; i < 4; i++)
{
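            /* The delta pairs walk the 2x2 SB as follows: (+0,+1) to the
             * top-right MB, (+1,-1) to the bottom-left, (+0,+1) to the
             * bottom-right, then (-1,+1) onto the top-left of the next SB,
             * for a net movement of two MB columns per SB.
             */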
int dy = row_delta[i];
int dx = col_delta[i];
int offset_extended = dy * xd->mode_info_stride + dx;
if ((mb_row >= pc->mb_rows) || (mb_col >= pc->mb_cols))
{
// MB lies outside frame, skip on to next
mb_row += dy;
mb_col += dx;
xd->mode_info_context += offset_extended;
continue;
}
#ifdef DEC_DEBUG
dec_debug = (pc->current_video_frame==0 && mb_row==0 && mb_col==0);
#endif
// Copy in the appropriate left context for this MB row
vpx_memcpy (&pc->left_context,
&left_context[i>>1],
sizeof(ENTROPY_CONTEXT_PLANES));
// Set above context pointer
xd->above_context = pc->above_context + mb_col;
/* Distance of Mb to the various image edges.
* These are specified to 8th pel as they are always compared to
* values that are in 1/8th pel units
*/
            xd->mb_to_top_edge = -((mb_row * 16) << 3);
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
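            /* For example, with mb_col == 2 the left frame edge lies 32
             * pixels away, stored as -(32 << 3) = -256 in 1/8th pel units.
             */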
xd->up_available = (mb_row != 0);
xd->left_available = (mb_col != 0);
update_blockd_bmi(xd);
recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
/* Select the appropriate reference frame for this MB */
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx;
xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer +recon_yoffset;
xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer +recon_uvoffset;
xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer +recon_uvoffset;
if (xd->mode_info_context->mbmi.second_ref_frame)
{
int second_ref_fb_idx;
/* Select the appropriate reference frame for this MB */
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = pc->lst_fb_idx;
else if (xd->mode_info_context->mbmi.second_ref_frame ==
GOLDEN_FRAME)
second_ref_fb_idx = pc->gld_fb_idx;
else
second_ref_fb_idx = pc->alt_fb_idx;
xd->second_pre.y_buffer =
pc->yv12_fb[second_ref_fb_idx].y_buffer + recon_yoffset;
xd->second_pre.u_buffer =
pc->yv12_fb[second_ref_fb_idx].u_buffer + recon_uvoffset;
xd->second_pre.v_buffer =
pc->yv12_fb[second_ref_fb_idx].v_buffer + recon_uvoffset;
}
if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME)
{
/* propagate errors from reference frames */
xd->corrupted |= pc->yv12_fb[ref_fb_idx].corrupted;
}
decode_macroblock(pbi, xd, mb_row * pc->mb_cols + mb_col);
/* check if the boolean decoder has suffered an error */
xd->corrupted |= vp8dx_bool_error(xd->current_bc);
// Store the modified left context for the MB row locally
vpx_memcpy (&left_context[i>>1],
&pc->left_context,
sizeof(ENTROPY_CONTEXT_PLANES));
// skip to next MB
xd->mode_info_context += offset_extended;
mb_row += dy;
mb_col += dx;
        }
    }

    /* skip prediction column */
    xd->mode_info_context += 1 - (pc->mb_cols & 0x1) + xd->mode_info_stride;
}
static unsigned int read_partition_size(const unsigned char *cx_size)
{
const unsigned int size =
cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16);
return size;
}
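/* Partition sizes are stored as 3-byte little-endian values: e.g. the bytes
 * {0x01, 0x02, 0x03} decode to 0x01 + (0x02 << 8) + (0x03 << 16) = 197121.
 */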
static int read_is_valid(const unsigned char *start,
size_t len,
const unsigned char *end)
{
return (start + len > start && start + len <= end);
}
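/* The (start + len > start) term rejects zero-length and wrapping reads, so
 * a corrupt partition size cannot defeat the end-of-buffer check below.
 */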
static void setup_token_decoder(VP8D_COMP *pbi,
const unsigned char *cx_data)
{
VP8_COMMON *pc = &pbi->common;
const unsigned char *user_data_end = pbi->Source + pbi->source_sz;
    vp8_reader *bool_decoder;
    const unsigned char *partition;
    ptrdiff_t bytes_left;
    ptrdiff_t partition_size;

    // Dummy read for now
    vp8_read_literal(&pbi->bc, 2);

    /* Set up pointers to the token partition */
    partition = cx_data;
    bool_decoder = &pbi->bc2;
    bytes_left = user_data_end - partition;
    partition_size = bytes_left;
/* Validate the calculated partition length. If the buffer
* described by the partition can't be fully read, then restrict
* it to the portion that can be (for EC mode) or throw an error.
*/
if (!read_is_valid(partition, partition_size, user_data_end))
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition "
"%d length", 1);
if (vp8dx_start_decode(bool_decoder, partition, partition_size))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder %d", 1);
}
static void init_frame(VP8D_COMP *pbi)
{
VP8_COMMON *const pc = & pbi->common;
MACROBLOCKD *const xd = & pbi->mb;
if (pc->frame_type == KEY_FRAME)
{
vpx_memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
#if CONFIG_HIGH_PRECISION_MV
vpx_memcpy(pc->fc.mvc_hp, vp8_default_mv_context_hp,
sizeof(vp8_default_mv_context_hp));
#endif
vp8_init_mbmode_probs(pc);
vp8_default_coef_probs(pc);
vp8_kf_default_bmode_probs(pc->kf_bmode_prob);
// Reset the segment feature data to the default stats:
// Features disabled, 0, with delta coding (Default state).
    /* reset the mode ref deltas for loop filter */
vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
/* All buffers are implicitly updated on key frames. */
pc->refresh_golden_frame = 1;
pc->refresh_alt_ref_frame = 1;
pc->copy_buffer_to_gf = 0;
pc->copy_buffer_to_arf = 0;
/* Note that Golden and Altref modes cannot be used on a key frame so
* ref_frame_sign_bias[] is undefined and meaningless
*/
pc->ref_frame_sign_bias[GOLDEN_FRAME] = 0;
pc->ref_frame_sign_bias[ALTREF_FRAME] = 0;
vpx_memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
vpx_memcpy(&pc->lfc_a, &pc->fc, sizeof(pc->fc));
        vpx_memcpy(pbi->common.fc.vp8_mode_contexts,
                   pbi->common.fc.mode_context,
                   sizeof(pbi->common.fc.mode_context));
    }
#if CONFIG_ENHANCED_INTERP
pc->mcomp_filter_type = EIGHTTAP;
#else
    pc->mcomp_filter_type = SIXTAP;
#endif

    /* To enable choice of different interpolation filters */
    if (pc->mcomp_filter_type == SIXTAP)
{
xd->subpixel_predict = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap16x16);
xd->subpixel_predict_avg = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg8x8);
        xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg16x16);
    }
#if CONFIG_ENHANCED_INTERP
else if (pc->mcomp_filter_type == EIGHTTAP)
{
xd->subpixel_predict = SUBPIX_INVOKE(RTCD_VTABLE(subpix), eighttap4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), eighttap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), eighttap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), eighttap16x16);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
RTCD_VTABLE(subpix), eighttap_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
RTCD_VTABLE(subpix), eighttap_avg16x16);
xd->subpixel_predict_avg = SUBPIX_INVOKE(RTCD_VTABLE(subpix), eighttap_avg4x4);
}
else if (pc->mcomp_filter_type == EIGHTTAP_SHARP)
{
xd->subpixel_predict = SUBPIX_INVOKE(RTCD_VTABLE(subpix), eighttap4x4_sharp);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), eighttap8x4_sharp);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), eighttap8x8_sharp);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), eighttap16x16_sharp);
xd->subpixel_predict_avg = SUBPIX_INVOKE(RTCD_VTABLE(subpix), eighttap_avg4x4_sharp);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(
RTCD_VTABLE(subpix), eighttap_avg8x8_sharp);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(
RTCD_VTABLE(subpix), eighttap_avg16x16_sharp);
}
#endif
else
{
xd->subpixel_predict = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear16x16);
xd->subpixel_predict_avg = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg4x4);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg8x8);
        xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg16x16);
    }
xd->left_context = &pc->left_context;
xd->mode_info_context = pc->mi;
xd->frame_type = pc->frame_type;
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->corrupted = 0; /* init without corruption */
xd->fullpixel_mask = 0xffffffff;
    if (pc->full_pixel)
        xd->fullpixel_mask = 0xfffffff8;
}
#if CONFIG_NEWUPDATE
static void read_coef_probs3(VP8D_COMP *pbi)
{
const vp8_prob grpupd = 216;
int i, j, k, l;
vp8_reader *const bc = & pbi->bc;
VP8_COMMON *const pc = & pbi->common;
for (i = 0; i < BLOCK_TYPES; i++)
for (l = 0; l < ENTROPY_NODES; l++)
{
if(vp8_read(bc, grpupd))
{
//printf("Decoding %d\n", l);
for (j = !i; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
{
#if CONFIG_EXPANDED_COEF_CONTEXT
if (k >= 3 && ((i == 0 && j == 1) ||
(i > 0 && j == 0)))
continue;
#endif
{
vp8_prob *const p = pc->fc.coef_probs [i][j][k] + l;
int u = vp8_read(bc, COEF_UPDATE_PROB);
if (u) *p = read_prob_diff_update(bc, *p);
}
}
        }
    }
if(pbi->common.txfm_mode == ALLOW_8X8)
{
for (i = 0; i < BLOCK_TYPES_8X8; i++)
for (l = 0; l < ENTROPY_NODES; l++)
{
if(vp8_read(bc, grpupd))
{
for (j = !i; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
{
#if CONFIG_EXPANDED_COEF_CONTEXT
if (k >= 3 && ((i == 0 && j == 1) ||
(i > 0 && j == 0)))
continue;
#endif
{
vp8_prob *const p = pc->fc.coef_probs_8x8 [i][j][k] + l;
int u = vp8_read(bc, COEF_UPDATE_PROB_8X8);
if (u) *p = read_prob_diff_update(bc, *p);
}
}
}
}
    }
}
static void read_coef_probs2(VP8D_COMP *pbi)
{
const vp8_prob grpupd = 192;
int i, j, k, l;
vp8_reader *const bc = & pbi->bc;
VP8_COMMON *const pc = & pbi->common;
for (l = 0; l < ENTROPY_NODES; l++)
{
if(vp8_read(bc, grpupd))
{
//printf("Decoding %d\n", l);
for (i = 0; i < BLOCK_TYPES; i++)
for (j = !i; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
{
#if CONFIG_EXPANDED_COEF_CONTEXT
if (k >= 3 && ((i == 0 && j == 1) ||
(i > 0 && j == 0)))
continue;
#endif
{
vp8_prob *const p = pc->fc.coef_probs [i][j][k] + l;
int u = vp8_read(bc, COEF_UPDATE_PROB);
if (u) *p = read_prob_diff_update(bc, *p);
}
}
        }
    }
if(pbi->common.txfm_mode == ALLOW_8X8)
{
for (l = 0; l < ENTROPY_NODES; l++)
{
if(vp8_read(bc, grpupd))
{
for (i = 0; i < BLOCK_TYPES_8X8; i++)
for (j = !i; j < COEF_BANDS; j++)
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
{
#if CONFIG_EXPANDED_COEF_CONTEXT
if (k >= 3 && ((i == 0 && j == 1) ||
(i > 0 && j == 0)))
continue;
#endif
{
vp8_prob *const p = pc->fc.coef_probs_8x8 [i][j][k] + l;
int u = vp8_read(bc, COEF_UPDATE_PROB_8X8);
if (u) *p = read_prob_diff_update(bc, *p);
}
}
}
}
    }
}
#endif
static void read_coef_probs(VP8D_COMP *pbi)
{
int i, j, k, l;
vp8_reader *const bc = & pbi->bc;
VP8_COMMON *const pc = & pbi->common;
{
if(vp8_read_bit(bc))
{
/* read coef probability tree */
for (i = 0; i < BLOCK_TYPES; i++)
#if CONFIG_NEWUPDATE
for (j = !i; j < COEF_BANDS; j++)
#else
for (j = 0; j < COEF_BANDS; j++)
#endif
for (k = 0; k < PREV_COEF_CONTEXTS; k++)
{
#if CONFIG_EXPANDED_COEF_CONTEXT
if (k >= 3 && ((i == 0 && j == 1) ||
(i > 0 && j == 0)))
continue;
#endif
for (l = 0; l < ENTROPY_NODES; l++)
{
vp8_prob *const p = pc->fc.coef_probs [i][j][k] + l;
                    if (vp8_read(bc, COEF_UPDATE_PROB))
#if CONFIG_NEWUPDATE
                        *p = read_prob_diff_update(bc, *p);
#else
*p = (vp8_prob)vp8_read_literal(bc, 8);
#endif
}
}
}
}
if(pbi->common.txfm_mode == ALLOW_8X8 && vp8_read_bit(bc))
{
// read coef probability tree
for (i = 0; i < BLOCK_TYPES_8X8; i++)
#if CONFIG_NEWUPDATE
for (j = !i; j < COEF_BANDS; j++)