/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vp8/common/header.h"
#include "vp8/common/reconintra.h"
#include "vp8/common/reconintra4x4.h"
#include "vp8/common/recon.h"
#include "vp8/common/reconinter.h"
#include "vp8/common/invtrans.h"
#include "vp8/common/alloccommon.h"
#include "vp8/common/entropymode.h"
#include "vp8/common/quant_common.h"
#include "vpx_scale/vpxscale.h"
#include "vpx_scale/yv12extend.h"
#include "dequantize.h"
#include "dboolhuff.h"
#ifdef DEC_DEBUG
int dec_debug = 0;
#endif
void vp8cx_init_de_quantizer(VP8D_COMP *pbi)
{
int i;
int Q;
VP8_COMMON *const pc = & pbi->common;
for (Q = 0; Q < QINDEX_RANGE; Q++)
{
pc->Y1dequant[Q][0] = (short)vp8_dc_quant(Q, pc->y1dc_delta_q);
pc->Y2dequant[Q][0] = (short)vp8_dc2quant(Q, pc->y2dc_delta_q);
pc->UVdequant[Q][0] = (short)vp8_dc_uv_quant(Q, pc->uvdc_delta_q);
for (i = 1; i < 16; i++)
{
int rc = vp8_default_zig_zag1d[i];
pc->Y1dequant[Q][rc] = (short)vp8_ac_yquant(Q);
pc->Y2dequant[Q][rc] = (short)vp8_ac2quant(Q, pc->y2ac_delta_q);
pc->UVdequant[Q][rc] = (short)vp8_ac_uv_quant(Q, pc->uvac_delta_q);
}
}
}
void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
int i;
int QIndex;
VP8_COMMON *const pc = & pbi->common;
int segment_id = xd->mode_info_context->mbmi.segment_id;
// Set the Q baseline allowing for any segment level adjustment
if ( segfeature_active( xd, segment_id, SEG_LVL_ALT_Q ) )
{
QIndex = get_segdata( xd, segment_id, SEG_LVL_ALT_Q );
QIndex = (QIndex >= 0) ? ((QIndex <= MAXQ) ? QIndex : MAXQ) : 0; /* Clamp to valid range */
}
else
QIndex = pc->base_qindex;
/* Set up the block level dequant pointers */
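/* Blocks 0..15 are the 16 luma (Y) 4x4 blocks, 16..23 the eight chroma
 * (U then V) blocks, and block 24 the second-order (Y2) block; each group
 * points at the dequant table that matches this macroblock's Q index.
 */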
for (i = 0; i < 16; i++)
{
xd->block[i].dequant = pc->Y1dequant[QIndex];
}
for (i = 16; i < 24; i++)
{
xd->block[i].dequant = pc->UVdequant[QIndex];
}
xd->block[24].dequant = pc->Y2dequant[QIndex];
}
#if CONFIG_RUNTIME_CPU_DETECT
#define RTCD_VTABLE(x) (&(pbi)->common.rtcd.x)
#else
#define RTCD_VTABLE(x) NULL
#endif
/* skip_recon_mb() has been modified: instead of writing the result to the
 * predictor buffer and then copying it to the dst buffer, we write the result
 * directly to the dst buffer. This eliminates an unnecessary copy.
 */
static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd)
{
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv_s)(xd);
RECON_INVOKE(&pbi->common.rtcd.recon,
build_intra_predictors_mby_s)(xd);
}
else
{
vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
if (xd->mode_info_context->mbmi.second_ref_frame)
{
vp8_build_2nd_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.y_stride, xd->dst.uv_stride);
}
}
#ifdef DEC_DEBUG
if (dec_debug) {
int i, j;
printf("Generating predictors\n");
for (i=0;i<16;i++) {
for (j=0;j<16;j++) printf("%3d ", xd->dst.y_buffer[i*xd->dst.y_stride+j]);
printf("\n");
}
}
#endif
}
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
unsigned int mb_idx)
{
int eobtotal = 0;
MB_PREDICTION_MODE mode;
int i;
int tx_type;
if (pbi->common.txfm_mode == ONLY_4X4)
{
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
}
else if (pbi->common.txfm_mode == ALLOW_8X8)
{
if (xd->mode_info_context->mbmi.mode == B_PRED
|| xd->mode_info_context->mbmi.mode == I8X8_PRED
|| xd->mode_info_context->mbmi.mode == SPLITMV)
xd->mode_info_context->mbmi.txfm_size = TX_4X4;
else
xd->mode_info_context->mbmi.txfm_size = TX_8X8;
}
tx_type = xd->mode_info_context->mbmi.txfm_size;
if (xd->mode_info_context->mbmi.mb_skip_coeff)
{
vp8_reset_mb_tokens_context(xd);
}
else if (!vp8dx_bool_error(xd->current_bc))
{
for (i = 0; i < 25; i++)
{
xd->block[i].eob = 0;
xd->eobs[i] = 0;
}
#if CONFIG_T8X8
if (tx_type == TX_8X8)
eobtotal = vp8_decode_mb_tokens_8x8(pbi, xd);
else
#endif
eobtotal = vp8_decode_mb_tokens(pbi, xd);
}
#ifdef DEC_DEBUG
if (dec_debug) {
printf("\nTokens (%d)\n", eobtotal);
for (i =0; i<400; i++) {
printf("%3d ", xd->qcoeff[i]);
if (i%16 == 15) printf("\n");
}
printf("\n");
}
#endif
mode = xd->mode_info_context->mbmi.mode;
if (eobtotal == 0 && mode != B_PRED && mode != SPLITMV
&& mode != I8X8_PRED
&& !vp8dx_bool_error(xd->current_bc)
)
{
/* Special case: Force the loopfilter to skip when eobtotal and
 * mb_skip_coeff are zero.
 */
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
skip_recon_mb(pbi, xd);
return;
}
#ifdef DEC_DEBUG
if (dec_debug) {
int i, j;
printf("Generating predictors\n");
for (i=0;i<16;i++) {
for (j=0;j<16;j++) printf("%3d ", xd->dst.y_buffer[i*xd->dst.y_stride+j]);
printf("\n");
}
}
#endif
if (xd->segmentation_enabled)
mb_init_dequantizer(pbi, xd);
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
RECON_INVOKE(&pbi->common.rtcd.recon, build_intra_predictors_mbuv)(xd);
if (mode != B_PRED)
{
RECON_INVOKE(&pbi->common.rtcd.recon,
build_intra_predictors_mby)(xd);
}
else
{
vp8_intra_prediction_down_copy(xd);
}
}
else
{
vp8_build_inter_predictors_mb(xd);
}
if (mode == I8X8_PRED)
{
for (i = 0; i < 4; i++)
{
int ib = vp8_i8x8_block[i];
const int iblock[4]={0,1,4,5};
int j;
int i8x8mode;
BLOCKD *b;
b = &xd->block[ib];
i8x8mode= b->bmi.as_mode;
RECON_INVOKE(RTCD_VTABLE(recon), intra8x8_predict)
(b, i8x8mode, b->predictor);
for(j = 0; j < 4; j++)
{
b = &xd->block[ib+iblock[j]];
if (xd->eobs[ib+iblock[j]] > 1)
{
DEQUANT_INVOKE(&pbi->dequant, idct_add)
(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride);
}
else
{
IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add)
(b->qcoeff[0] * b->dequant[0], b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride);
((int *)b->qcoeff)[0] = 0;
}
}
b = &xd->block[16+i];
RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)
(b, i8x8mode, b->predictor);
DEQUANT_INVOKE(&pbi->dequant, idct_add)
(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 8, b->dst_stride);
b = &xd->block[20+i];
RECON_INVOKE(RTCD_VTABLE(recon), intra_uv4x4_predict)
(b, i8x8mode, b->predictor);
DEQUANT_INVOKE(&pbi->dequant, idct_add)
(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 8, b->dst_stride);
}
}
else if (mode == B_PRED)
{
for (i = 0; i < 16; i++)
{
BLOCKD *b = &xd->block[i];
int b_mode = xd->mode_info_context->bmi[i].as_mode;
RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
(b, b_mode, b->predictor);
if (xd->eobs[i] > 1)
{
DEQUANT_INVOKE(&pbi->dequant, idct_add)
(b->qcoeff, b->dequant, b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride);
}
else
{
IDCT_INVOKE(RTCD_VTABLE(idct), idct1_scalar_add)
(b->qcoeff[0] * b->dequant[0], b->predictor,
*(b->base_dst) + b->dst, 16, b->dst_stride);
((int *)b->qcoeff)[0] = 0;
}
}
}
else if (mode == SPLITMV)
DEQUANT_INVOKE (&pbi->dequant, idct_add_y_block)
(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs);
else
{
BLOCKD *b = &xd->block[24];
#if CONFIG_T8X8
if (tx_type == TX_8X8)
{
DEQUANT_INVOKE(&pbi->dequant, block_2x2)(b);
#ifdef DEC_DEBUG
if (dec_debug)
{
int j;
printf("DQcoeff Haar\n");
for (j=0;j<16;j++) {
printf("%d ", b->dqcoeff[j]);
}
printf("\n");
}
#endif
IDCT_INVOKE(RTCD_VTABLE(idct), ihaar2)(&b->dqcoeff[0], b->diff, 8);
((int *)b->qcoeff)[0] = 0; /* 2nd order block coefficients are zeroed after the inverse transform */
((int *)b->qcoeff)[1] = 0;
((int *)b->qcoeff)[2] = 0;
((int *)b->qcoeff)[3] = 0;
((int *)b->qcoeff)[4] = 0;
((int *)b->qcoeff)[5] = 0;
((int *)b->qcoeff)[6] = 0;
((int *)b->qcoeff)[7] = 0;
DEQUANT_INVOKE (&pbi->dequant, dc_idct_add_y_block_8x8)
(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs, xd->block[24].diff, xd);
}
else
#endif
{
DEQUANT_INVOKE(&pbi->dequant, block)(b);
if (xd->eobs[24] > 1)
{
IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh16)(&b->dqcoeff[0], b->diff);
((int *)b->qcoeff)[0] = 0;
((int *)b->qcoeff)[1] = 0;
((int *)b->qcoeff)[2] = 0;
((int *)b->qcoeff)[3] = 0;
((int *)b->qcoeff)[4] = 0;
((int *)b->qcoeff)[5] = 0;
((int *)b->qcoeff)[6] = 0;
((int *)b->qcoeff)[7] = 0;
}
else
{
IDCT_INVOKE(RTCD_VTABLE(idct), iwalsh1)(&b->dqcoeff[0], b->diff);
((int *)b->qcoeff)[0] = 0;
}
DEQUANT_INVOKE (&pbi->dequant, dc_idct_add_y_block)
(xd->qcoeff, xd->block[0].dequant,
xd->predictor, xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs, xd->block[24].diff);
}
}
#if CONFIG_T8X8
if (tx_type == TX_8X8)
{
DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block_8x8)
(xd->qcoeff+16*16, xd->block[16].dequant,
xd->predictor+16*16, xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs+16, xd);
}
else
#endif
if (xd->mode_info_context->mbmi.mode != I8X8_PRED)
DEQUANT_INVOKE (&pbi->dequant, idct_add_uv_block)
(xd->qcoeff+16*16, xd->block[16].dequant,
xd->predictor+16*16, xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs+16);
}
static int get_delta_q(vp8_reader *bc, int prev, int *q_update)
{
int ret_val = 0;
if (vp8_read_bit(bc))
{
ret_val = vp8_read_literal(bc, 4);
if (vp8_read_bit(bc))
ret_val = -ret_val;
}
/* Trigger a quantizer update if the delta-q value has changed */
if (ret_val != prev)
*q_update = 1;
return ret_val;
}
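/* Example: get_delta_q() reads a delta of -5 as update flag = 1, 4-bit
 * magnitude = 5, sign bit = 1; an unchanged delta is just flag = 0.
 */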
#ifdef PACKET_TESTING
#include <stdio.h>
FILE *vpxlog = 0;
#endif
#if CONFIG_SUPERBLOCKS
static void
decode_sb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mbrow, MACROBLOCKD *xd)
{
int i;
int recon_yoffset, recon_uvoffset;
int mb_row, mb_col;
int ref_fb_idx = pc->lst_fb_idx;
int dst_fb_idx = pc->new_fb_idx;
int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride;
int sb_col;
int row_delta[4] = { 0, +1, 0, -1};
int col_delta[4] = {+1, -1, +1, +1};
int sb_cols = (pc->mb_cols + 1)>>1;
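/* Each superblock covers a 2x2 group of macroblocks. The delta tables walk
 * that group in raster order, e.g. starting at (r,c): (r,c) -> (r,c+1) ->
 * (r+1,c) -> (r+1,c+1), then step up-right to (r,c+2) for the next superblock.
 */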
ENTROPY_CONTEXT_PLANES left_context[2];
vpx_memset(left_context, 0, sizeof(left_context));
mb_row = mbrow;
mb_col = 0;
for (sb_col=0; sb_col<sb_cols; sb_col++)
{
for ( i=0; i<4; i++ )
{
int dy = row_delta[i];
int dx = col_delta[i];
int offset_extended = dy * xd->mode_info_stride + dx;
if ((mb_row >= pc->mb_rows) || (mb_col >= pc->mb_cols))
{
// Skip on to the next MB
mb_row += dy;
mb_col += dx;
xd->mode_info_context += offset_extended;
continue;
}
// Copy in the appropriate left context
vpx_memcpy (&pc->left_context,
&left_context[(i>>1) & 0x1],
sizeof(ENTROPY_CONTEXT_PLANES));
// reset above block coeffs
xd->above_context = pc->above_context + mb_col;
/* Distance of Mb to the various image edges.
* These are specified to 8th pel as they are always compared to
* values that are in 1/8th pel units
*/
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
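/* e.g. for mb_col == 2 the left edge is 2*16 = 32 pels away, stored as
 * -(32 << 3) = -256 in 1/8th pel units.
 */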
xd->up_available = (mb_row != 0);
xd->left_available = (mb_col != 0);
update_blockd_bmi(xd);
recon_yoffset = (mb_row * recon_y_stride * 16) + (mb_col * 16);
recon_uvoffset = (mb_row * recon_uv_stride * 8) + (mb_col * 8);
xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
/* Select the appropriate reference frame for this MB */
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx;
else
ref_fb_idx = pc->alt_fb_idx;
xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer +recon_yoffset;
xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer +recon_uvoffset;
xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer +recon_uvoffset;
if (xd->mode_info_context->mbmi.second_ref_frame)
{
int second_ref_fb_idx;
/* Select the appropriate reference frame for this MB */
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = pc->lst_fb_idx;
else if (xd->mode_info_context->mbmi.second_ref_frame ==
GOLDEN_FRAME)
second_ref_fb_idx = pc->gld_fb_idx;
else
second_ref_fb_idx = pc->alt_fb_idx;
xd->second_pre.y_buffer =
pc->yv12_fb[second_ref_fb_idx].y_buffer + recon_yoffset;
xd->second_pre.u_buffer =
pc->yv12_fb[second_ref_fb_idx].u_buffer + recon_uvoffset;
xd->second_pre.v_buffer =
pc->yv12_fb[second_ref_fb_idx].v_buffer + recon_uvoffset;
}
if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME)
{
/* propagate errors from reference frames */
xd->corrupted |= pc->yv12_fb[ref_fb_idx].corrupted;
}
decode_macroblock(pbi, xd, mb_row * pc->mb_cols + mb_col);
/* check if the boolean decoder has suffered an error */
xd->corrupted |= vp8dx_bool_error(xd->current_bc);
// Copy in the appropriate left context
vpx_memcpy (&left_context[(i>>1) & 0x1],
&pc->left_context,
sizeof(ENTROPY_CONTEXT_PLANES));
// skip to next MB
xd->mode_info_context += offset_extended;
mb_row += dy;
mb_col += dx;
}
}
/* skip prediction column */
xd->mode_info_context += 1 - (pc->mb_cols & 0x1) + xd->mode_info_stride;
}
#else
static void
decode_mb_row(VP8D_COMP *pbi, VP8_COMMON *pc, int mb_row, MACROBLOCKD *xd)
{
int recon_yoffset, recon_uvoffset;
int mb_col;
int ref_fb_idx = pc->lst_fb_idx;
int dst_fb_idx = pc->new_fb_idx;
int recon_y_stride = pc->yv12_fb[ref_fb_idx].y_stride;
int recon_uv_stride = pc->yv12_fb[ref_fb_idx].uv_stride;
vpx_memset(&pc->left_context, 0, sizeof(pc->left_context));
recon_yoffset = mb_row * recon_y_stride * 16;
recon_uvoffset = mb_row * recon_uv_stride * 8;
xd->above_context = pc->above_context;
xd->up_available = (mb_row != 0);
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
{
/* Distance of Mb to the various image edges.
* These are specified to 8th pel as they are always compared to values
* that are in 1/8th pel units
*/
xd->mb_to_left_edge = -((mb_col * 16) << 3);
xd->mb_to_right_edge = ((pc->mb_cols - 1 - mb_col) * 16) << 3;
xd->dst.y_buffer = pc->yv12_fb[dst_fb_idx].y_buffer + recon_yoffset;
xd->dst.u_buffer = pc->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = pc->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
/* Select the appropriate reference frame for this MB */
if (xd->mode_info_context->mbmi.ref_frame == LAST_FRAME)
ref_fb_idx = pc->lst_fb_idx;
else if (xd->mode_info_context->mbmi.ref_frame == GOLDEN_FRAME)
ref_fb_idx = pc->gld_fb_idx;
else
ref_fb_idx = pc->alt_fb_idx;
xd->pre.y_buffer = pc->yv12_fb[ref_fb_idx].y_buffer + recon_yoffset;
xd->pre.u_buffer = pc->yv12_fb[ref_fb_idx].u_buffer + recon_uvoffset;
xd->pre.v_buffer = pc->yv12_fb[ref_fb_idx].v_buffer + recon_uvoffset;
if (xd->mode_info_context->mbmi.second_ref_frame)
{
int second_ref_fb_idx;
/* Select the appropriate reference frame for this MB */
if (xd->mode_info_context->mbmi.second_ref_frame == LAST_FRAME)
second_ref_fb_idx = pc->lst_fb_idx;
else if (xd->mode_info_context->mbmi.second_ref_frame == GOLDEN_FRAME)
second_ref_fb_idx = pc->gld_fb_idx;
else
second_ref_fb_idx = pc->alt_fb_idx;
xd->second_pre.y_buffer = pc->yv12_fb[second_ref_fb_idx].y_buffer + recon_yoffset;
xd->second_pre.u_buffer = pc->yv12_fb[second_ref_fb_idx].u_buffer + recon_uvoffset;
xd->second_pre.v_buffer = pc->yv12_fb[second_ref_fb_idx].v_buffer + recon_uvoffset;
}
if (xd->mode_info_context->mbmi.ref_frame != INTRA_FRAME)
{
/* propagate errors from reference frames */
xd->corrupted |= pc->yv12_fb[ref_fb_idx].corrupted;
}
#ifdef DEC_DEBUG
dec_debug = (pc->current_video_frame==1 && mb_row==4 && mb_col==0);
#endif
decode_macroblock(pbi, xd, mb_row * pc->mb_cols + mb_col);
/* check if the boolean decoder has suffered an error */
xd->corrupted |= vp8dx_bool_error(xd->current_bc);
recon_yoffset += 16;
recon_uvoffset += 8;
++xd->mode_info_context; /* next mb */
xd->above_context++;
}
/* extend the row of buffers beyond the ends */
vp8_extend_mb_row(&pc->yv12_fb[dst_fb_idx],
xd->dst.y_buffer + 16, xd->dst.u_buffer + 8,
xd->dst.v_buffer + 8);
++xd->mode_info_context; /* skip prediction column */
}
static unsigned int read_partition_size(const unsigned char *cx_size)
{
const unsigned int size =
cx_size[0] + (cx_size[1] << 8) + (cx_size[2] << 16);
return size;
}
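/* Example: read_partition_size() treats the 3 bytes as little-endian, so
 * the bytes 0x34 0x12 0x00 give a partition size of 0x001234 = 4660 bytes.
 */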
static int read_is_valid(const unsigned char *start,
size_t len,
const unsigned char *end)
{
return (start + len > start && start + len <= end);
}
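/* Note: the (start + len > start) term above rejects a zero or wrapped
 * length, so a corrupt partition size cannot make the range check pass.
 */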
static void setup_token_decoder(VP8D_COMP *pbi,
const unsigned char *cx_data)
{
VP8_COMMON *pc = &pbi->common;
const unsigned char *user_data_end = pbi->Source + pbi->source_sz;
vp8_reader *bool_decoder = &pbi->bc2;
const unsigned char *partition = cx_data;
ptrdiff_t partition_size;
ptrdiff_t bytes_left;
// Dummy read for now
vp8_read_literal(&pbi->bc, 2);
bytes_left = user_data_end - partition;
partition_size = bytes_left;
/* Validate the calculated partition length. If the buffer
* described by the partition can't be fully read, then restrict
* it to the portion that can be (for EC mode) or throw an error.
*/
if (!read_is_valid(partition, partition_size, user_data_end))
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition "
"%d length", 1);
if (vp8dx_start_decode(bool_decoder, partition, partition_size))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder %d", 1);
}
static void init_frame(VP8D_COMP *pbi)
{
VP8_COMMON *const pc = & pbi->common;
MACROBLOCKD *const xd = & pbi->mb;
if (pc->frame_type == KEY_FRAME)
{
vpx_memcpy(pc->fc.mvc, vp8_default_mv_context, sizeof(vp8_default_mv_context));
vp8_init_mbmode_probs(pc);
vp8_default_coef_probs(pc);
vp8_kf_default_bmode_probs(pc->kf_bmode_prob);
// Reset the segment feature data to the default stats:
// Features disabled, 0, with delta coding (Default state).
/* reset the mode and ref deltas for the loop filter */
vpx_memset(xd->ref_lf_deltas, 0, sizeof(xd->ref_lf_deltas));
vpx_memset(xd->mode_lf_deltas, 0, sizeof(xd->mode_lf_deltas));
/* All buffers are implicitly updated on key frames. */
pc->refresh_golden_frame = 1;
pc->refresh_alt_ref_frame = 1;
pc->copy_buffer_to_gf = 0;
pc->copy_buffer_to_arf = 0;
/* Note that Golden and Altref modes cannot be used on a key frame so
* ref_frame_sign_bias[] is undefined and meaningless
*/
pc->ref_frame_sign_bias[GOLDEN_FRAME] = 0;
pc->ref_frame_sign_bias[ALTREF_FRAME] = 0;
vpx_memcpy(&pc->lfc, &pc->fc, sizeof(pc->fc));
vpx_memcpy(&pc->lfc_a, &pc->fc, sizeof(pc->fc));
vp8_init_mode_contexts(&pbi->common);
vpx_memcpy( pbi->common.vp8_mode_contexts,
pbi->common.mode_context,
sizeof(pbi->common.mode_context));
}
if (!pc->use_bilinear_mc_filter)
pc->mcomp_filter_type = SIXTAP;
else
pc->mcomp_filter_type = BILINEAR;
/* To enable choice of different interpolation filters */
if (pc->mcomp_filter_type == SIXTAP)
{
xd->subpixel_predict = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap16x16);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), sixtap_avg16x16);
}
else
{
xd->subpixel_predict = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear4x4);
xd->subpixel_predict8x4 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear8x4);
xd->subpixel_predict8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear8x8);
xd->subpixel_predict16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear16x16);
xd->subpixel_predict_avg8x8 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg8x8);
xd->subpixel_predict_avg16x16 = SUBPIX_INVOKE(RTCD_VTABLE(subpix), bilinear_avg16x16);
}
xd->left_context = &pc->left_context;
xd->mode_info_context = pc->mi;
xd->frame_type = pc->frame_type;
xd->mode_info_context->mbmi.mode = DC_PRED;
xd->corrupted = 0; /* init without corruption */
xd->fullpixel_mask = 0xffffffff;
if(pc->full_pixel)
xd->fullpixel_mask = 0xfffffff8;
}
int vp8_decode_frame(VP8D_COMP *pbi)
{
vp8_reader *const bc = & pbi->bc;
VP8_COMMON *const pc = & pbi->common;
MACROBLOCKD *const xd = & pbi->mb;
const unsigned char *data = (const unsigned char *)pbi->Source;
const unsigned char *data_end = data + pbi->source_sz;
ptrdiff_t first_partition_length_in_bytes;
int corrupt_tokens = 0;
/* start with no corruption of current frame */
xd->corrupted = 0;
pc->yv12_fb[pc->new_fb_idx].corrupted = 0;
if (data_end - data < 3)
{
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet");
}
else
{
pc->frame_type = (FRAME_TYPE)(data[0] & 1);
pc->version = (data[0] >> 1) & 7;
pc->show_frame = (data[0] >> 4) & 1;
first_partition_length_in_bytes =
(data[0] | (data[1] << 8) | (data[2] << 16)) >> 5;
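/* Example: a 3-byte frame tag of 0x50 0x2a 0x00 decodes as key frame
 * (bit 0 == 0), version 0, show_frame 1, and a first partition size of
 * (0x002a50 >> 5) = 338 bytes.
 */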
if ((data + first_partition_length_in_bytes > data_end
|| data + first_partition_length_in_bytes < data))
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Truncated packet or corrupt partition 0 length");
data += 3;
if (pc->frame_type == KEY_FRAME)
{
const int Width = pc->Width;
const int Height = pc->Height;
/* vet via sync code */
/* When error concealment is enabled we should only check the sync
* code if we have enough bits available
*/
if (data[0] != 0x9d || data[1] != 0x01 || data[2] != 0x2a)
vpx_internal_error(&pc->error, VPX_CODEC_UNSUP_BITSTREAM,
"Invalid frame sync code");
/* If error concealment is enabled we should only parse the new size
* if we have enough data. Otherwise we will end up with the wrong
* size.
*/
pc->Width = (data[3] | (data[4] << 8)) & 0x3fff;
pc->horiz_scale = data[4] >> 6;
pc->Height = (data[5] | (data[6] << 8)) & 0x3fff;
pc->vert_scale = data[6] >> 6;
data += 7;
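/* Example: data[3] = 0x40, data[4] = 0x81 gives Width = 0x8140 & 0x3fff = 320
 * with horiz_scale = 0x81 >> 6 = 2; the top two bits of each high byte carry
 * the scale factor.
 */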
if (Width != pc->Width || Height != pc->Height)
{
int prev_mb_rows = pc->mb_rows;
if (pc->Width <= 0)
{
pc->Width = Width;
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Invalid frame width");
}
if (pc->Height <= 0)
{
pc->Height = Height;
vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
"Invalid frame height");
}
if (vp8_alloc_frame_buffers(pc, pc->Width, pc->Height))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate frame buffers");
}
}
}
if ((!pbi->decoded_key_frame && pc->frame_type != KEY_FRAME) ||
pc->Width == 0 || pc->Height == 0)
{
return -1;
}
init_frame(pbi);
if (vp8dx_start_decode(bc, data, data_end - data))
vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate bool decoder 0");
if (pc->frame_type == KEY_FRAME) {
pc->clr_type = (YUV_TYPE)vp8_read_bit(bc);
pc->clamp_type = (CLAMP_TYPE)vp8_read_bit(bc);
}
xd->segmentation_enabled = (unsigned char)vp8_read_bit(bc);
// Read whether or not the segmentation map is being explicitly
// updated this frame.
xd->update_mb_segmentation_map = (unsigned char)vp8_read_bit(bc);
// If so what method will be used.
if ( xd->update_mb_segmentation_map )
pc->temporal_update = (unsigned char)vp8_read_bit(bc);
xd->update_mb_segmentation_data = (unsigned char)vp8_read_bit(bc);
if (xd->update_mb_segmentation_data)
{
xd->mb_segment_abs_delta = (unsigned char)vp8_read_bit(bc);
for (i = 0; i < MAX_MB_SEGMENTS; i++)
{
// For each of the segments features...
#if CONFIG_FEATUREUPDATES
// feature updated?
if (vp8_read_bit(bc))
{
int active=1;
if ( segfeature_active( xd, i, j ))
active=vp8_read_bit(bc);
// Is the feature enabled
if (active)
{
// Update the feature data and mask
enable_segfeature(xd, i, j);
data = (signed char)vp8_read_literal(
bc, seg_feature_data_bits(j));
// Is the segment data signed?
if ( is_segfeature_signed(j) )
{
if (vp8_read_bit(bc))
data = - data;
}
}
else
data = 0;
set_segdata(xd, i, j, data);
}
#else
data = (signed char)vp8_read_literal(
bc, seg_feature_data_bits(j));