/*
 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include "./vp9_rtcd.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_scale/vpx_scale.h"
#include "vp9/common/vp9_extend.h"
#include "vp9/common/vp9_pred_common.h"
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_reconintra.h"
#include "vp9/common/vp9_reconinter.h"
#include "vp9/common/vp9_seg_common.h"
#include "vp9/common/vp9_tile_common.h"
#include "vp9/decoder/vp9_dboolhuff.h"
#include "vp9/decoder/vp9_decodframe.h"
#include "vp9/decoder/vp9_detokenize.h"
#include "vp9/decoder/vp9_decodemv.h"
#include "vp9/decoder/vp9_dsubexp.h"
#include "vp9/decoder/vp9_onyxd_int.h"
#include "vp9/decoder/vp9_read_bit_buffer.h"
#ifdef DEC_DEBUG
int dec_debug = 0;
#endif
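
// Tile lengths in the bitstream are stored as 32-bit big-endian values;
// read_be32 below reassembles one byte by byte. For example, the bytes
// { 0x00, 0x00, 0x01, 0x20 } decode to 0x120, i.e. a 288-byte tile.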
static int read_be32(const uint8_t *p) {
  return (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}
// len == 0 is not allowed
static int read_is_valid(const uint8_t *start, size_t len,
                         const uint8_t *end) {
  return start + len > start && start + len <= end;
}
static int decode_unsigned_max(struct vp9_read_bit_buffer *rb, int max) {
  const int data = vp9_rb_read_literal(rb, get_unsigned_bits(max));
  return data > max ? max : data;
}
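
// The transform mode is coded as a 2-bit literal covering ONLY_4X4 through
// ALLOW_32X32; when that literal equals ALLOW_32X32, one more bit
// distinguishes it from TX_MODE_SELECT (per-block transform size selection).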
static TXFM_MODE read_tx_mode(vp9_reader *r) {
  TXFM_MODE txfm_mode = vp9_read_literal(r, 2);
  if (txfm_mode == ALLOW_32X32)
    txfm_mode += vp9_read_bit(r);
  return txfm_mode;
}
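
// Forward updates to the transform-size probabilities: each probability is
// rewritten only when a preceding bit, coded with probability
// VP9_MODE_UPDATE_PROB, says so; otherwise the existing value is kept.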
static void read_tx_probs(FRAME_CONTEXT *fc, vp9_reader *r) {
  int i, j;

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZE_MAX_SB - 3; ++j)
      if (vp9_read(r, VP9_MODE_UPDATE_PROB))
        vp9_diff_update_prob(r, &fc->tx_probs_8x8p[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZE_MAX_SB - 2; ++j)
      if (vp9_read(r, VP9_MODE_UPDATE_PROB))
        vp9_diff_update_prob(r, &fc->tx_probs_16x16p[i][j]);

  for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
    for (j = 0; j < TX_SIZE_MAX_SB - 1; ++j)
      if (vp9_read(r, VP9_MODE_UPDATE_PROB))
        vp9_diff_update_prob(r, &fc->tx_probs_32x32p[i][j]);
}
static void mb_init_dequantizer(VP9_COMMON *pc, MACROBLOCKD *xd) {
  int i;
  const int segment_id = xd->mode_info_context->mbmi.segment_id;
  xd->q_index = vp9_get_qindex(xd, segment_id, pc->base_qindex);

  xd->plane[0].dequant = pc->y_dequant[xd->q_index];
  for (i = 1; i < MAX_MB_PLANE; i++)
    xd->plane[i].dequant = pc->uv_dequant[xd->q_index];
}
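
// ss_txfrm_size encodes the (subsampled) transform size as 2 * TX_SIZE, so
// the switch below divides by two to recover the TX_SIZE enum. Only plane 0
// (luma) may select a hybrid transform type; chroma always uses DCT_DCT.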
static void decode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
                         int ss_txfrm_size, void *arg) {
  MACROBLOCKD* const xd = arg;
  struct macroblockd_plane *pd = &xd->plane[plane];
  int16_t* const qcoeff = BLOCK_OFFSET(pd->qcoeff, block, 16);
  const int stride = pd->dst.stride;
  const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
                                                       block, ss_txfrm_size);
  uint8_t* const dst = raster_block_offset_uint8(xd, bsize, plane,
                                                 raster_block,
                                                 pd->dst.buf, stride);
  TX_TYPE tx_type;

  switch (ss_txfrm_size / 2) {
    case TX_4X4:
      tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT;
      if (tx_type == DCT_DCT)
        xd->itxm_add(qcoeff, dst, stride, pd->eobs[block]);
      else
        vp9_iht_add_c(tx_type, qcoeff, dst, stride, pd->eobs[block]);
      break;
    case TX_8X8:
      tx_type = plane == 0 ? get_tx_type_8x8(xd) : DCT_DCT;
      vp9_iht_add_8x8_c(tx_type, qcoeff, dst, stride, pd->eobs[block]);
      break;
    case TX_16X16:
      tx_type = plane == 0 ? get_tx_type_16x16(xd) : DCT_DCT;
      vp9_iht_add_16x16_c(tx_type, qcoeff, dst, stride, pd->eobs[block]);
      break;
    case TX_32X32:
      vp9_idct_add_32x32(qcoeff, dst, stride, pd->eobs[block]);
      break;
  }
}
static void decode_block_intra(int plane, int block, BLOCK_SIZE_TYPE bsize,
                               int ss_txfrm_size, void *arg) {
  MACROBLOCKD* const xd = arg;
  struct macroblockd_plane *pd = &xd->plane[plane];
  MODE_INFO *const mi = xd->mode_info_context;
  const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
                                                       block, ss_txfrm_size);
  uint8_t* const dst = raster_block_offset_uint8(xd, bsize, plane,
                                                 raster_block,
                                                 pd->dst.buf, pd->dst.stride);
  const TX_SIZE tx_size = (TX_SIZE)(ss_txfrm_size / 2);
  const int tx_ib = raster_block >> tx_size;
  const int mode = plane == 0 ? mi->mbmi.mode
                              : mi->mbmi.uv_mode;
  int plane_b_size;
  int b_mode;

  if (plane == 0 && mi->mbmi.sb_type < BLOCK_SIZE_SB8X8) {
    assert(bsize == BLOCK_SIZE_SB8X8);
    b_mode = mi->bmi[raster_block].as_mode;
  } else {
    b_mode = mode;
  }

  if (xd->mb_to_right_edge < 0 || xd->mb_to_bottom_edge < 0)
    extend_for_intra(xd, plane, block, bsize, ss_txfrm_size);

  plane_b_size = b_width_log2(bsize) - pd->subsampling_x;
  vp9_predict_intra_block(xd, tx_ib, plane_b_size, tx_size, b_mode,
                          dst, pd->dst.stride,
                          dst, pd->dst.stride);

  // Early exit if there are no coefficients
  if (mi->mbmi.mb_skip_coeff)
    return;

  decode_block(plane, block, bsize, ss_txfrm_size, arg);
}
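
// Returns the eob total for the block, or -1 when the block is coded as
// skipped and carries no tokens (callers key off the sign of the result).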
static int decode_tokens(VP9D_COMP *pbi, BLOCK_SIZE_TYPE bsize, vp9_reader *r) {
  MACROBLOCKD *const xd = &pbi->mb;

  if (xd->mode_info_context->mbmi.mb_skip_coeff) {
    vp9_reset_sb_tokens_context(xd, bsize);
    return -1;
  } else {
    if (xd->segmentation_enabled)
      mb_init_dequantizer(&pbi->common, xd);

    // TODO(dkovalev) if (!vp9_reader_has_error(r))
    return vp9_decode_tokens(pbi, r, bsize);
  }
}
static void set_offsets(VP9D_COMP *pbi, BLOCK_SIZE_TYPE bsize,
                        int mi_row, int mi_col) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  const int bh = 1 << mi_height_log2(bsize);
  const int bw = 1 << mi_width_log2(bsize);
  const int mi_idx = mi_row * cm->mode_info_stride + mi_col;
  int i;

  xd->mode_info_context = cm->mi + mi_idx;
  xd->mode_info_context->mbmi.sb_type = bsize;
  // Special case: if prev_mi is NULL, the previous mode info context
  // cannot be used.
  xd->prev_mode_info_context = cm->prev_mi ? cm->prev_mi + mi_idx : NULL;

  for (i = 0; i < MAX_MB_PLANE; i++) {
    struct macroblockd_plane *pd = &xd->plane[i];
    pd->above_context = cm->above_context[i] +
                            (mi_col * 2 >> pd->subsampling_x);
    pd->left_context = cm->left_context[i] +
                           (((mi_row * 2) & 15) >> pd->subsampling_y);
  }

  set_partition_seg_context(cm, xd, mi_row, mi_col);

  // Distance of MB to the various image edges. These are specified to 8th pel
  // as they are always compared to values that are in 1/8th pel units.
  set_mi_row_col(cm, xd, mi_row, bh, mi_col, bw);

  setup_dst_planes(xd, &cm->yv12_fb[cm->new_fb_idx], mi_row, mi_col);
}
static void set_ref(VP9D_COMP *pbi, int i, int mi_row, int mi_col) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  MB_MODE_INFO *const mbmi = &xd->mode_info_context->mbmi;
  const int ref = mbmi->ref_frame[i] - 1;
  const YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->active_ref_idx[ref]];

  xd->scale_factor[i] = cm->active_ref_scale[ref];
  xd->scale_factor_uv[i] = cm->active_ref_scale[ref];
  setup_pre_planes(xd, i, cfg, mi_row, mi_col,
                   &xd->scale_factor[i], &xd->scale_factor_uv[i]);
  xd->corrupted |= cfg->corrupted;
}
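
// Decode a single block: read its mode info, form the intra or inter
// prediction, then decode and add the transform residual. Sub-8x8 blocks
// share one set of mode info, so only the first sub-block position decodes.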
static void decode_modes_b(VP9D_COMP *pbi, int mi_row, int mi_col,
                           vp9_reader *r, BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  const int less8x8 = bsize < BLOCK_SIZE_SB8X8;
  MB_MODE_INFO *mbmi;

  if (less8x8)
    if (xd->ab_index > 0)
      return;

  set_offsets(pbi, bsize, mi_row, mi_col);
  vp9_read_mode_info(pbi, mi_row, mi_col, r);

  if (less8x8)
    bsize = BLOCK_SIZE_SB8X8;

  // Has to be called after set_offsets
  mbmi = &xd->mode_info_context->mbmi;

  if (mbmi->ref_frame[0] == INTRA_FRAME) {
    // Intra reconstruction
    decode_tokens(pbi, bsize, r);
    foreach_transformed_block(xd, bsize, decode_block_intra, xd);
  } else {
    // Inter reconstruction
    int eobtotal;

    set_ref(pbi, 0, mi_row, mi_col);
    if (mbmi->ref_frame[1] > INTRA_FRAME)
      set_ref(pbi, 1, mi_row, mi_col);

    vp9_setup_interp_filters(xd, mbmi->interp_filter, cm);
    vp9_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
    eobtotal = decode_tokens(pbi, bsize, r);
    if (less8x8) {
      if (eobtotal >= 0)
        foreach_transformed_block(xd, bsize, decode_block, xd);
    } else {
      assert(mbmi->sb_type == bsize);
      if (eobtotal == 0)
        vp9_set_pred_flag_mbskip(xd, bsize, 1);  // skip loopfilter
      else if (eobtotal > 0)
        foreach_transformed_block(xd, bsize, decode_block, xd);
    }
  }
  xd->corrupted |= vp9_reader_has_error(r);
}
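
// Recursively walk the superblock partition tree. The partition symbol is
// read with the full tree when the whole block fits in the frame (idx == 0),
// with a single bit when only one split direction is possible at a frame
// edge (idx > 0), and is forced to PARTITION_SPLIT otherwise.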
static void decode_modes_sb(VP9D_COMP *pbi, int mi_row, int mi_col,
                            vp9_reader* r, BLOCK_SIZE_TYPE bsize) {
  VP9_COMMON *const pc = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  int bs = (1 << mi_width_log2(bsize)) / 2, n;
  PARTITION_TYPE partition = PARTITION_NONE;
  BLOCK_SIZE_TYPE subsize;

  if (mi_row >= pc->mi_rows || mi_col >= pc->mi_cols)
    return;

  if (bsize < BLOCK_SIZE_SB8X8)
    if (xd->ab_index != 0)
      return;

  if (bsize >= BLOCK_SIZE_SB8X8) {
    int pl;
    const int idx = check_bsize_coverage(pc, xd, mi_row, mi_col, bsize);
    set_partition_seg_context(pc, xd, mi_row, mi_col);
    pl = partition_plane_context(xd, bsize);

    if (idx == 0)
      partition = treed_read(r, vp9_partition_tree,
                             pc->fc.partition_prob[pc->frame_type][pl]);
    else if (idx > 0 &&
             !vp9_read(r, pc->fc.partition_prob[pc->frame_type][pl][idx]))
      partition = (idx == 1) ? PARTITION_HORZ : PARTITION_VERT;
    else
      partition = PARTITION_SPLIT;

    pc->fc.partition_counts[pl][partition]++;
  }

  subsize = get_subsize(bsize, partition);
  *(get_sb_index(xd, subsize)) = 0;

  switch (partition) {
    case PARTITION_NONE:
      decode_modes_b(pbi, mi_row, mi_col, r, subsize);
      break;
    case PARTITION_HORZ:
      decode_modes_b(pbi, mi_row, mi_col, r, subsize);
      *(get_sb_index(xd, subsize)) = 1;
      if (mi_row + bs < pc->mi_rows)
        decode_modes_b(pbi, mi_row + bs, mi_col, r, subsize);
      break;
    case PARTITION_VERT:
      decode_modes_b(pbi, mi_row, mi_col, r, subsize);
      *(get_sb_index(xd, subsize)) = 1;
      if (mi_col + bs < pc->mi_cols)
        decode_modes_b(pbi, mi_row, mi_col + bs, r, subsize);
      break;
    case PARTITION_SPLIT:
      for (n = 0; n < 4; n++) {
        int j = n >> 1, i = n & 0x01;
        *(get_sb_index(xd, subsize)) = n;
        decode_modes_sb(pbi, mi_row + j * bs, mi_col + i * bs, r, subsize);
      }
      break;
    default:
      assert(0);
  }

  // update partition context
  if (bsize >= BLOCK_SIZE_SB8X8 &&
      (bsize == BLOCK_SIZE_SB8X8 || partition != PARTITION_SPLIT)) {
    set_partition_seg_context(pc, xd, mi_row, mi_col);
    update_partition_context(xd, subsize, bsize);
  }
}
static void setup_token_decoder(VP9D_COMP *pbi,
                                const uint8_t *data, size_t read_size,
                                vp9_reader *r) {
  VP9_COMMON *pc = &pbi->common;
  const uint8_t *data_end = pbi->source + pbi->source_sz;

  // Validate the calculated partition length. If the buffer
  // described by the partition can't be fully read, then restrict
  // it to the portion that can be (for EC mode) or throw an error.
  if (!read_is_valid(data, read_size, data_end))
    vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt tile length");

  if (vp9_reader_init(r, data, read_size))
    vpx_internal_error(&pc->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder %d", 1);
}
static void read_coef_probs_common(FRAME_CONTEXT *fc, TX_SIZE tx_size,
                                   vp9_reader *r) {
  vp9_coeff_probs_model *coef_probs = fc->coef_probs[tx_size];
  int i, j, k, l, m;

  if (vp9_read_bit(r))
    for (i = 0; i < BLOCK_TYPES; i++)
      for (j = 0; j < REF_TYPES; j++)
        for (k = 0; k < COEF_BANDS; k++)
          for (l = 0; l < PREV_COEF_CONTEXTS; l++)
            if (k > 0 || l < 3)
              for (m = 0; m < UNCONSTRAINED_NODES; m++)
                if (vp9_read(r, VP9_COEF_UPDATE_PROB))
                  vp9_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
}
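
// Coefficient probabilities are updated per transform size, but only for
// the sizes that the frame's transform mode can actually select.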
static void read_coef_probs(FRAME_CONTEXT *fc, TXFM_MODE txfm_mode,
                            vp9_reader *r) {
  read_coef_probs_common(fc, TX_4X4, r);

  if (txfm_mode > ONLY_4X4)
    read_coef_probs_common(fc, TX_8X8, r);

  if (txfm_mode > ALLOW_8X8)
    read_coef_probs_common(fc, TX_16X16, r);

  if (txfm_mode > ALLOW_16X16)
    read_coef_probs_common(fc, TX_32X32, r);
}
static void setup_segmentation(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  int i, j;

  xd->update_mb_segmentation_map = 0;
  xd->update_mb_segmentation_data = 0;

  xd->segmentation_enabled = vp9_rb_read_bit(rb);
  if (!xd->segmentation_enabled)
    return;

  // Segmentation map update
  xd->update_mb_segmentation_map = vp9_rb_read_bit(rb);
  if (xd->update_mb_segmentation_map) {
    for (i = 0; i < MB_SEG_TREE_PROBS; i++)
      xd->mb_segment_tree_probs[i] = vp9_rb_read_bit(rb) ?
                                         vp9_rb_read_literal(rb, 8) : MAX_PROB;

    cm->temporal_update = vp9_rb_read_bit(rb);
    if (cm->temporal_update) {
      for (i = 0; i < PREDICTION_PROBS; i++)
        cm->segment_pred_probs[i] = vp9_rb_read_bit(rb) ?
                                        vp9_rb_read_literal(rb, 8) : MAX_PROB;
    } else {
      for (i = 0; i < PREDICTION_PROBS; i++)
        cm->segment_pred_probs[i] = MAX_PROB;
    }
  }

  // Segmentation data update
  xd->update_mb_segmentation_data = vp9_rb_read_bit(rb);
  if (xd->update_mb_segmentation_data) {
    xd->mb_segment_abs_delta = vp9_rb_read_bit(rb);

    vp9_clearall_segfeatures(xd);

    for (i = 0; i < MAX_MB_SEGMENTS; i++) {
      for (j = 0; j < SEG_LVL_MAX; j++) {
        int data = 0;
        const int feature_enabled = vp9_rb_read_bit(rb);
        if (feature_enabled) {
          vp9_enable_segfeature(xd, i, j);
          data = decode_unsigned_max(rb, vp9_seg_feature_data_max(j));
          if (vp9_is_segfeature_signed(j))
            data = vp9_rb_read_bit(rb) ? -data : data;
        }
        vp9_set_segdata(xd, i, j, data);
      }
    }
  }
}
static void setup_loopfilter(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;

  cm->filter_level = vp9_rb_read_literal(rb, 6);
  cm->sharpness_level = vp9_rb_read_literal(rb, 3);

  // Read in loop filter deltas applied at the MB level based on mode or ref
  // frame.
  xd->mode_ref_lf_delta_update = 0;

  xd->mode_ref_lf_delta_enabled = vp9_rb_read_bit(rb);
  if (xd->mode_ref_lf_delta_enabled) {
    xd->mode_ref_lf_delta_update = vp9_rb_read_bit(rb);
    if (xd->mode_ref_lf_delta_update) {
      int i;

      for (i = 0; i < MAX_REF_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          xd->ref_lf_deltas[i] = vp9_rb_read_signed_literal(rb, 6);

      for (i = 0; i < MAX_MODE_LF_DELTAS; i++)
        if (vp9_rb_read_bit(rb))
          xd->mode_lf_deltas[i] = vp9_rb_read_signed_literal(rb, 6);
    }
  }
}
static int read_delta_q(struct vp9_read_bit_buffer *rb, int *delta_q) {
  const int old = *delta_q;
  if (vp9_rb_read_bit(rb))
    *delta_q = vp9_rb_read_signed_literal(rb, 4);
  return old != *delta_q;
}
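
// Each delta-q is sent as a present flag plus a 4-bit signed literal;
// read_delta_q above reports whether the value changed so that the
// dequantization tables are rebuilt only when necessary.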
static void setup_quantization(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
  MACROBLOCKD *const xd = &pbi->mb;
  VP9_COMMON *const cm = &pbi->common;
  int update = 0;

  cm->base_qindex = vp9_rb_read_literal(rb, QINDEX_BITS);
  update |= read_delta_q(rb, &cm->y_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_dc_delta_q);
  update |= read_delta_q(rb, &cm->uv_ac_delta_q);
  if (update)
    vp9_init_dequantizer(cm);

  xd->lossless = cm->base_qindex == 0 &&
                 cm->y_dc_delta_q == 0 &&
                 cm->uv_dc_delta_q == 0 &&
                 cm->uv_ac_delta_q == 0;
  xd->itxm_add = xd->lossless ? vp9_idct_add_lossless_c
                              : vp9_idct_add;
}
static INTERPOLATIONFILTERTYPE read_interp_filter_type(
    struct vp9_read_bit_buffer *rb) {
  return vp9_rb_read_bit(rb) ? SWITCHABLE
                             : vp9_rb_read_literal(rb, 2);
}
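
// Frame dimensions are coded minus one as 16-bit literals, giving a legal
// range of 1..65536 pels in each direction.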
static void read_frame_size(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb,
                            int *width, int *height) {
  const int w = vp9_rb_read_literal(rb, 16) + 1;
  const int h = vp9_rb_read_literal(rb, 16) + 1;
  *width = w;
  *height = h;
}
static void setup_display_size(VP9D_COMP *pbi, struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;

  cm->display_width = cm->width;
  cm->display_height = cm->height;
  if (vp9_rb_read_bit(rb))
    read_frame_size(cm, rb, &cm->display_width, &cm->display_height);
}
static void apply_frame_size(VP9D_COMP *pbi, int width, int height) {
  VP9_COMMON *cm = &pbi->common;

  if (cm->width != width || cm->height != height) {
    if (!pbi->initial_width || !pbi->initial_height) {
      if (vp9_alloc_frame_buffers(cm, width, height))
        vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                           "Failed to allocate frame buffers");
      pbi->initial_width = width;
      pbi->initial_height = height;
    } else {
      if (width > pbi->initial_width)
        vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                           "Frame width too large");

      if (height > pbi->initial_height)
        vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                           "Frame height too large");
    }

    cm->width = width;
    cm->height = height;

    vp9_update_frame_size(cm);
  }

  vp9_realloc_frame_buffer(&cm->yv12_fb[cm->new_fb_idx], cm->width, cm->height,
                           cm->subsampling_x, cm->subsampling_y,
                           VP9BORDERINPIXELS);
}
static void setup_frame_size(VP9D_COMP *pbi,
                             struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  int width, height;

  read_frame_size(cm, rb, &width, &height);
  setup_display_size(pbi, rb);
  apply_frame_size(pbi, width, height);
}
static void setup_frame_size_with_refs(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  int width, height;
  int found = 0, i;

  for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
    if (vp9_rb_read_bit(rb)) {
      YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->active_ref_idx[i]];
      width = cfg->y_crop_width;
      height = cfg->y_crop_height;
      found = 1;
      break;
    }
  }

  if (!found)
    read_frame_size(cm, rb, &width, &height);

  if (!width || !height)
    vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                       "Referenced frame with invalid size");

  setup_display_size(pbi, rb);
  apply_frame_size(pbi, width, height);
}
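
// Save the probabilities in effect at the start of frame decoding in their
// pre_* shadows and clear the per-frame symbol counters; both are consumed
// by the backward adaptation pass after the frame has been decoded.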
static void update_frame_context(FRAME_CONTEXT *fc) {
  vp9_copy(fc->pre_coef_probs, fc->coef_probs);
  vp9_copy(fc->pre_y_mode_prob, fc->y_mode_prob);
  vp9_copy(fc->pre_uv_mode_prob, fc->uv_mode_prob);
  vp9_copy(fc->pre_partition_prob, fc->partition_prob[1]);
  vp9_copy(fc->pre_intra_inter_prob, fc->intra_inter_prob);
  vp9_copy(fc->pre_comp_inter_prob, fc->comp_inter_prob);
  vp9_copy(fc->pre_single_ref_prob, fc->single_ref_prob);
  vp9_copy(fc->pre_comp_ref_prob, fc->comp_ref_prob);
  fc->pre_nmvc = fc->nmvc;
  vp9_copy(fc->pre_switchable_interp_prob, fc->switchable_interp_prob);
  vp9_copy(fc->pre_inter_mode_probs, fc->inter_mode_probs);
  vp9_copy(fc->pre_tx_probs_8x8p, fc->tx_probs_8x8p);
  vp9_copy(fc->pre_tx_probs_16x16p, fc->tx_probs_16x16p);
  vp9_copy(fc->pre_tx_probs_32x32p, fc->tx_probs_32x32p);
  vp9_copy(fc->pre_mbskip_probs, fc->mbskip_probs);

  vp9_zero(fc->coef_counts);
  vp9_zero(fc->eob_branch_counts);
  vp9_zero(fc->y_mode_counts);
  vp9_zero(fc->uv_mode_counts);
  vp9_zero(fc->NMVcount);
  vp9_zero(fc->inter_mode_counts);
  vp9_zero(fc->switchable_interp_count);
  vp9_zero(fc->intra_inter_count);
  vp9_zero(fc->comp_inter_count);
  vp9_zero(fc->single_ref_count);
  vp9_zero(fc->comp_ref_count);
  vp9_zero(fc->tx_count_8x8p);
  vp9_zero(fc->tx_count_16x16p);
  vp9_zero(fc->tx_count_32x32p);
}
static void decode_tile(VP9D_COMP *pbi, vp9_reader *r) {
  VP9_COMMON *const pc = &pbi->common;
  int mi_row, mi_col;

  for (mi_row = pc->cur_tile_mi_row_start; mi_row < pc->cur_tile_mi_row_end;
       mi_row += MI_BLOCK_SIZE) {
    // For a SB there are 2 left contexts, each pertaining to a MB row within
    // the SB.
    vpx_memset(&pc->left_context, 0, sizeof(pc->left_context));
    vpx_memset(pc->left_seg_context, 0, sizeof(pc->left_seg_context));
    for (mi_col = pc->cur_tile_mi_col_start; mi_col < pc->cur_tile_mi_col_end;
         mi_col += MI_BLOCK_SIZE)
      decode_modes_sb(pbi, mi_row, mi_col, r, BLOCK_SIZE_SB64X64);
  }
}
static void setup_tile_info(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  int delta_log2_tiles;

  vp9_get_tile_n_bits(cm, &cm->log2_tile_columns, &delta_log2_tiles);
  while (delta_log2_tiles--) {
    if (vp9_rb_read_bit(rb)) {
      cm->log2_tile_columns++;
    } else {
      break;
    }
  }

  cm->log2_tile_rows = vp9_rb_read_bit(rb);
  if (cm->log2_tile_rows)
    cm->log2_tile_rows += vp9_rb_read_bit(rb);

  cm->tile_columns = 1 << cm->log2_tile_columns;
  cm->tile_rows = 1 << cm->log2_tile_rows;
}
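
// Tile payloads are laid out row-major, and every tile except the last is
// prefixed with a 4-byte big-endian length field (see read_be32). The
// inv_tile_order path below first walks the length prefixes so the columns
// can then be decoded right to left.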
static void decode_tiles(VP9D_COMP *pbi,
                         const uint8_t *data, size_t first_partition_size,
                         vp9_reader *residual_bc) {
  VP9_COMMON *const pc = &pbi->common;
  const uint8_t *data_ptr = data + first_partition_size;
  const uint8_t *const data_end = pbi->source + pbi->source_sz;
  const int aligned_mi_cols = mi_cols_aligned_to_sb(pc->mi_cols);
  int tile_row, tile_col;

  // Note: this memset assumes above_context[0], [1] and [2]
  // are allocated as part of the same buffer.
  vpx_memset(pc->above_context[0], 0,
             sizeof(ENTROPY_CONTEXT) * 2 * MAX_MB_PLANE * aligned_mi_cols);

  vpx_memset(pc->above_seg_context, 0,
             sizeof(PARTITION_CONTEXT) * aligned_mi_cols);

  if (pbi->oxcf.inv_tile_order) {
    const int n_cols = pc->tile_columns;
    const uint8_t *data_ptr2[4][1 << 6];
    vp9_reader bc_bak = {0};

    // pre-initialize the offsets, we're going to read in inverse order
    data_ptr2[0][0] = data_ptr;
    for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
      if (tile_row) {
        const int size = read_be32(data_ptr2[tile_row - 1][n_cols - 1]);
        data_ptr2[tile_row - 1][n_cols - 1] += 4;
        data_ptr2[tile_row][0] = data_ptr2[tile_row - 1][n_cols - 1] + size;
      }

      for (tile_col = 1; tile_col < n_cols; tile_col++) {
        const int size = read_be32(data_ptr2[tile_row][tile_col - 1]);
        data_ptr2[tile_row][tile_col - 1] += 4;
        data_ptr2[tile_row][tile_col] =
            data_ptr2[tile_row][tile_col - 1] + size;
      }
    }

    for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
      vp9_get_tile_row_offsets(pc, tile_row);
      for (tile_col = n_cols - 1; tile_col >= 0; tile_col--) {
        vp9_get_tile_col_offsets(pc, tile_col);
        setup_token_decoder(pbi, data_ptr2[tile_row][tile_col],
                            data_end - data_ptr2[tile_row][tile_col],
                            residual_bc);
        decode_tile(pbi, residual_bc);
        if (tile_row == pc->tile_rows - 1 && tile_col == n_cols - 1)
          bc_bak = *residual_bc;
      }
    }
    *residual_bc = bc_bak;
  } else {
    int has_more;

    for (tile_row = 0; tile_row < pc->tile_rows; tile_row++) {
      vp9_get_tile_row_offsets(pc, tile_row);
      for (tile_col = 0; tile_col < pc->tile_columns; tile_col++) {
        size_t size;

        vp9_get_tile_col_offsets(pc, tile_col);

        has_more = tile_col < pc->tile_columns - 1 ||
                   tile_row < pc->tile_rows - 1;
        if (has_more) {
          if (!read_is_valid(data_ptr, 4, data_end))
            vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
                               "Truncated packet or corrupt tile length");

          size = read_be32(data_ptr);
          data_ptr += 4;
        } else {
          size = data_end - data_ptr;
        }

        setup_token_decoder(pbi, data_ptr, size, residual_bc);
        decode_tile(pbi, residual_bc);
        data_ptr += size;
      }
    }
  }
}
static void check_sync_code(VP9_COMMON *cm, struct vp9_read_bit_buffer *rb) {
  if (vp9_rb_read_literal(rb, 8) != SYNC_CODE_0 ||
      vp9_rb_read_literal(rb, 8) != SYNC_CODE_1 ||
      vp9_rb_read_literal(rb, 8) != SYNC_CODE_2) {
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame sync code");
  }
}
static void error_handler(void *data, size_t bit_offset) {
  VP9_COMMON *const cm = (VP9_COMMON *)data;
  vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
}
static void setup_inter_inter(VP9_COMMON *cm) {
  int i;

  cm->allow_comp_inter_inter = 0;
  for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
    cm->allow_comp_inter_inter |= i > 0 &&
        cm->ref_frame_sign_bias[i + 1] != cm->ref_frame_sign_bias[1];
  }

  if (cm->allow_comp_inter_inter) {
    // which one is always-on in comp inter-inter?
    if (cm->ref_frame_sign_bias[LAST_FRAME] ==
        cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
      cm->comp_fixed_ref = ALTREF_FRAME;
      cm->comp_var_ref[0] = LAST_FRAME;
      cm->comp_var_ref[1] = GOLDEN_FRAME;
    } else if (cm->ref_frame_sign_bias[LAST_FRAME] ==
               cm->ref_frame_sign_bias[ALTREF_FRAME]) {
      cm->comp_fixed_ref = GOLDEN_FRAME;
      cm->comp_var_ref[0] = LAST_FRAME;
      cm->comp_var_ref[1] = ALTREF_FRAME;
    } else {
      cm->comp_fixed_ref = LAST_FRAME;
      cm->comp_var_ref[0] = GOLDEN_FRAME;
      cm->comp_var_ref[1] = ALTREF_FRAME;
    }
  }
}
#define RESERVED \
  if (vp9_rb_read_bit(rb)) \
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM, \
                       "Reserved bit must be unset")
static size_t read_uncompressed_header(VP9D_COMP *pbi,
                                       struct vp9_read_bit_buffer *rb) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  int i;

  cm->last_frame_type = cm->frame_type;

  if (vp9_rb_read_literal(rb, 2) != 0x2)
    vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                       "Invalid frame marker");

  cm->version = vp9_rb_read_bit(rb);
  RESERVED;

  if (vp9_rb_read_bit(rb)) {
    // show an existing frame directly
    int frame_to_show = cm->ref_frame_map[vp9_rb_read_literal(rb, 3)];
    ref_cnt_fb(cm->fb_idx_ref_cnt, &cm->new_fb_idx, frame_to_show);
    pbi->refresh_frame_flags = 0;
    cm->filter_level = 0;
    return 0;
  }

  cm->frame_type = (FRAME_TYPE) vp9_rb_read_bit(rb);
  cm->show_frame = vp9_rb_read_bit(rb);
  cm->error_resilient_mode = vp9_rb_read_bit(rb);

  if (cm->frame_type == KEY_FRAME) {
    int csp;

    check_sync_code(cm, rb);

    csp = vp9_rb_read_literal(rb, 3);  // colorspace
    if (csp != 7) {  // != sRGB
      vp9_rb_read_bit(rb);  // [16,235] (including xvycc) vs [0,255] range
      if (cm->version == 1) {
        cm->subsampling_x = vp9_rb_read_bit(rb);
        cm->subsampling_y = vp9_rb_read_bit(rb);
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        cm->subsampling_y = cm->subsampling_x = 1;
      }
    } else {
      if (cm->version == 1) {
        cm->subsampling_y = cm->subsampling_x = 0;
        vp9_rb_read_bit(rb);  // has extra plane
      } else {
        vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                           "RGB not supported in profile 0");
      }
    }

    pbi->refresh_frame_flags = (1 << NUM_REF_FRAMES) - 1;

    for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
      cm->active_ref_idx[i] = cm->new_fb_idx;

    setup_frame_size(pbi, rb);
  } else {
    cm->intra_only = cm->show_frame ? 0 : vp9_rb_read_bit(rb);

    cm->reset_frame_context = cm->error_resilient_mode ?
        0 : vp9_rb_read_literal(rb, 2);

    if (cm->intra_only) {
      check_sync_code(cm, rb);

      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);
      setup_frame_size(pbi, rb);
    } else {
      pbi->refresh_frame_flags = vp9_rb_read_literal(rb, NUM_REF_FRAMES);

      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i) {
        const int ref = vp9_rb_read_literal(rb, NUM_REF_FRAMES_LG2);
        cm->active_ref_idx[i] = cm->ref_frame_map[ref];
        cm->ref_frame_sign_bias[LAST_FRAME + i] = vp9_rb_read_bit(rb);
      }

      setup_frame_size_with_refs(pbi, rb);

      xd->allow_high_precision_mv = vp9_rb_read_bit(rb);
      cm->mcomp_filter_type = read_interp_filter_type(rb);

      for (i = 0; i < ALLOWED_REFS_PER_FRAME; ++i)
        vp9_setup_scale_factors(cm, i);

      setup_inter_inter(cm);
    }
  }

  if (!cm->error_resilient_mode) {
    cm->refresh_frame_context = vp9_rb_read_bit(rb);
    cm->frame_parallel_decoding_mode = vp9_rb_read_bit(rb);
  } else {
    cm->refresh_frame_context = 0;
    cm->frame_parallel_decoding_mode = 1;
  }

  cm->frame_context_idx = vp9_rb_read_literal(rb, NUM_FRAME_CONTEXTS_LG2);

  if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode || cm->intra_only)
    vp9_setup_past_independence(cm, xd);

  setup_loopfilter(pbi, rb);
  setup_quantization(pbi, rb);
  setup_segmentation(pbi, rb);

  setup_tile_info(cm, rb);

  return vp9_rb_read_literal(rb, 16);
}
static int read_compressed_header(VP9D_COMP *pbi, const uint8_t *data,
                                  size_t partition_size) {
  VP9_COMMON *const cm = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;
  vp9_reader r;

  if (vp9_reader_init(&r, data, partition_size))
    vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                       "Failed to allocate bool decoder 0");

  cm->txfm_mode = xd->lossless ? ONLY_4X4 : read_tx_mode(&r);
  if (cm->txfm_mode == TX_MODE_SELECT)
    read_tx_probs(&cm->fc, &r);
  read_coef_probs(&cm->fc, cm->txfm_mode, &r);

  vp9_prepare_read_mode_info(pbi, &r);

  return vp9_reader_has_error(&r);
}
void vp9_init_dequantizer(VP9_COMMON *pc) {
  int q;

  for (q = 0; q < QINDEX_RANGE; q++) {
    // DC value
    pc->y_dequant[q][0] = vp9_dc_quant(q, pc->y_dc_delta_q);
    pc->uv_dequant[q][0] = vp9_dc_quant(q, pc->uv_dc_delta_q);

    // AC values
    pc->y_dequant[q][1] = vp9_ac_quant(q, 0);
    pc->uv_dequant[q][1] = vp9_ac_quant(q, pc->uv_ac_delta_q);
  }
}
int vp9_decode_frame(VP9D_COMP *pbi, const uint8_t **p_data_end) {
  int i;
  VP9_COMMON *const pc = &pbi->common;
  MACROBLOCKD *const xd = &pbi->mb;

  const uint8_t *data = pbi->source;
  const uint8_t *data_end = pbi->source + pbi->source_sz;

  struct vp9_read_bit_buffer rb = { data, data_end, 0,
                                    pc, error_handler };
  const size_t first_partition_size = read_uncompressed_header(pbi, &rb);
  const int keyframe = pc->frame_type == KEY_FRAME;
  YV12_BUFFER_CONFIG *new_fb = &pc->yv12_fb[pc->new_fb_idx];

  if (!first_partition_size) {
    // showing a frame directly
    *p_data_end = data + 1;
    return 0;
  }
  data += vp9_rb_bytes_read(&rb);
  xd->corrupted = 0;
  new_fb->corrupted = 0;

  if (!pbi->decoded_key_frame && !keyframe)
    return -1;

  if (!read_is_valid(data, first_partition_size, data_end))
    vpx_internal_error(&pc->error, VPX_CODEC_CORRUPT_FRAME,
                       "Truncated packet or corrupt header length");

  xd->mode_info_context = pc->mi;
  xd->prev_mode_info_context = pc->prev_mi;
  xd->frame_type = pc->frame_type;
  xd->mode_info_stride = pc->mode_info_stride;

  mb_init_dequantizer(pc, &pbi->mb);  // MB level dequantizer setup
  vp9_setup_interp_filters(xd, pc->mcomp_filter_type, pc);

  pc->fc = pc->frame_contexts[pc->frame_context_idx];

  // Initialize xd pointers. Any reference should do for xd->pre, so use 0.
  setup_pre_planes(xd, 0, &pc->yv12_fb[pc->active_ref_idx[0]], 0, 0,
                   NULL, NULL);
  setup_dst_planes(xd, new_fb, 0, 0);

  new_fb->corrupted |= read_compressed_header(pbi, data, first_partition_size);

  // Create the segmentation map structure and set to 0
  if (!pc->last_frame_seg_map)
    CHECK_MEM_ERROR(pc, pc->last_frame_seg_map,