Commit 975a1447 authored by Stefano Sabatini, committed by Diego Biurrun
Browse files

Replace deprecated FF_*_TYPE symbols with AV_PICTURE_TYPE_*.


Signed-off-by: Diego Biurrun <diego@biurrun.de>
parent 6209669d
......@@ -1198,7 +1198,7 @@ static void do_video_out(AVFormatContext *s,
//av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> encoder\n", ost->sync_opts);
if (ost->forced_kf_index < ost->forced_kf_count &&
big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
big_picture.pict_type = FF_I_TYPE;
big_picture.pict_type = AV_PICTURE_TYPE_I;
ost->forced_kf_index++;
}
ret = avcodec_encode_video(enc,
......
......@@ -783,11 +783,11 @@ static int decode_frame(AVCodecContext *avctx,
}
if(frame_4cc == AV_RL32("ifr2")){
p->pict_type= FF_I_TYPE;
p->pict_type= AV_PICTURE_TYPE_I;
if(decode_i2_frame(f, buf-4, frame_size) < 0)
return -1;
}else if(frame_4cc == AV_RL32("ifrm")){
p->pict_type= FF_I_TYPE;
p->pict_type= AV_PICTURE_TYPE_I;
if(decode_i_frame(f, buf, frame_size) < 0)
return -1;
}else if(frame_4cc == AV_RL32("pfrm") || frame_4cc == AV_RL32("pfr2")){
......@@ -799,7 +799,7 @@ static int decode_frame(AVCodecContext *avctx,
}
}
p->pict_type= FF_P_TYPE;
p->pict_type= AV_PICTURE_TYPE_P;
if(decode_p_frame(f, buf, frame_size) < 0)
return -1;
}else if(frame_4cc == AV_RL32("snd_")){
......@@ -808,7 +808,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "ignoring unknown chunk length:%d\n", buf_size);
}
p->key_frame= p->pict_type == FF_I_TYPE;
p->key_frame= p->pict_type == AV_PICTURE_TYPE_I;
*picture= *p;
*data_size = sizeof(AVPicture);
......
......@@ -216,7 +216,7 @@ static av_cold int a64multi_init_encoder(AVCodecContext *avctx)
avcodec_get_frame_defaults(&c->picture);
avctx->coded_frame = &c->picture;
avctx->coded_frame->pict_type = FF_I_TYPE;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
if (!avctx->codec_tag)
avctx->codec_tag = AV_RL32("a64m");
......@@ -290,7 +290,7 @@ static int a64multi_encode_frame(AVCodecContext *avctx, unsigned char *buf,
/* fill up mc_meta_charset with data until lifetime exceeds */
if (c->mc_frame_counter < c->mc_lifetime) {
*p = *pict;
p->pict_type = FF_I_TYPE;
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
to_meta_with_crop(avctx, p, meta + 32000 * c->mc_frame_counter);
c->mc_frame_counter++;
......
......@@ -226,7 +226,7 @@ static int execute_code(AVCodecContext * avctx, int c)
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
s->frame.pict_type = FF_I_TYPE;
s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1;
memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
erase_screen(avctx);
......@@ -323,7 +323,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
s->frame.pict_type = FF_I_TYPE;
s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1;
memcpy(s->frame.data[1], ff_cga_palette, 16 * 4);
......
......@@ -405,7 +405,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
p->pict_type= FF_I_TYPE;
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
av_fast_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
......@@ -470,7 +470,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
init_put_bits(&a->pb, buf, buf_size);
*p = *pict;
p->pict_type= FF_I_TYPE;
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
for(mb_y=0; mb_y<a->mb_height2; mb_y++){
......
......@@ -3842,7 +3842,7 @@ typedef struct AVCodecParserContext {
/*!
* Set by parser to 1 for key frames and 0 for non-key frames.
* It is initialized to -1, so if the parser doesn't set this flag,
* old-style fallback using FF_I_TYPE picture type as key frames
* old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
* will be used.
*/
int key_frame;
......
......@@ -63,7 +63,7 @@ avs_decode_frame(AVCodecContext * avctx,
return -1;
}
p->reference = 1;
p->pict_type = FF_P_TYPE;
p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0;
out = avs->picture.data[0];
......@@ -93,7 +93,7 @@ avs_decode_frame(AVCodecContext * avctx,
switch (sub_type) {
case AVS_I_FRAME:
p->pict_type = FF_I_TYPE;
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
case AVS_P_FRAME_3X3:
vect_w = 3;
......
......@@ -68,7 +68,7 @@ static int bfi_decode_frame(AVCodecContext * avctx, void *data,
/* Set frame parameters and palette, if necessary */
if (!avctx->frame_number) {
bfi->frame.pict_type = FF_I_TYPE;
bfi->frame.pict_type = AV_PICTURE_TYPE_I;
bfi->frame.key_frame = 1;
/* Setting the palette */
if(avctx->extradata_size>768) {
......@@ -87,7 +87,7 @@ static int bfi_decode_frame(AVCodecContext * avctx, void *data,
}
bfi->frame.palette_has_changed = 1;
} else {
bfi->frame.pict_type = FF_P_TYPE;
bfi->frame.pict_type = AV_PICTURE_TYPE_P;
bfi->frame.key_frame = 0;
}
......
......@@ -200,7 +200,7 @@ static int bmp_decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
p->pict_type = FF_I_TYPE;
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
buf = buf0 + hsize;
......
......@@ -74,7 +74,7 @@ static int bmp_encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_s
uint8_t *ptr;
unsigned char* buf0 = buf;
*p = *pict;
p->pict_type= FF_I_TYPE;
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
switch (avctx->pix_fmt) {
case PIX_FMT_RGB565:
......
......@@ -137,10 +137,10 @@ static int decode_frame(AVCodecContext *avctx, void *data,
stride = newpic->linesize[0];
if (buf[0] & C93_FIRST_FRAME) {
newpic->pict_type = FF_I_TYPE;
newpic->pict_type = AV_PICTURE_TYPE_I;
newpic->key_frame = 1;
} else {
newpic->pict_type = FF_P_TYPE;
newpic->pict_type = AV_PICTURE_TYPE_P;
newpic->key_frame = 0;
}
......
......@@ -278,7 +278,7 @@ static inline void set_mv_intra(AVSContext *h) {
set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
h->mv[MV_BWD_X0] = ff_cavs_intra_mv;
set_mvs(&h->mv[MV_BWD_X0], BLK_16X16);
if(h->pic_type != FF_B_TYPE)
if(h->pic_type != AV_PICTURE_TYPE_B)
h->col_type_base[h->mbidx] = I_8X8;
}
......
......@@ -220,7 +220,7 @@ static int decode_mb_i(AVSContext *h, int cbp_code) {
ff_cavs_modify_mb_i(h, &pred_mode_uv);
/* get coded block pattern */
if(h->pic_type == FF_I_TYPE)
if(h->pic_type == AV_PICTURE_TYPE_I)
cbp_code = get_ue_golomb(gb);
if(cbp_code > 63){
av_log(h->s.avctx, AV_LOG_ERROR, "illegal intra cbp\n");
......@@ -424,7 +424,7 @@ static inline int decode_slice_header(AVSContext *h, GetBitContext *gb) {
h->qp = get_bits(gb,6);
}
/* inter frame or second slice can have weighting params */
if((h->pic_type != FF_I_TYPE) || (!h->pic_structure && h->mby >= h->mb_width/2))
if((h->pic_type != AV_PICTURE_TYPE_I) || (!h->pic_structure && h->mby >= h->mb_width/2))
if(get_bits1(gb)) { //slice_weighting_flag
av_log(h->s.avctx, AV_LOG_ERROR,
"weighted prediction not yet supported\n");
......@@ -470,17 +470,17 @@ static int decode_pic(AVSContext *h) {
}
skip_bits(&s->gb,16);//bbv_dwlay
if(h->stc == PIC_PB_START_CODE) {
h->pic_type = get_bits(&s->gb,2) + FF_I_TYPE;
if(h->pic_type > FF_B_TYPE) {
h->pic_type = get_bits(&s->gb,2) + AV_PICTURE_TYPE_I;
if(h->pic_type > AV_PICTURE_TYPE_B) {
av_log(s->avctx, AV_LOG_ERROR, "illegal picture type\n");
return -1;
}
/* make sure we have the reference frames we need */
if(!h->DPB[0].data[0] ||
(!h->DPB[1].data[0] && h->pic_type == FF_B_TYPE))
(!h->DPB[1].data[0] && h->pic_type == AV_PICTURE_TYPE_B))
return -1;
} else {
h->pic_type = FF_I_TYPE;
h->pic_type = AV_PICTURE_TYPE_I;
if(get_bits1(&s->gb))
skip_bits(&s->gb,24);//time_code
/* old sample clips were all progressive and no low_delay,
......@@ -502,7 +502,7 @@ static int decode_pic(AVSContext *h) {
h->picture.poc = get_bits(&s->gb,8)*2;
/* get temporal distances and MV scaling factors */
if(h->pic_type != FF_B_TYPE) {
if(h->pic_type != AV_PICTURE_TYPE_B) {
h->dist[0] = (h->picture.poc - h->DPB[0].poc + 512) % 512;
} else {
h->dist[0] = (h->DPB[0].poc - h->picture.poc + 512) % 512;
......@@ -510,7 +510,7 @@ static int decode_pic(AVSContext *h) {
h->dist[1] = (h->picture.poc - h->DPB[1].poc + 512) % 512;
h->scale_den[0] = h->dist[0] ? 512/h->dist[0] : 0;
h->scale_den[1] = h->dist[1] ? 512/h->dist[1] : 0;
if(h->pic_type == FF_B_TYPE) {
if(h->pic_type == AV_PICTURE_TYPE_B) {
h->sym_factor = h->dist[0]*h->scale_den[1];
} else {
h->direct_den[0] = h->dist[0] ? 16384/h->dist[0] : 0;
......@@ -529,12 +529,12 @@ static int decode_pic(AVSContext *h) {
skip_bits1(&s->gb); //repeat_first_field
h->qp_fixed = get_bits1(&s->gb);
h->qp = get_bits(&s->gb,6);
if(h->pic_type == FF_I_TYPE) {
if(h->pic_type == AV_PICTURE_TYPE_I) {
if(!h->progressive && !h->pic_structure)
skip_bits1(&s->gb);//what is this?
skip_bits(&s->gb,4); //reserved bits
} else {
if(!(h->pic_type == FF_B_TYPE && h->pic_structure == 1))
if(!(h->pic_type == AV_PICTURE_TYPE_B && h->pic_structure == 1))
h->ref_flag = get_bits1(&s->gb);
skip_bits(&s->gb,4); //reserved bits
h->skip_mode_flag = get_bits1(&s->gb);
......@@ -546,12 +546,12 @@ static int decode_pic(AVSContext *h) {
} else {
h->alpha_offset = h->beta_offset = 0;
}
if(h->pic_type == FF_I_TYPE) {
if(h->pic_type == AV_PICTURE_TYPE_I) {
do {
check_for_slice(h);
decode_mb_i(h, 0);
} while(ff_cavs_next_mb(h));
} else if(h->pic_type == FF_P_TYPE) {
} else if(h->pic_type == AV_PICTURE_TYPE_P) {
do {
if(check_for_slice(h))
skip_count = -1;
......@@ -567,7 +567,7 @@ static int decode_pic(AVSContext *h) {
decode_mb_p(h,mb_type);
}
} while(ff_cavs_next_mb(h));
} else { /* FF_B_TYPE */
} else { /* AV_PICTURE_TYPE_B */
do {
if(check_for_slice(h))
skip_count = -1;
......@@ -584,7 +584,7 @@ static int decode_pic(AVSContext *h) {
}
} while(ff_cavs_next_mb(h));
}
if(h->pic_type != FF_B_TYPE) {
if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
h->DPB[1] = h->DPB[0];
......@@ -684,7 +684,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
if(decode_pic(h))
break;
*data_size = sizeof(AVPicture);
if(h->pic_type != FF_B_TYPE) {
if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].data[0]) {
*picture = *(AVFrame *) &h->DPB[1];
} else {
......
......@@ -64,7 +64,7 @@ static int decode_frame(AVCodecContext *avctx,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
p->pict_type= FF_I_TYPE;
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
init_get_bits(&a->gb, buf, buf_size);
......@@ -100,7 +100,7 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
int size;
*p = *pict;
p->pict_type= FF_I_TYPE;
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
emms_c();
......
......@@ -183,7 +183,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
// flip upside down, add difference frame
if (buf[0] & 1) { // keyframe
c->pic.pict_type = FF_I_TYPE;
c->pic.pict_type = AV_PICTURE_TYPE_I;
c->pic.key_frame = 1;
switch (c->bpp) {
case 16:
......@@ -197,7 +197,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
c->linelen, c->height);
}
} else {
c->pic.pict_type = FF_P_TYPE;
c->pic.pict_type = AV_PICTURE_TYPE_P;
c->pic.key_frame = 0;
switch (c->bpp) {
case 16:
......
......@@ -194,7 +194,7 @@ static int dirac_combine_frame(AVCodecParserContext *s, AVCodecContext *avctx,
avctx->has_b_frames = 1;
}
if (avctx->has_b_frames && s->pts == s->dts)
s->pict_type = FF_B_TYPE;
s->pict_type = AV_PICTURE_TYPE_B;
/* Finally have a complete Dirac data unit */
*buf = pc->dirac_unit;
......
......@@ -55,7 +55,7 @@ static av_cold int dnxhd_decode_init(AVCodecContext *avctx)
ctx->avctx = avctx;
dsputil_init(&ctx->dsp, avctx);
avctx->coded_frame = &ctx->picture;
ctx->picture.type = FF_I_TYPE;
ctx->picture.type = AV_PICTURE_TYPE_I;
ctx->picture.key_frame = 1;
return 0;
}
......
......@@ -222,7 +222,7 @@ static int dnxhd_encode_init(AVCodecContext *avctx)
FF_ALLOCZ_OR_GOTO(ctx->m.avctx, ctx->mb_qscale, ctx->m.mb_num *sizeof(uint8_t) , fail);
ctx->frame.key_frame = 1;
ctx->frame.pict_type = FF_I_TYPE;
ctx->frame.pict_type = AV_PICTURE_TYPE_I;
ctx->m.avctx->coded_frame = &ctx->frame;
if (avctx->thread_count > MAX_THREADS) {
......
......@@ -1095,7 +1095,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx,
s->picture.reference = 0;
s->picture.key_frame = 1;
s->picture.pict_type = FF_I_TYPE;
s->picture.pict_type = AV_PICTURE_TYPE_I;
avctx->pix_fmt = s->sys->pix_fmt;
avctx->time_base = s->sys->time_base;
avcodec_set_dimensions(avctx, s->sys->width, s->sys->height);
......@@ -1264,7 +1264,7 @@ static int dvvideo_encode_frame(AVCodecContext *c, uint8_t *buf, int buf_size,
c->pix_fmt = s->sys->pix_fmt;
s->picture = *((AVFrame *)data);
s->picture.key_frame = 1;
s->picture.pict_type = FF_I_TYPE;
s->picture.pict_type = AV_PICTURE_TYPE_I;
s->buf = buf;
c->execute(c, dv_encode_video_segment, s->sys->work_chunks, NULL,
......
......@@ -240,13 +240,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
switch(compr){
case -1:
c->pic.key_frame = 0;
c->pic.pict_type = FF_P_TYPE;
c->pic.pict_type = AV_PICTURE_TYPE_P;
if(c->prev.data[0])
memcpy(c->pic.data[0], c->prev.data[0], c->pic.linesize[0] * avctx->height);
else{ // Should happen only when first frame is 'NULL'
memset(c->pic.data[0], 0, c->pic.linesize[0] * avctx->height);
c->pic.key_frame = 1;
c->pic.pict_type = FF_I_TYPE;
c->pic.pict_type = AV_PICTURE_TYPE_I;
}
break;
case 2:
......@@ -254,7 +254,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
case 4:
case 5:
c->pic.key_frame = !(compr & 1);
c->pic.pict_type = (compr & 1) ? FF_P_TYPE : FF_I_TYPE;
c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
for(j = 0; j < avctx->height; j++){
if(compr & 1){
for(i = 0; i < avctx->width; i++)
......@@ -269,7 +269,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
case 12: // ScummVM coding
case 13:
c->pic.key_frame = 0;
c->pic.pict_type = FF_P_TYPE;
c->pic.pict_type = AV_PICTURE_TYPE_P;
decode_13(avctx, c, c->pic.data[0], srcptr, c->prev.data[0]);
break;
default:
......
......@@ -246,7 +246,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
slice->slice_qs_delta = 0; /* XXX not implemented by Libav */
slice->slice_qp_delta = s->qscale - h->pps.init_qp;
slice->redundant_pic_cnt = h->redundant_pic_count;
if (h->slice_type == FF_B_TYPE)
if (h->slice_type == AV_PICTURE_TYPE_B)
slice->direct_spatial_mv_pred_flag = h->direct_spatial_mv_pred;
slice->cabac_init_idc = h->pps.cabac ? h->cabac_init_idc : 0;
if (h->deblocking_filter < 2)
......@@ -403,7 +403,7 @@ static int decode_slice(AVCodecContext *avctx,
position, size);
ctx_pic->slice_count++;
if (h->slice_type != FF_I_TYPE && h->slice_type != FF_SI_TYPE)
if (h->slice_type != AV_PICTURE_TYPE_I && h->slice_type != AV_PICTURE_TYPE_SI)
ctx_pic->pp.wBitFields &= ~(1 << 15); /* Set IntraPicFlag to 0 */
return 0;
}
......
......@@ -44,11 +44,11 @@ static void fill_picture_parameters(AVCodecContext *avctx,
memset(pp, 0, sizeof(*pp));
pp->wDecodedPictureIndex = ff_dxva2_get_surface_index(ctx, current_picture);
pp->wDeblockedPictureIndex = 0;
if (s->pict_type != FF_I_TYPE)
if (s->pict_type != AV_PICTURE_TYPE_I)
pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->last_picture);
else
pp->wForwardRefPictureIndex = 0xffff;
if (s->pict_type == FF_B_TYPE)
if (s->pict_type == AV_PICTURE_TYPE_B)
pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture);
else
pp->wBackwardRefPictureIndex = 0xffff;
......@@ -61,8 +61,8 @@ static void fill_picture_parameters(AVCodecContext *avctx,
pp->bBPPminus1 = 7;
pp->bPicStructure = s->picture_structure;
pp->bSecondField = is_field && !s->first_field;
pp->bPicIntra = s->pict_type == FF_I_TYPE;
pp->bPicBackwardPrediction = s->pict_type == FF_B_TYPE;
pp->bPicIntra = s->pict_type == AV_PICTURE_TYPE_I;
pp->bPicBackwardPrediction = s->pict_type == AV_PICTURE_TYPE_B;
pp->bBidirectionalAveragingMode = 0;
pp->bMVprecisionAndChromaRelation= 0; /* FIXME */
pp->bChromaFormat = s->chroma_format;
......
......@@ -42,11 +42,11 @@ static void fill_picture_parameters(AVCodecContext *avctx,
memset(pp, 0, sizeof(*pp));
pp->wDecodedPictureIndex =
pp->wDeblockedPictureIndex = ff_dxva2_get_surface_index(ctx, current_picture);
if (s->pict_type != FF_I_TYPE)
if (s->pict_type != AV_PICTURE_TYPE_I)
pp->wForwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->last_picture);
else
pp->wForwardRefPictureIndex = 0xffff;
if (s->pict_type == FF_B_TYPE)
if (s->pict_type == AV_PICTURE_TYPE_B)
pp->wBackwardRefPictureIndex = ff_dxva2_get_surface_index(ctx, &s->next_picture);
else
pp->wBackwardRefPictureIndex = 0xffff;
......@@ -69,8 +69,8 @@ static void fill_picture_parameters(AVCodecContext *avctx,
if (s->picture_structure & PICT_BOTTOM_FIELD)
pp->bPicStructure |= 0x02;
pp->bSecondField = v->interlace && v->fcm != 0x03 && !s->first_field;
pp->bPicIntra = s->pict_type == FF_I_TYPE;
pp->bPicBackwardPrediction = s->pict_type == FF_B_TYPE;
pp->bPicIntra = s->pict_type == AV_PICTURE_TYPE_I;
pp->bPicBackwardPrediction = s->pict_type == AV_PICTURE_TYPE_B;
pp->bBidirectionalAveragingMode = (1 << 7) |
((ctx->cfg->ConfigIntraResidUnsigned != 0) << 6) |
((ctx->cfg->ConfigResidDiffAccelerator != 0) << 5) |
......@@ -108,10 +108,10 @@ static void fill_picture_parameters(AVCodecContext *avctx,
(v->interlace << 5) |
(v->tfcntrflag << 4) |
(v->finterpflag << 3) |
((s->pict_type != FF_B_TYPE) << 2) |
((s->pict_type != AV_PICTURE_TYPE_B) << 2) |
(v->psf << 1) |
(v->extended_dmv );
if (s->pict_type != FF_I_TYPE)
if (s->pict_type != AV_PICTURE_TYPE_I)
pp->bPic4MVallowed = v->mv_mode == MV_PMODE_MIXED_MV ||
(v->mv_mode == MV_PMODE_INTENSITY_COMP &&
v->mv_mode2 == MV_PMODE_MIXED_MV);
......
......@@ -180,10 +180,10 @@ static int cmv_decode_frame(AVCodecContext *avctx,
if ((buf[0]&1)) { // subtype
cmv_decode_inter(s, buf+2, buf_end);
s->frame.key_frame = 0;
s->frame.pict_type = FF_P_TYPE;
s->frame.pict_type = AV_PICTURE_TYPE_P;
}else{
s->frame.key_frame = 1;
s->frame.pict_type = FF_I_TYPE;
s->frame.pict_type = AV_PICTURE_TYPE_I;
cmv_decode_intra(s, buf+2, buf_end);
}
......
......@@ -218,7 +218,7 @@ static int tgq_decode_frame(AVCodecContext *avctx,
if (!s->frame.data[0]) {
s->frame.key_frame = 1;
s->frame.pict_type = FF_I_TYPE;
s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
if (avctx->get_buffer(avctx, &s->frame)) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
......
......@@ -300,7 +300,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
if(chunk_type==kVGT_TAG) {
s->frame.key_frame = 1;
s->frame.pict_type = FF_I_TYPE;
s->frame.pict_type = AV_PICTURE_TYPE_I;
if (unpack(buf, buf_end, s->frame.data[0], s->avctx->width, s->avctx->height)<0) {
av_log(avctx, AV_LOG_WARNING, "truncated intra frame\n");
return -1;
......@@ -311,7 +311,7 @@ static int tgv_decode_frame(AVCodecContext *avctx,
return buf_size;
}
s->frame.key_frame = 0;
s->frame.pict_type = FF_P_TYPE;
s->frame.pict_type = AV_PICTURE_TYPE_P;
if (tgv_decode_inter(s, buf, buf_end)<0) {
av_log(avctx, AV_LOG_WARNING, "truncated inter frame\n");
return -1;
......
......@@ -639,7 +639,7 @@ static int is_intra_more_likely(MpegEncContext *s){
if(undamaged_count < 5) return 0; //almost all MBs damaged -> use temporal prediction
//prevent dsp.sad() check, that requires access to the image
if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == FF_I_TYPE)
if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration && s->pict_type == AV_PICTURE_TYPE_I)
return 1;
skip_amount= FFMAX(undamaged_count/50, 1); //check only upto 50 MBs
......@@ -658,7 +658,7 @@ static int is_intra_more_likely(MpegEncContext *s){
j++;
if((j%skip_amount) != 0) continue; //skip a few to speed things up
if(s->pict_type==FF_I_TYPE){
if(s->pict_type==AV_PICTURE_TYPE_I){
uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
......@@ -972,7 +972,7 @@ void ff_er_frame_end(MpegEncContext *s){
}
/* guess MVs */
if(s->pict_type==FF_B_TYPE){
if(s->pict_type==AV_PICTURE_TYPE_B){
for(mb_y=0; mb_y<s->mb_height; mb_y++){
for(mb_x=0; mb_x<s->mb_width; mb_x++){
int xy= mb_x*2 + mb_y*2*s->b8_stride;
......@@ -1107,7 +1107,7 @@ ec_clean:
const int mb_xy= s->mb_index2xy[i];
int error= s->error_status_table[mb_xy];