Commit efd29844 authored by Martin Storsjö

mpegvideo: Add ff_ prefix to nonstatic functions

Signed-off-by: Martin Storsjö <martin@martin.st>
parent 0ca1bdb3
......@@ -103,7 +103,7 @@ static void dct_unquantize_h263_inter_axp(MpegEncContext *s, DCTELEM *block,
dct_unquantize_h263_axp(block, n_coeffs, qscale, (qscale - 1) | 1);
}
void MPV_common_init_axp(MpegEncContext *s)
void ff_MPV_common_init_axp(MpegEncContext *s)
{
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_axp;
s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_axp;
......
......@@ -38,17 +38,17 @@ void ff_dct_unquantize_h263_inter_neon(MpegEncContext *s, DCTELEM *block,
void ff_dct_unquantize_h263_intra_neon(MpegEncContext *s, DCTELEM *block,
int n, int qscale);
void MPV_common_init_arm(MpegEncContext *s)
void ff_MPV_common_init_arm(MpegEncContext *s)
{
/* IWMMXT support is a superset of armv5te, so
* allow optimized functions for armv5te unless
* a better iwmmxt function exists
*/
#if HAVE_ARMV5TE
MPV_common_init_armv5te(s);
ff_MPV_common_init_armv5te(s);
#endif
#if HAVE_IWMMXT
MPV_common_init_iwmmxt(s);
ff_MPV_common_init_iwmmxt(s);
#endif
if (HAVE_NEON) {
......
......@@ -21,7 +21,7 @@
#include "libavcodec/mpegvideo.h"
void MPV_common_init_iwmmxt(MpegEncContext *s);
void MPV_common_init_armv5te(MpegEncContext *s);
void ff_MPV_common_init_iwmmxt(MpegEncContext *s);
void ff_MPV_common_init_armv5te(MpegEncContext *s);
#endif /* AVCODEC_ARM_MPEGVIDEO_H */
......@@ -94,7 +94,7 @@ static void dct_unquantize_h263_inter_armv5te(MpegEncContext *s,
ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1);
}
void MPV_common_init_armv5te(MpegEncContext *s)
void ff_MPV_common_init_armv5te(MpegEncContext *s)
{
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_armv5te;
s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_armv5te;
......
......@@ -93,7 +93,7 @@ static void dct_unquantize_h263_intra_iwmmxt(MpegEncContext *s,
block_orig[0] = level;
}
void MPV_common_init_iwmmxt(MpegEncContext *s)
void ff_MPV_common_init_iwmmxt(MpegEncContext *s)
{
if (!(mm_flags & AV_CPU_FLAG_IWMMXT)) return;
......
......@@ -141,7 +141,7 @@ static int dct_quantize_bfin (MpegEncContext *s,
return last_non_zero;
}
void MPV_common_init_bfin (MpegEncContext *s)
void ff_MPV_common_init_bfin (MpegEncContext *s)
{
/* s->dct_quantize= dct_quantize_bfin; */
}
......@@ -671,7 +671,7 @@ av_cold int ff_cavs_init(AVCodecContext *avctx) {
AVSContext *h = avctx->priv_data;
MpegEncContext * const s = &h->s;
MPV_decode_defaults(s);
ff_MPV_decode_defaults(s);
ff_cavsdsp_init(&h->cdsp, avctx);
s->avctx = avctx;
......
......@@ -469,7 +469,7 @@ static int decode_pic(AVSContext *h) {
if (!s->context_initialized) {
s->avctx->idct_algo = FF_IDCT_CAVS;
if (MPV_common_init(s) < 0)
if (ff_MPV_common_init(s) < 0)
return -1;
ff_init_scantable(s->dsp.idct_permutation,&h->scantable,ff_zigzag_direct);
}
......
......@@ -66,7 +66,7 @@ static void decode_mb(MpegEncContext *s, int ref)
ff_h264_hl_decode_mb(h);
} else {
assert(ref == 0);
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
}
}
......
......@@ -89,9 +89,9 @@ AVCodec ff_flv_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_FLV1,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
};
......@@ -76,7 +76,7 @@ static av_cold int h261_decode_init(AVCodecContext *avctx){
MpegEncContext * const s = &h->s;
// set defaults
MPV_decode_defaults(s);
ff_MPV_decode_defaults(s);
s->avctx = avctx;
s->width = s->avctx->coded_width;
......@@ -221,7 +221,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
s->mb_skipped = 1;
h->mtype &= ~MB_TYPE_H261_FIL;
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
}
return 0;
......@@ -349,7 +349,7 @@ intra:
s->block_last_index[i]= -1;
}
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
return SLICE_OK;
}
......@@ -565,7 +565,7 @@ retry:
init_get_bits(&s->gb, buf, buf_size*8);
if(!s->context_initialized){
if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
if (ff_MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
return -1;
}
......@@ -588,7 +588,7 @@ retry:
if (s->width != avctx->coded_width || s->height != avctx->coded_height){
ParseContext pc= s->parse_context; //FIXME move this demuxing hack to libavformat
s->parse_context.buffer=0;
MPV_common_end(s);
ff_MPV_common_end(s);
s->parse_context= pc;
}
if (!s->context_initialized) {
......@@ -606,7 +606,7 @@ retry:
|| avctx->skip_frame >= AVDISCARD_ALL)
return get_consumed_bytes(s, buf_size);
if(MPV_frame_start(s, avctx) < 0)
if(ff_MPV_frame_start(s, avctx) < 0)
return -1;
ff_er_frame_start(s);
......@@ -620,7 +620,7 @@ retry:
break;
h261_decode_gob(h);
}
MPV_frame_end(s);
ff_MPV_frame_end(s);
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
......@@ -637,7 +637,7 @@ static av_cold int h261_decode_end(AVCodecContext *avctx)
H261Context *h= avctx->priv_data;
MpegEncContext *s = &h->s;
MPV_common_end(s);
ff_MPV_common_end(s);
return 0;
}
......
......@@ -326,9 +326,9 @@ AVCodec ff_h261_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_H261,
.priv_data_size = sizeof(H261Context),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("H.261"),
};
......@@ -54,7 +54,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
s->workaround_bugs= avctx->workaround_bugs;
// set defaults
MPV_decode_defaults(s);
ff_MPV_decode_defaults(s);
s->quant_precision=5;
s->decode_mb= ff_h263_decode_mb;
s->low_delay= 1;
......@@ -110,7 +110,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
/* for h263, we allocate the images after having read the header */
if (avctx->codec->id != CODEC_ID_H263 && avctx->codec->id != CODEC_ID_MPEG4)
if (MPV_common_init(s) < 0)
if (ff_MPV_common_init(s) < 0)
return -1;
ff_h263_decode_init_vlc(s);
......@@ -122,7 +122,7 @@ av_cold int ff_h263_decode_end(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
MPV_common_end(s);
ff_MPV_common_end(s);
return 0;
}
......@@ -220,7 +220,7 @@ static int decode_slice(MpegEncContext *s){
if(ret<0){
const int xy= s->mb_x + s->mb_y*s->mb_stride;
if(ret==SLICE_END){
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
if(s->loop_filter)
ff_h263_loop_filter(s);
......@@ -232,7 +232,7 @@ static int decode_slice(MpegEncContext *s){
if(++s->mb_x >= s->mb_width){
s->mb_x=0;
ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
MPV_report_decode_progress(s);
ff_MPV_report_decode_progress(s);
s->mb_y++;
}
return 0;
......@@ -247,13 +247,13 @@ static int decode_slice(MpegEncContext *s){
return -1;
}
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
if(s->loop_filter)
ff_h263_loop_filter(s);
}
ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
MPV_report_decode_progress(s);
ff_MPV_report_decode_progress(s);
s->mb_x= 0;
}
......@@ -390,7 +390,7 @@ retry:
s->bitstream_buffer_size=0;
if (!s->context_initialized) {
if (MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
if (ff_MPV_common_init(s) < 0) //we need the idct permutaton for reading a custom matrix
return -1;
}
......@@ -572,7 +572,7 @@ retry:
/* H.263 could change picture size any time */
ParseContext pc= s->parse_context; //FIXME move these demuxng hack to avformat
s->parse_context.buffer=0;
MPV_common_end(s);
ff_MPV_common_end(s);
s->parse_context= pc;
}
if (!s->context_initialized) {
......@@ -613,7 +613,7 @@ retry:
s->me.qpel_avg= s->dsp.avg_qpel_pixels_tab;
}
if(MPV_frame_start(s, avctx) < 0)
if(ff_MPV_frame_start(s, avctx) < 0)
return -1;
if (!s->divx_packed) ff_thread_finish_setup(avctx);
......@@ -631,7 +631,7 @@ retry:
ff_er_frame_start(s);
//the second part of the wmv2 header contains the MB skip bits which are stored in current_picture->mb_type
//which is not available before MPV_frame_start()
//which is not available before ff_MPV_frame_start()
if (CONFIG_WMV2_DECODER && s->msmpeg4_version==5){
ret = ff_wmv2_decode_secondary_picture_header(s);
if(ret<0) return ret;
......@@ -707,7 +707,7 @@ intrax8_decoded:
return -1;
}
MPV_frame_end(s);
ff_MPV_frame_end(s);
assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
assert(s->current_picture.f.pict_type == s->pict_type);
......
......@@ -1100,7 +1100,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx){
MpegEncContext * const s = &h->s;
int i;
MPV_decode_defaults(s);
ff_MPV_decode_defaults(s);
s->avctx = avctx;
common_init(h);
......@@ -1281,11 +1281,11 @@ int ff_h264_frame_start(H264Context *h){
int i;
const int pixel_shift = h->pixel_shift;
if(MPV_frame_start(s, s->avctx) < 0)
if(ff_MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
/*
* MPV_frame_start uses pict_type to derive key_frame.
* ff_MPV_frame_start uses pict_type to derive key_frame.
* This is incorrect for H.264; IDR markings must be used.
* Zero here; IDR markings per slice in frame or fields are ORed in later.
* See decode_nal_units().
......@@ -1319,7 +1319,7 @@ int ff_h264_frame_start(H264Context *h){
// We mark the current picture as non-reference after allocating it, so
// that if we break out due to an error it can be released automatically
// in the next MPV_frame_start().
// in the next ff_MPV_frame_start().
// SVQ3 as well as most other codecs have only last/next/current and thus
// get released even with set reference, besides SVQ3 and others do not
// mark frames as reference later "naturally".
......@@ -2562,7 +2562,7 @@ static int field_end(H264Context *h, int in_setup){
if (!FIELD_PICTURE)
ff_er_frame_end(s);
MPV_frame_end(s);
ff_MPV_frame_end(s);
h->current_slice=0;
......@@ -2625,7 +2625,7 @@ int ff_h264_get_profile(SPS *sps)
/**
* Decode a slice header.
* This will also call MPV_common_init() and frame_start() as needed.
* This will also call ff_MPV_common_init() and frame_start() as needed.
*
* @param h h264context
* @param h0 h264 master context (differs from 'h' when doing sliced based parallel decoding)
......@@ -2734,7 +2734,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
}
free_tables(h, 0);
flush_dpb(s->avctx);
MPV_common_end(s);
ff_MPV_common_end(s);
}
if (!s->context_initialized) {
if (h != h0) {
......@@ -2806,8 +2806,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id, s->avctx->pix_fmt);
if (MPV_common_init(s) < 0) {
av_log(h->s.avctx, AV_LOG_ERROR, "MPV_common_init() failed.\n");
if (ff_MPV_common_init(s) < 0) {
av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
return -1;
}
s->first_field = 0;
......@@ -4119,7 +4119,7 @@ av_cold int ff_h264_decode_end(AVCodecContext *avctx)
ff_h264_free_context(h);
MPV_common_end(s);
ff_MPV_common_end(s);
// memset(h, 0, sizeof(H264Context));
......
......@@ -191,8 +191,8 @@ AVCodec ff_ljpeg_encoder = { //FIXME avoid MPV_* lossless JPEG should not need t
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_LJPEG,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.init = ff_MPV_encode_init,
.encode = encode_picture_lossless,
.close = MPV_encode_end,
.close = ff_MPV_encode_end,
.long_name = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
};
......@@ -80,7 +80,7 @@ static void dct_unquantize_h263_mmi(MpegEncContext *s,
}
void MPV_common_init_mmi(MpegEncContext *s)
void ff_MPV_common_init_mmi(MpegEncContext *s)
{
s->dct_unquantize_h263_intra =
s->dct_unquantize_h263_inter = dct_unquantize_h263_mmi;
......
......@@ -450,9 +450,9 @@ AVCodec ff_mjpeg_encoder = {
.type = AVMEDIA_TYPE_VIDEO,
.id = CODEC_ID_MJPEG,
.priv_data_size = sizeof(MpegEncContext),
.init = MPV_encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.init = ff_MPV_encode_init,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
.long_name= NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
};
......@@ -1117,7 +1117,7 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx)
for (i = 0; i < 64; i++)
s2->dsp.idct_permutation[i]=i;
MPV_decode_defaults(s2);
ff_MPV_decode_defaults(s2);
s->mpeg_enc_ctx.avctx = avctx;
s->mpeg_enc_ctx.flags = avctx->flags;
......@@ -1219,7 +1219,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
if (s1->mpeg_enc_ctx_allocated) {
ParseContext pc = s->parse_context;
s->parse_context.buffer = 0;
MPV_common_end(s);
ff_MPV_common_end(s);
s->parse_context = pc;
}
......@@ -1298,7 +1298,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
* if DCT permutation is changed. */
memcpy(old_permutation, s->dsp.idct_permutation, 64 * sizeof(uint8_t));
if (MPV_common_init(s) < 0)
if (ff_MPV_common_init(s) < 0)
return -2;
quant_matrix_rebuild(s->intra_matrix, old_permutation, s->dsp.idct_permutation);
......@@ -1563,7 +1563,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
/* start frame decoding */
if (s->first_field || s->picture_structure == PICT_FRAME) {
if (MPV_frame_start(s, avctx) < 0)
if (ff_MPV_frame_start(s, avctx) < 0)
return -1;
ff_er_frame_start(s);
......@@ -1753,13 +1753,13 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
MPV_decode_mb(s, s->block);
ff_MPV_decode_mb(s, s->block);
if (++s->mb_x >= s->mb_width) {
const int mb_size = 16 >> s->avctx->lowres;
ff_draw_horiz_band(s, mb_size*(s->mb_y >> field_pic), mb_size);
MPV_report_decode_progress(s);
ff_MPV_report_decode_progress(s);
s->mb_x = 0;
s->mb_y += 1 << field_pic;
......@@ -1912,7 +1912,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
ff_er_frame_end(s);
MPV_frame_end(s);
ff_MPV_frame_end(s);
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
*pict = *(AVFrame*)s->current_picture_ptr;
......@@ -2022,7 +2022,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
/* start new MPEG-1 context decoding */
s->out_format = FMT_MPEG1;
if (s1->mpeg_enc_ctx_allocated) {
MPV_common_end(s);
ff_MPV_common_end(s);
}
s->width = avctx->coded_width;
s->height = avctx->coded_height;
......@@ -2037,7 +2037,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
if (avctx->idct_algo == FF_IDCT_AUTO)
avctx->idct_algo = FF_IDCT_SIMPLE;
if (MPV_common_init(s) < 0)
if (ff_MPV_common_init(s) < 0)
return -1;
exchange_uv(s); // common init reset pblocks, so we swap them here
s->swap_uv = 1; // in case of xvmc we need to swap uv for each MB
......@@ -2478,7 +2478,7 @@ static int mpeg_decode_end(AVCodecContext *avctx)
Mpeg1Context *s = avctx->priv_data;
if (s->mpeg_enc_ctx_allocated)
MPV_common_end(&s->mpeg_enc_ctx);
ff_MPV_common_end(&s->mpeg_enc_ctx);
return 0;
}
......
......@@ -131,7 +131,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
if(MPV_encode_init(avctx) < 0)
if(ff_MPV_encode_init(avctx) < 0)
return -1;
if(find_frame_rate_index(s) < 0){
......@@ -954,8 +954,8 @@ AVCodec ff_mpeg1video_encoder = {
.id = CODEC_ID_MPEG1VIDEO,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.supported_framerates= avpriv_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
......@@ -969,8 +969,8 @@ AVCodec ff_mpeg2video_encoder = {
.id = CODEC_ID_MPEG2VIDEO,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.supported_framerates= avpriv_frame_rate_tab+1,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
......
......@@ -1222,7 +1222,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
int ret;
static int done = 0;
if((ret=MPV_encode_init(avctx)) < 0)
if((ret=ff_MPV_encode_init(avctx)) < 0)
return ret;
if (!done) {
......@@ -1336,8 +1336,8 @@ AVCodec ff_mpeg4_encoder = {
.id = CODEC_ID_MPEG4,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
.encode = MPV_encode_picture,
.close = MPV_encode_end,
.encode = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
.pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
.capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
.long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
......
......@@ -188,17 +188,17 @@ av_cold int ff_dct_common_init(MpegEncContext *s)
s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
#if HAVE_MMX
MPV_common_init_mmx(s);
ff_MPV_common_init_mmx(s);
#elif ARCH_ALPHA
MPV_common_init_axp(s);
ff_MPV_common_init_axp(s);
#elif HAVE_MMI
MPV_common_init_mmi(s);
ff_MPV_common_init_mmi(s);
#elif ARCH_ARM
MPV_common_init_arm(s);
ff_MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
MPV_common_init_altivec(s);
ff_MPV_common_init_altivec(s);
#elif ARCH_BFIN
MPV_common_init_bfin(s);
ff_MPV_common_init_bfin(s);
#endif
/* load & permutate scantables
......@@ -458,7 +458,7 @@ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
return 0;
fail:
return -1; // free() through MPV_common_end()
return -1; // free() through ff_MPV_common_end()
}
static void free_duplicate_context(MpegEncContext *s)
......@@ -543,7 +543,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
s->bitstream_buffer = NULL;
s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
MPV_common_init(s);
ff_MPV_common_init(s);
}
s->avctx->coded_height = s1->avctx->coded_height;
......@@ -615,7 +615,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
* The changed fields will not depend upon the
* prior state of the MpegEncContext.
*/
void MPV_common_defaults(MpegEncContext *s)
void ff_MPV_common_defaults(MpegEncContext *s)
{
s->y_dc_scale_table =
s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
......@@ -644,16 +644,16 @@ void MPV_common_defaults(MpegEncContext *s)
* the changed fields will not depend upon
* the prior state of the MpegEncContext.
*/
void MPV_decode_defaults(MpegEncContext *s)
void ff_MPV_decode_defaults(MpegEncContext *s)
{
MPV_common_defaults(s);
ff_MPV_common_defaults(s);
}
/**
* init common structure for both encoder and decoder.
* this assumes that some variables like width/height are already set
*/
av_cold int MPV_common_init(MpegEncContext *s)
av_cold int ff_MPV_common_init(MpegEncContext *s)
{
int y_size, c_