Commit 59ee9f78 authored by Anton Khirnov
Browse files

lavfi: do not use av_pix_fmt_descriptors directly.

parent 50ba57e0
...@@ -214,7 +214,7 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end) ...@@ -214,7 +214,7 @@ void ff_dlog_link(void *ctx, AVFilterLink *link, int end)
av_dlog(ctx, av_dlog(ctx,
"link[%p s:%dx%d fmt:%-16s %-16s->%-16s]%s", "link[%p s:%dx%d fmt:%-16s %-16s->%-16s]%s",
link, link->w, link->h, link, link->w, link->h,
av_pix_fmt_descriptors[link->format].name, av_get_pix_fmt_name(link->format),
link->src ? link->src->filter->name : "", link->src ? link->src->filter->name : "",
link->dst ? link->dst->filter->name : "", link->dst ? link->dst->filter->name : "",
end ? "\n" : ""); end ? "\n" : "");
......
...@@ -185,7 +185,7 @@ static av_cold int init_video(AVFilterContext *ctx, const char *args) ...@@ -185,7 +185,7 @@ static av_cold int init_video(AVFilterContext *ctx, const char *args)
if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*)))) if (!(c->fifo = av_fifo_alloc(sizeof(AVFilterBufferRef*))))
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name); av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_get_pix_fmt_name(c->pix_fmt));
return 0; return 0;
} }
......
...@@ -32,7 +32,7 @@ int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t ...@@ -32,7 +32,7 @@ int ff_fill_line_with_color(uint8_t *line[4], int pixel_step[4], int w, uint8_t
{ {
uint8_t rgba_map[4] = {0}; uint8_t rgba_map[4] = {0};
int i; int i;
const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[pix_fmt]; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(pix_fmt);
int hsub = pix_desc->log2_chroma_w; int hsub = pix_desc->log2_chroma_w;
*is_packed_rgba = 1; *is_packed_rgba = 1;
......
...@@ -89,7 +89,7 @@ int main(int argc, char **argv) ...@@ -89,7 +89,7 @@ int main(int argc, char **argv)
for (j = 0; j < fmts->format_count; j++) for (j = 0; j < fmts->format_count; j++)
printf("INPUT[%d] %s: %s\n", printf("INPUT[%d] %s: %s\n",
i, filter_ctx->filter->inputs[i].name, i, filter_ctx->filter->inputs[i].name,
av_pix_fmt_descriptors[fmts->formats[j]].name); av_get_pix_fmt_name(fmts->formats[j]));
} }
/* print the supported formats in output */ /* print the supported formats in output */
...@@ -98,7 +98,7 @@ int main(int argc, char **argv) ...@@ -98,7 +98,7 @@ int main(int argc, char **argv)
for (j = 0; j < fmts->format_count; j++) for (j = 0; j < fmts->format_count; j++)
printf("OUTPUT[%d] %s: %s\n", printf("OUTPUT[%d] %s: %s\n",
i, filter_ctx->filter->outputs[i].name, i, filter_ctx->filter->outputs[i].name,
av_pix_fmt_descriptors[fmts->formats[j]].name); av_get_pix_fmt_name(fmts->formats[j]));
} }
avfilter_free(filter_ctx); avfilter_free(filter_ctx);
......
...@@ -213,10 +213,12 @@ AVFilterFormats *ff_all_formats(enum AVMediaType type) ...@@ -213,10 +213,12 @@ AVFilterFormats *ff_all_formats(enum AVMediaType type)
int num_formats = type == AVMEDIA_TYPE_VIDEO ? AV_PIX_FMT_NB : int num_formats = type == AVMEDIA_TYPE_VIDEO ? AV_PIX_FMT_NB :
type == AVMEDIA_TYPE_AUDIO ? AV_SAMPLE_FMT_NB : 0; type == AVMEDIA_TYPE_AUDIO ? AV_SAMPLE_FMT_NB : 0;
for (fmt = 0; fmt < num_formats; fmt++) for (fmt = 0; fmt < num_formats; fmt++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
if ((type != AVMEDIA_TYPE_VIDEO) || if ((type != AVMEDIA_TYPE_VIDEO) ||
(type == AVMEDIA_TYPE_VIDEO && !(av_pix_fmt_descriptors[fmt].flags & PIX_FMT_HWACCEL))) (type == AVMEDIA_TYPE_VIDEO && !(desc->flags & PIX_FMT_HWACCEL)))
ff_add_format(&ret, fmt); ff_add_format(&ret, fmt);
}
return ret; return ret;
} }
......
...@@ -139,7 +139,7 @@ static int query_formats(AVFilterContext *ctx) ...@@ -139,7 +139,7 @@ static int query_formats(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink) static int config_input(AVFilterLink *inlink)
{ {
const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format]; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
BoxBlurContext *boxblur = ctx->priv; BoxBlurContext *boxblur = ctx->priv;
int w = inlink->w, h = inlink->h; int w = inlink->w, h = inlink->h;
......
...@@ -158,7 +158,7 @@ static int config_input(AVFilterLink *link) ...@@ -158,7 +158,7 @@ static int config_input(AVFilterLink *link)
{ {
AVFilterContext *ctx = link->dst; AVFilterContext *ctx = link->dst;
CropContext *crop = ctx->priv; CropContext *crop = ctx->priv;
const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[link->format]; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(link->format);
int ret; int ret;
const char *expr; const char *expr;
double res; double res;
...@@ -177,8 +177,8 @@ static int config_input(AVFilterLink *link) ...@@ -177,8 +177,8 @@ static int config_input(AVFilterLink *link)
crop->var_values[VAR_POS] = NAN; crop->var_values[VAR_POS] = NAN;
av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc); av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc);
crop->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w; crop->hsub = pix_desc->log2_chroma_w;
crop->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h; crop->vsub = pix_desc->log2_chroma_h;
if ((ret = av_expr_parse_and_eval(&res, (expr = crop->ow_expr), if ((ret = av_expr_parse_and_eval(&res, (expr = crop->ow_expr),
var_names, crop->var_values, var_names, crop->var_values,
...@@ -248,6 +248,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) ...@@ -248,6 +248,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
AVFilterContext *ctx = link->dst; AVFilterContext *ctx = link->dst;
CropContext *crop = ctx->priv; CropContext *crop = ctx->priv;
AVFilterBufferRef *ref2; AVFilterBufferRef *ref2;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
int i; int i;
ref2 = avfilter_ref_buffer(picref, ~0); ref2 = avfilter_ref_buffer(picref, ~0);
...@@ -281,8 +282,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) ...@@ -281,8 +282,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
ref2->data[0] += crop->y * ref2->linesize[0]; ref2->data[0] += crop->y * ref2->linesize[0];
ref2->data[0] += crop->x * crop->max_step[0]; ref2->data[0] += crop->x * crop->max_step[0];
if (!(av_pix_fmt_descriptors[link->format].flags & PIX_FMT_PAL || if (!(desc->flags & PIX_FMT_PAL || desc->flags & PIX_FMT_PSEUDOPAL)) {
av_pix_fmt_descriptors[link->format].flags & PIX_FMT_PSEUDOPAL)) {
for (i = 1; i < 3; i ++) { for (i = 1; i < 3; i ++) {
if (ref2->data[i]) { if (ref2->data[i]) {
ref2->data[i] += (crop->y >> crop->vsub) * ref2->linesize[i]; ref2->data[i] += (crop->y >> crop->vsub) * ref2->linesize[i];
......
...@@ -107,7 +107,7 @@ static int config_input(AVFilterLink *inlink) ...@@ -107,7 +107,7 @@ static int config_input(AVFilterLink *inlink)
CropDetectContext *cd = ctx->priv; CropDetectContext *cd = ctx->priv;
av_image_fill_max_pixsteps(cd->max_pixsteps, NULL, av_image_fill_max_pixsteps(cd->max_pixsteps, NULL,
&av_pix_fmt_descriptors[inlink->format]); av_pix_fmt_desc_get(inlink->format));
cd->x1 = inlink->w - 1; cd->x1 = inlink->w - 1;
cd->y1 = inlink->h - 1; cd->y1 = inlink->h - 1;
......
...@@ -226,9 +226,10 @@ static int end_frame(AVFilterLink *inlink) ...@@ -226,9 +226,10 @@ static int end_frame(AVFilterLink *inlink)
AVFilterLink *outlink = inlink->dst->outputs[0]; AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *inpicref = inlink ->cur_buf; AVFilterBufferRef *inpicref = inlink ->cur_buf;
AVFilterBufferRef *outpicref = outlink->out_buf; AVFilterBufferRef *outpicref = outlink->out_buf;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int direct = inpicref->buf == outpicref->buf; int direct = inpicref->buf == outpicref->buf;
int hsub0 = av_pix_fmt_descriptors[inlink->format].log2_chroma_w; int hsub0 = desc->log2_chroma_w;
int vsub0 = av_pix_fmt_descriptors[inlink->format].log2_chroma_h; int vsub0 = desc->log2_chroma_h;
int plane; int plane;
int ret; int ret;
......
...@@ -81,9 +81,10 @@ static int query_formats(AVFilterContext *ctx) ...@@ -81,9 +81,10 @@ static int query_formats(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink) static int config_input(AVFilterLink *inlink)
{ {
DrawBoxContext *drawbox = inlink->dst->priv; DrawBoxContext *drawbox = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
drawbox->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w; drawbox->hsub = desc->log2_chroma_w;
drawbox->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h; drawbox->vsub = desc->log2_chroma_h;
if (drawbox->w == 0) drawbox->w = inlink->w; if (drawbox->w == 0) drawbox->w = inlink->w;
if (drawbox->h == 0) drawbox->h = inlink->h; if (drawbox->h == 0) drawbox->h = inlink->h;
......
...@@ -569,7 +569,7 @@ static int config_input(AVFilterLink *inlink) ...@@ -569,7 +569,7 @@ static int config_input(AVFilterLink *inlink)
{ {
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
DrawTextContext *dtext = ctx->priv; DrawTextContext *dtext = ctx->priv;
const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format]; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
int ret; int ret;
dtext->hsub = pix_desc->log2_chroma_w; dtext->hsub = pix_desc->log2_chroma_w;
......
...@@ -89,7 +89,7 @@ static int query_formats(AVFilterContext *ctx) ...@@ -89,7 +89,7 @@ static int query_formats(AVFilterContext *ctx)
static int config_props(AVFilterLink *inlink) static int config_props(AVFilterLink *inlink)
{ {
FadeContext *fade = inlink->dst->priv; FadeContext *fade = inlink->dst->priv;
const AVPixFmtDescriptor *pixdesc = &av_pix_fmt_descriptors[inlink->format]; const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(inlink->format);
fade->hsub = pixdesc->log2_chroma_w; fade->hsub = pixdesc->log2_chroma_w;
fade->vsub = pixdesc->log2_chroma_h; fade->vsub = pixdesc->log2_chroma_h;
......
...@@ -78,15 +78,16 @@ static int query_formats(AVFilterContext *ctx) ...@@ -78,15 +78,16 @@ static int query_formats(AVFilterContext *ctx)
* a bitstream format, and does not have vertically sub-sampled chroma */ * a bitstream format, and does not have vertically sub-sampled chroma */
if (ctx->inputs[0]) { if (ctx->inputs[0]) {
formats = NULL; formats = NULL;
for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++) for (pix_fmt = 0; pix_fmt < AV_PIX_FMT_NB; pix_fmt++) {
if (!( av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_HWACCEL const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
|| av_pix_fmt_descriptors[pix_fmt].flags & PIX_FMT_BITSTREAM) if (!(desc->flags & PIX_FMT_HWACCEL ||
&& av_pix_fmt_descriptors[pix_fmt].nb_components desc->flags & PIX_FMT_BITSTREAM) &&
&& !av_pix_fmt_descriptors[pix_fmt].log2_chroma_h desc->nb_components && !desc->log2_chroma_h &&
&& (ret = ff_add_format(&formats, pix_fmt)) < 0) { (ret = ff_add_format(&formats, pix_fmt)) < 0) {
ff_formats_unref(&formats); ff_formats_unref(&formats);
return ret; return ret;
} }
}
ff_formats_ref(formats, &ctx->inputs[0]->out_formats); ff_formats_ref(formats, &ctx->inputs[0]->out_formats);
ff_formats_ref(formats, &ctx->outputs[0]->in_formats); ff_formats_ref(formats, &ctx->outputs[0]->in_formats);
} }
......
...@@ -167,8 +167,9 @@ static int query_formats(AVFilterContext *ctx) ...@@ -167,8 +167,9 @@ static int query_formats(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink) static int config_input(AVFilterLink *inlink)
{ {
GradFunContext *gf = inlink->dst->priv; GradFunContext *gf = inlink->dst->priv;
int hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h; int hsub = desc->log2_chroma_w;
int vsub = desc->log2_chroma_h;
gf->buf = av_mallocz((FFALIGN(inlink->w, 16) * (gf->radius + 1) / 2 + 32) * sizeof(uint16_t)); gf->buf = av_mallocz((FFALIGN(inlink->w, 16) * (gf->radius + 1) / 2 + 32) * sizeof(uint16_t));
if (!gf->buf) if (!gf->buf)
......
...@@ -75,11 +75,11 @@ static int query_formats(AVFilterContext *ctx) ...@@ -75,11 +75,11 @@ static int query_formats(AVFilterContext *ctx)
static int config_props(AVFilterLink *inlink) static int config_props(AVFilterLink *inlink)
{ {
FlipContext *flip = inlink->dst->priv; FlipContext *flip = inlink->dst->priv;
const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format]; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
av_image_fill_max_pixsteps(flip->max_step, NULL, pix_desc); av_image_fill_max_pixsteps(flip->max_step, NULL, pix_desc);
flip->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w; flip->hsub = pix_desc->log2_chroma_w;
flip->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h; flip->vsub = pix_desc->log2_chroma_h;
return 0; return 0;
} }
......
...@@ -295,11 +295,12 @@ static int query_formats(AVFilterContext *ctx) ...@@ -295,11 +295,12 @@ static int query_formats(AVFilterContext *ctx)
static int config_input(AVFilterLink *inlink) static int config_input(AVFilterLink *inlink)
{ {
HQDN3DContext *hqdn3d = inlink->dst->priv; HQDN3DContext *hqdn3d = inlink->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int i; int i;
hqdn3d->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w; hqdn3d->hsub = desc->log2_chroma_w;
hqdn3d->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h; hqdn3d->vsub = desc->log2_chroma_h;
hqdn3d->depth = av_pix_fmt_descriptors[inlink->format].comp[0].depth_minus1+1; hqdn3d->depth = desc->comp[0].depth_minus1+1;
hqdn3d->line = av_malloc(inlink->w * sizeof(*hqdn3d->line)); hqdn3d->line = av_malloc(inlink->w * sizeof(*hqdn3d->line));
if (!hqdn3d->line) if (!hqdn3d->line)
......
...@@ -212,7 +212,7 @@ static int config_props(AVFilterLink *inlink) ...@@ -212,7 +212,7 @@ static int config_props(AVFilterLink *inlink)
{ {
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
LutContext *lut = ctx->priv; LutContext *lut = ctx->priv;
const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format]; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int min[4], max[4]; int min[4], max[4];
int val, comp, ret; int val, comp, ret;
......
...@@ -113,7 +113,7 @@ static int query_formats(AVFilterContext *ctx) ...@@ -113,7 +113,7 @@ static int query_formats(AVFilterContext *ctx)
static int config_input_main(AVFilterLink *inlink) static int config_input_main(AVFilterLink *inlink)
{ {
OverlayContext *over = inlink->dst->priv; OverlayContext *over = inlink->dst->priv;
const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format]; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
av_image_fill_max_pixsteps(over->max_plane_step, NULL, pix_desc); av_image_fill_max_pixsteps(over->max_plane_step, NULL, pix_desc);
over->hsub = pix_desc->log2_chroma_w; over->hsub = pix_desc->log2_chroma_w;
...@@ -158,10 +158,10 @@ static int config_input_overlay(AVFilterLink *inlink) ...@@ -158,10 +158,10 @@ static int config_input_overlay(AVFilterLink *inlink)
av_log(ctx, AV_LOG_VERBOSE, av_log(ctx, AV_LOG_VERBOSE,
"main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s\n", "main w:%d h:%d fmt:%s overlay x:%d y:%d w:%d h:%d fmt:%s\n",
ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h, ctx->inputs[MAIN]->w, ctx->inputs[MAIN]->h,
av_pix_fmt_descriptors[ctx->inputs[MAIN]->format].name, av_get_pix_fmt_name(ctx->inputs[MAIN]->format),
over->x, over->y, over->x, over->y,
ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h, ctx->inputs[OVERLAY]->w, ctx->inputs[OVERLAY]->h,
av_pix_fmt_descriptors[ctx->inputs[OVERLAY]->format].name); av_get_pix_fmt_name(ctx->inputs[OVERLAY]->format));
if (over->x < 0 || over->y < 0 || if (over->x < 0 || over->y < 0 ||
over->x + var_values[VAR_OVERLAY_W] > var_values[VAR_MAIN_W] || over->x + var_values[VAR_OVERLAY_W] > var_values[VAR_MAIN_W] ||
......
...@@ -144,7 +144,7 @@ static int config_input(AVFilterLink *inlink) ...@@ -144,7 +144,7 @@ static int config_input(AVFilterLink *inlink)
{ {
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
PadContext *pad = ctx->priv; PadContext *pad = ctx->priv;
const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format]; const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
uint8_t rgba_color[4]; uint8_t rgba_color[4];
int ret, is_packed_rgba; int ret, is_packed_rgba;
double var_values[VARS_NB], res; double var_values[VARS_NB], res;
......
...@@ -44,7 +44,7 @@ static int config_props(AVFilterLink *inlink) ...@@ -44,7 +44,7 @@ static int config_props(AVFilterLink *inlink)
{ {
PixdescTestContext *priv = inlink->dst->priv; PixdescTestContext *priv = inlink->dst->priv;
priv->pix_desc = &av_pix_fmt_descriptors[inlink->format]; priv->pix_desc = av_pix_fmt_desc_get(inlink->format);
if (!(priv->line = av_malloc(sizeof(*priv->line) * inlink->w))) if (!(priv->line = av_malloc(sizeof(*priv->line) * inlink->w)))
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
......
...@@ -155,6 +155,7 @@ static int config_props(AVFilterLink *outlink) ...@@ -155,6 +155,7 @@ static int config_props(AVFilterLink *outlink)
AVFilterContext *ctx = outlink->src; AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = outlink->src->inputs[0]; AVFilterLink *inlink = outlink->src->inputs[0];
ScaleContext *scale = ctx->priv; ScaleContext *scale = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int64_t w, h; int64_t w, h;
double var_values[VARS_NB], res; double var_values[VARS_NB], res;
char *expr; char *expr;
...@@ -170,8 +171,8 @@ static int config_props(AVFilterLink *outlink) ...@@ -170,8 +171,8 @@ static int config_props(AVFilterLink *outlink)
var_values[VAR_DAR] = var_values[VAR_A] = (double) inlink->w / inlink->h; var_values[VAR_DAR] = var_values[VAR_A] = (double) inlink->w / inlink->h;
var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ?
(double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1; (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
var_values[VAR_HSUB] = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_w; var_values[VAR_HSUB] = 1 << desc->log2_chroma_w;
var_values[VAR_VSUB] = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_h; var_values[VAR_VSUB] = 1 << desc->log2_chroma_h;
/* evaluate width and height */ /* evaluate width and height */
av_expr_parse_and_eval(&res, (expr = scale->w_expr), av_expr_parse_and_eval(&res, (expr = scale->w_expr),
...@@ -220,12 +221,12 @@ static int config_props(AVFilterLink *outlink) ...@@ -220,12 +221,12 @@ static int config_props(AVFilterLink *outlink)
/* TODO: make algorithm configurable */ /* TODO: make algorithm configurable */
av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d fmt:%s flags:0x%0x\n", av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d fmt:%s flags:0x%0x\n",
inlink ->w, inlink ->h, av_pix_fmt_descriptors[ inlink->format].name, inlink ->w, inlink ->h, av_get_pix_fmt_name(inlink->format),
outlink->w, outlink->h, av_pix_fmt_descriptors[outlink->format].name, outlink->w, outlink->h, av_get_pix_fmt_name(outlink->format),
scale->flags); scale->flags);
scale->input_is_pal = av_pix_fmt_descriptors[inlink->format].flags & PIX_FMT_PAL || scale->input_is_pal = desc->flags & PIX_FMT_PAL ||
av_pix_fmt_descriptors[inlink->format].flags & PIX_FMT_PSEUDOPAL; desc->flags & PIX_FMT_PSEUDOPAL;
if (scale->sws) if (scale->sws)
sws_freeContext(scale->sws); sws_freeContext(scale->sws);
...@@ -261,6 +262,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) ...@@ -261,6 +262,7 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
ScaleContext *scale = link->dst->priv; ScaleContext *scale = link->dst->priv;
AVFilterLink *outlink = link->dst->outputs[0]; AVFilterLink *outlink = link->dst->outputs[0];
AVFilterBufferRef *outpicref, *for_next_filter; AVFilterBufferRef *outpicref, *for_next_filter;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
int ret = 0; int ret = 0;
if (!scale->sws) { if (!scale->sws) {
...@@ -270,8 +272,8 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref) ...@@ -270,8 +272,8 @@ static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
return ff_start_frame(outlink, outpicref); return ff_start_frame(outlink, outpicref);
} }
scale->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w; scale->hsub = desc->log2_chroma_w;
scale->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h; scale->vsub = desc->log2_chroma_h;
outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h); outpicref = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
if (!outpicref) if (!outpicref)
......
...@@ -46,8 +46,9 @@ static int end_frame(AVFilterLink *inlink) ...@@ -46,8 +46,9 @@ static int end_frame(AVFilterLink *inlink)
AVFilterContext *ctx = inlink->dst; AVFilterContext *ctx = inlink->dst;
ShowInfoContext *showinfo = ctx->priv; ShowInfoContext *showinfo = ctx->priv;
AVFilterBufferRef *picref = inlink->cur_buf; AVFilterBufferRef *picref = inlink->cur_buf;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
uint32_t plane_checksum[4] = {0}, checksum = 0; uint32_t plane_checksum[4] = {0}, checksum = 0;
int i, plane, vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h; int i, plane, vsub = desc->log2_chroma_h;
for (plane = 0; picref->data[plane] && plane < 4; plane++) { for (plane = 0; picref->data[plane] && plane < 4; plane++) {
size_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane); size_t linesize = av_image_get_linesize(picref->format, picref->video->w, plane);
...@@ -67,7 +68,7 @@ static int end_frame(AVFilterLink *inlink) ...@@ -67,7 +68,7 @@ static int end_frame(AVFilterLink *inlink)
"checksum:%u plane_checksum:[%u %u %u %u]\n", "checksum:%u plane_checksum:[%u %u %u %u]\n",
showinfo->frame, showinfo->frame,
picref->pts, picref->pts * av_q2d(inlink->time_base), picref->pos, picref->pts, picref->pts * av_q2d(inlink->time_base), picref->pos,
av_pix_fmt_descriptors[picref->format].name, desc->name,
picref->video->pixel_aspect.num, picref->video->pixel_aspect.den, picref->video->pixel_aspect.num, picref->video->pixel_aspect.den,
picref->video->w, picref->video->h, picref->video->w, picref->video->h,
!picref->video->interlaced ? 'P' : /* Progressive */ !picref->video->interlaced ? 'P' : /* Progressive */
......
...@@ -54,8 +54,9 @@ static av_cold int init(AVFilterContext *ctx, const char *args) ...@@ -54,8 +54,9 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
static int config_props(AVFilterLink *link) static int config_props(AVFilterLink *link)
{ {
SliceContext *slice = link->dst->priv; SliceContext *slice = link->dst->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
slice->vshift = av_pix_fmt_descriptors[link->format].log2_chroma_h; slice->vshift = desc->log2_chroma_h;
return 0; return 0;
} }
......
...@@ -98,12 +98,13 @@ static int config_props_output(AVFilterLink *outlink) ...@@ -98,12 +98,13 @@ static int config_props_output(AVFilterLink *outlink)
AVFilterContext *ctx = outlink->src; AVFilterContext *ctx = outlink->src;
TransContext *trans = ctx->priv; TransContext *trans = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0]; AVFilterLink *inlink = ctx->inputs[0];
const AVPixFmtDescriptor *pixdesc = &av_pix_fmt_descriptors[outlink->format]; const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format);
const AVPixFmtDescriptor *desc_in = av_pix_fmt_desc_get(inlink->format);
trans->hsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_w; trans->hsub = desc_in->log2_chroma_w;
trans->vsub = av_pix_fmt_descriptors[inlink->format].log2_chroma_h; trans->vsub = desc_in->log2_chroma_h;
av_image_fill_max_pixsteps(trans->pixsteps, NULL, pixdesc); av_image_fill_max_pixsteps(trans->pixsteps, NULL, desc_out);
outlink->w = inlink->h; outlink->w = inlink->h;
outlink->h = inlink->w; outlink->h