本文整理汇总了C++中FF_CEIL_RSHIFT函数的典型用法代码示例。如果您正苦于以下问题:C++ FF_CEIL_RSHIFT函数的具体用法?C++ FF_CEIL_RSHIFT怎么用?C++ FF_CEIL_RSHIFT使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了FF_CEIL_RSHIFT函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: config_input
/* vf_maskedmerge: input-configuration callback.
 * Caches per-plane dimensions and bit-depth derived constants in the
 * filter context, and selects the 8- or 16-bit merge implementation. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    MaskedMergeContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int hsub = desc->log2_chroma_w;
    const int vsub = desc->log2_chroma_h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    /* Planes 1/2 are chroma and may be subsampled; planes 0/3 (luma/alpha)
     * always use the full frame size. Rounding is upward (ceil). */
    s->width[0]  = s->width[3]  = inlink->w;
    s->width[1]  = s->width[2]  = FF_CEIL_RSHIFT(inlink->w, hsub);
    s->height[0] = s->height[3] = inlink->h;
    s->height[1] = s->height[2] = FF_CEIL_RSHIFT(inlink->h, vsub);

    s->depth = desc->comp[0].depth;
    s->max   = 1 << s->depth;
    s->half  = s->max / 2;

    /* Pick the implementation matching the component bit depth. */
    s->maskedmerge = (desc->comp[0].depth == 8) ? maskedmerge8 : maskedmerge16;

    return 0;
}
开发者ID:ChristianFrisson,项目名称:FFmpeg,代码行数:27,代码来源:vf_maskedmerge.c
示例2: chr_h_scale
/* swscale: horizontally scale one slice of the two chroma planes (1 and 2)
 * from the source slice into the destination slice.
 *
 * @param c      software-scaler context providing the scaling callbacks
 * @param desc   filter descriptor holding src/dst slice state and coefficients
 * @param sliceY first output line of the slice
 * @param sliceH number of lines to process
 * @return sliceH (number of lines produced)
 */
static int chr_h_scale(SwsContext *c, SwsFilterDescriptor *desc, int sliceY, int sliceH)
{
FilterContext *instance = desc->instance;
/* Chroma width = luma width shifted down by the horizontal chroma
 * subsampling, rounded up so odd widths keep their last sample. */
int srcW = FF_CEIL_RSHIFT(desc->src->width, desc->src->h_chr_sub_sample);
int dstW = FF_CEIL_RSHIFT(desc->dst->width, desc->dst->h_chr_sub_sample);
int xInc = instance->xInc;
uint8_t ** src1 = desc->src->plane[1].line;
uint8_t ** dst1 = desc->dst->plane[1].line;
uint8_t ** src2 = desc->src->plane[2].line;
uint8_t ** dst2 = desc->dst->plane[2].line;
/* Line-ring offsets: sliceY is absolute, plane[].sliceY is the first line
 * currently buffered, so the difference indexes into the line array. */
int src_pos1 = sliceY - desc->src->plane[1].sliceY;
int dst_pos1 = sliceY - desc->dst->plane[1].sliceY;
int src_pos2 = sliceY - desc->src->plane[2].sliceY;
int dst_pos2 = sliceY - desc->dst->plane[2].sliceY;
int i;
for (i = 0; i < sliceH; ++i) {
/* Fast bilinear path when available, otherwise generic FIR scaling. */
if (c->hcscale_fast) {
c->hcscale_fast(c, (uint16_t*)dst1[dst_pos1+i], (uint16_t*)dst2[dst_pos2+i], dstW, src1[src_pos1+i], src2[src_pos2+i], srcW, xInc);
} else {
c->hcScale(c, (uint16_t*)dst1[dst_pos1+i], dstW, src1[src_pos1+i], instance->filter, instance->filter_pos, instance->filter_size);
c->hcScale(c, (uint16_t*)dst2[dst_pos2+i], dstW, src2[src_pos2+i], instance->filter, instance->filter_pos, instance->filter_size);
}
/* Optional limited<->full range conversion applied in place. */
if (c->chrConvertRange)
c->chrConvertRange((uint16_t*)dst1[dst_pos1+i], (uint16_t*)dst2[dst_pos2+i], dstW);
/* Track how many destination lines are now valid. */
desc->dst->plane[1].sliceH += 1;
desc->dst->plane[2].sliceH += 1;
}
return sliceH;
}
开发者ID:LittleKey,项目名称:FFmpeg,代码行数:35,代码来源:hscale.c
示例3: config_input
/* vf_atadenoise: input-configuration callback.
 * Caches per-plane dimensions, selects the 8- or 16-bit slice filter and
 * converts the user's floating-point thresholds to the integer scale of
 * the input bit depth. */
static int config_input(AVFilterLink *inlink)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFilterContext *ctx = inlink->dst;
ATADenoiseContext *s = ctx->priv;
int depth;
s->nb_planes = desc->nb_components;
/* Chroma planes 1/2 may be subsampled; 0/3 are full size. */
s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
s->planeheight[0] = s->planeheight[3] = inlink->h;
s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
s->planewidth[0] = s->planewidth[3] = inlink->w;
depth = desc->comp[0].depth;
if (depth == 8)
s->filter_slice = filter_slice8;
else
s->filter_slice = filter_slice16;
/* NOTE(review): C precedence makes these (fthra * (1 << depth)) - 1,
 * not fthra * ((1 << depth) - 1) — confirm this matches the intended
 * threshold scaling against upstream vf_atadenoise.c. */
s->thra[0] = s->fthra[0] * (1 << depth) - 1;
s->thra[1] = s->fthra[1] * (1 << depth) - 1;
s->thra[2] = s->fthra[2] * (1 << depth) - 1;
s->thrb[0] = s->fthrb[0] * (1 << depth) - 1;
s->thrb[1] = s->fthrb[1] * (1 << depth) - 1;
s->thrb[2] = s->fthrb[2] * (1 << depth) - 1;
return 0;
}
开发者ID:zhenghuadai,项目名称:FFmpeg,代码行数:29,代码来源:vf_atadenoise.c
示例4: filter_slice_chroma
/* vf_fade: per-slice worker that fades the two chroma planes toward the
 * neutral value 128, scaled by s->factor (16.16 fixed point).
 *
 * @param ctx    filter context (FadeContext in priv)
 * @param arg    the AVFrame to modify in place
 * @param jobnr  index of this job
 * @param nb_jobs total number of parallel jobs; the plane height is split
 *               evenly across them
 */
static int filter_slice_chroma(AVFilterContext *ctx, void *arg, int jobnr,
int nb_jobs)
{
FadeContext *s = ctx->priv;
AVFrame *frame = arg;
int i, j, plane;
/* Chroma dimensions honour the pixel format's subsampling, rounded up. */
const int width = FF_CEIL_RSHIFT(frame->width, s->hsub);
const int height= FF_CEIL_RSHIFT(frame->height, s->vsub);
int slice_start = (height * jobnr ) / nb_jobs;
int slice_end = (height * (jobnr+1)) / nb_jobs;
/* Planes 1 and 2 are the chroma planes. */
for (plane = 1; plane < 3; plane++) {
for (i = slice_start; i < slice_end; i++) {
uint8_t *p = frame->data[plane] + i * frame->linesize[plane];
for (j = 0; j < width; j++) {
/* 8421367 = ((128 << 1) + 1) << 15. It is an integer
 * representation of 128.5. The .5 is for rounding
 * purposes. */
*p = ((*p - 128) * s->factor + 8421367) >> 16;
p++;
}
}
}
return 0;
}
开发者ID:309746069,项目名称:FFmpeg,代码行数:26,代码来源:vf_fade.c
示例5: config_input
/* vf_vectorscope: input-configuration callback.
 * Chooses the plane to display, the background color for the scope, and
 * caches per-plane dimensions. */
static int config_input(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    VectorscopeContext *s = inlink->dst->priv;

    /* The displayed plane is the one not used as an axis; in GRAY mode
     * always plane 0. Unrecognized x/y pairs leave s->pd untouched. */
    if (s->mode == GRAY) {
        s->pd = 0;
    } else if ((s->x == 1 && s->y == 2) || (s->x == 2 && s->y == 1)) {
        s->pd = 0;
    } else if ((s->x == 0 && s->y == 2) || (s->x == 2 && s->y == 0)) {
        s->pd = 1;
    } else if ((s->x == 0 && s->y == 1) || (s->x == 1 && s->y == 0)) {
        s->pd = 2;
    }

    /* RGB-planar formats get an RGB black background; everything else YUV. */
    switch (inlink->format) {
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_GBRP:
        s->bg_color = black_gbrp_color;
        break;
    default:
        s->bg_color = black_yuva_color;
    }

    /* Chroma planes 1/2 may be subsampled; 0/3 are full size. */
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);

    return 0;
}
开发者ID:quanxinglong,项目名称:FFmpeg,代码行数:32,代码来源:vf_vectorscope.c
示例6: filter_frame
/* vf_pixdesctest: copy every component of the input frame into a freshly
 * cleared output frame via av_read_image_line()/av_write_image_line(),
 * exercising the pixel-format descriptor machinery. Consumes `in`. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
PixdescTestContext *priv = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out;
int i, c, w = inlink->w, h = inlink->h;
/* Subsampled chroma dimensions, rounded up. */
const int cw = FF_CEIL_RSHIFT(w, priv->pix_desc->log2_chroma_w);
const int ch = FF_CEIL_RSHIFT(h, priv->pix_desc->log2_chroma_h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
/* Zero all allocated planes first; a negative linesize means the plane
 * is stored bottom-up, so rewind data to the real buffer start. */
for (i = 0; i < 4; i++) {
const int h1 = i == 1 || i == 2 ? ch : h;
if (out->data[i]) {
uint8_t *data = out->data[i] +
(out->linesize[i] > 0 ? 0 : out->linesize[i] * (h1-1));
memset(data, 0, FFABS(out->linesize[i]) * h1);
}
}
/* copy palette */
if (priv->pix_desc->flags & AV_PIX_FMT_FLAG_PAL ||
priv->pix_desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
memcpy(out->data[1], in->data[1], AVPALETTE_SIZE);
/* Copy each component line by line through the generic descriptor API;
 * components 1/2 use the chroma dimensions. */
for (c = 0; c < priv->pix_desc->nb_components; c++) {
const int w1 = c == 1 || c == 2 ? cw : w;
const int h1 = c == 1 || c == 2 ? ch : h;
for (i = 0; i < h1; i++) {
av_read_image_line(priv->line,
(void*)in->data,
in->linesize,
priv->pix_desc,
0, i, c, w1, 0);
av_write_image_line(priv->line,
out->data,
out->linesize,
priv->pix_desc,
0, i, c, w1);
}
}
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
开发者ID:markjreed,项目名称:vice-emu,代码行数:53,代码来源:vf_pixdesctest.c
示例7: config_input_ref
/* vf_psnr: configure the reference input.
 * Validates that both inputs agree in size and pixel format, sets the
 * per-component maxima (limited range for known YUV formats, 255
 * otherwise), derives component labels and the average maximum, and
 * caches per-plane dimensions.
 *
 * @return 0 on success, AVERROR(EINVAL) if the inputs mismatch
 */
static int config_input_ref(AVFilterLink *inlink)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFilterContext *ctx = inlink->dst;
    PSNRContext *s = ctx->priv;
    int j;

    s->nb_components = desc->nb_components;

    /* Both inputs must have identical dimensions and pixel format. */
    if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
        ctx->inputs[0]->h != ctx->inputs[1]->h) {
        /* Fixed typo: "heigth" -> "height" (matches the message used by the
         * other config_input_ref variant in this file). */
        av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
        return AVERROR(EINVAL);
    }
    if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
        av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
        return AVERROR(EINVAL);
    }

    /* Known limited-range YUV formats: Y in [16,235], chroma in [16,240],
     * alpha full range. Everything else is treated as full-range 8-bit. */
    switch (inlink->format) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_YUVA444P:
        s->max[0] = 235;
        s->max[3] = 255;
        s->max[1] = s->max[2] = 240;
        break;
    default:
        s->max[0] = s->max[1] = s->max[2] = s->max[3] = 255;
    }

    /* Component letters for reporting: r/g/b for RGB layouts, y/u/v else. */
    s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
    s->comps[0] = s->is_rgb ? 'r' : 'y' ;
    s->comps[1] = s->is_rgb ? 'g' : 'u' ;
    s->comps[2] = s->is_rgb ? 'b' : 'v' ;
    s->comps[3] = 'a';

    for (j = 0; j < s->nb_components; j++)
        s->average_max += s->max[j];
    s->average_max /= s->nb_components;

    /* Chroma planes 1/2 may be subsampled; 0/3 are full size. */
    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    return 0;
}
开发者ID:StephanieSpanjian,项目名称:FFmpeg,代码行数:53,代码来源:vf_psnr.c
示例8: filter_frame
/* vf_delogo: remove a logo by interpolating over the configured rectangle
 * on every plane. Works in place when the input frame is writable,
 * otherwise allocates a new output frame. Consumes `in` unless it is
 * forwarded directly. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
DelogoContext *s = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFrame *out;
int hsub0 = desc->log2_chroma_w;
int vsub0 = desc->log2_chroma_h;
int direct = 0;
int plane;
AVRational sar;
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
sar = in->sample_aspect_ratio;
/* Assume square pixels if SAR is unknown */
if (!sar.num)
sar.num = sar.den = 1;
/* Only chroma planes (1 and 2) are subsampled; luma/alpha use shift 0. */
for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
int vsub = plane == 1 || plane == 2 ? vsub0 : 0;
apply_delogo(out->data[plane], out->linesize[plane],
in ->data[plane], in ->linesize[plane],
FF_CEIL_RSHIFT(inlink->w, hsub),
FF_CEIL_RSHIFT(inlink->h, vsub),
sar, s->x>>hsub, s->y>>vsub,
/* Up and left borders were rounded down, inject lost bits
 * into width and height to avoid error accumulation */
FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub),
FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub),
s->band>>FFMIN(hsub, vsub),
s->show, direct);
}
if (!direct)
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
开发者ID:VFR-maniac,项目名称:ffmpeg,代码行数:52,代码来源:vf_delogo.c
示例9: copy_picture_field
/**
 * Copy picture field from src to dst.
 *
 * @param src_field copy from upper, lower field or both
 * @param interleave leave a padding line between each copied line
 * @param dst_field copy to upper or lower field,
 * only meaningful when interleave is selected
 * @param flags context flags
 */
static inline
void copy_picture_field(TInterlaceContext *tinterlace,
uint8_t *dst[4], int dst_linesize[4],
const uint8_t *src[4], int src_linesize[4],
enum AVPixelFormat format, int w, int src_h,
int src_field, int interleave, int dst_field,
int flags)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(format);
int plane, vsub = desc->log2_chroma_h;
/* k = line stride in source fields: 1 when copying both fields,
 * 2 when copying only one (skip every other line). */
int k = src_field == FIELD_UPPER_AND_LOWER ? 1 : 2;
int h;
for (plane = 0; plane < desc->nb_components; plane++) {
/* Chroma planes (1 and 2) use subsampled dimensions, rounded up. */
int lines = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT(src_h, vsub) : src_h;
int cols = plane == 1 || plane == 2 ? FF_CEIL_RSHIFT( w, desc->log2_chroma_w) : w;
int linesize = av_image_get_linesize(format, w, plane);
uint8_t *dstp = dst[plane];
const uint8_t *srcp = src[plane];
if (linesize < 0)
return;
/* Number of lines in the selected field; the extra line goes to the
 * upper field when the height is odd. */
lines = (lines + (src_field == FIELD_UPPER)) / k;
if (src_field == FIELD_LOWER)
srcp += src_linesize[plane];
if (interleave && dst_field == FIELD_LOWER)
dstp += dst_linesize[plane];
if (flags & TINTERLACE_FLAG_VLPF) {
// Low-pass filtering is required when creating an interlaced destination from
// a progressive source which contains high-frequency vertical detail.
// Filtering will reduce interlace 'twitter' and Moire patterning.
int srcp_linesize = src_linesize[plane] * k;
int dstp_linesize = dst_linesize[plane] * (interleave ? 2 : 1);
for (h = lines; h > 0; h--) {
const uint8_t *srcp_above = srcp - src_linesize[plane];
const uint8_t *srcp_below = srcp + src_linesize[plane];
if (h == lines) srcp_above = srcp; // there is no line above
if (h == 1) srcp_below = srcp; // there is no line below
tinterlace->lowpass_line(dstp, cols, srcp, srcp_above, srcp_below);
dstp += dstp_linesize;
srcp += srcp_linesize;
}
} else {
/* Plain copy; doubled destination stride leaves gaps when interleaving. */
av_image_copy_plane(dstp, dst_linesize[plane] * (interleave ? 2 : 1),
srcp, src_linesize[plane]*k, linesize, lines);
}
}
}
开发者ID:Brainiarc7,项目名称:ffmpeg-nvenc-plain,代码行数:59,代码来源:vf_tinterlace.c
示例10: config_props
/* vf_hflip: input-configuration callback.
 * Records the max pixel step per plane and the per-plane dimensions,
 * accounting for chroma subsampling on planes 1 and 2. */
static int config_props(AVFilterLink *inlink)
{
    FlipContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);

    av_image_fill_max_pixsteps(s->max_step, NULL, pix_desc);

    s->planewidth[0]  = s->planewidth[3]  = inlink->w;
    s->planewidth[1]  = s->planewidth[2]  = FF_CEIL_RSHIFT(inlink->w, pix_desc->log2_chroma_w);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, pix_desc->log2_chroma_h);

    return 0;
}
开发者ID:markjreed,项目名称:vice-emu,代码行数:15,代码来源:vf_hflip.c
示例11: config_input_ref
/* vf_psnr (newer variant): configure the reference input.
 * Validates that both inputs agree in size and format, derives per-plane
 * dimensions, pixel-count-based plane weights, and selects the SSE line
 * function for the input bit depth. */
static int config_input_ref(AVFilterLink *inlink)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFilterContext *ctx = inlink->dst;
PSNRContext *s = ctx->priv;
unsigned sum;
int j;
s->nb_components = desc->nb_components;
if (ctx->inputs[0]->w != ctx->inputs[1]->w ||
ctx->inputs[0]->h != ctx->inputs[1]->h) {
av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n");
return AVERROR(EINVAL);
}
if (ctx->inputs[0]->format != ctx->inputs[1]->format) {
av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n");
return AVERROR(EINVAL);
}
/* Per-component maximum derived from the stored bit depth
 * (depth_minus1 is the pre-refactor field name for depth - 1). */
s->max[0] = (1 << (desc->comp[0].depth_minus1 + 1)) - 1;
s->max[1] = (1 << (desc->comp[1].depth_minus1 + 1)) - 1;
s->max[2] = (1 << (desc->comp[2].depth_minus1 + 1)) - 1;
s->max[3] = (1 << (desc->comp[3].depth_minus1 + 1)) - 1;
/* Component letters for reporting: r/g/b for RGB layouts, y/u/v else. */
s->is_rgb = ff_fill_rgba_map(s->rgba_map, inlink->format) >= 0;
s->comps[0] = s->is_rgb ? 'r' : 'y' ;
s->comps[1] = s->is_rgb ? 'g' : 'u' ;
s->comps[2] = s->is_rgb ? 'b' : 'v' ;
s->comps[3] = 'a';
/* Chroma planes 1/2 may be subsampled; 0/3 are full size. */
s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
s->planeheight[0] = s->planeheight[3] = inlink->h;
s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
s->planewidth[0] = s->planewidth[3] = inlink->w;
/* Weight each plane by its share of the total pixel count and build
 * the weighted average maximum accordingly. */
sum = 0;
for (j = 0; j < s->nb_components; j++)
sum += s->planeheight[j] * s->planewidth[j];
for (j = 0; j < s->nb_components; j++) {
s->planeweight[j] = (double) s->planeheight[j] * s->planewidth[j] / sum;
s->average_max += s->max[j] * s->planeweight[j];
}
/* Pick the 8- or 16-bit SSE kernel; x86 may override with SIMD. */
s->dsp.sse_line = desc->comp[0].depth_minus1 > 7 ? sse_line_16bit : sse_line_8bit;
if (ARCH_X86)
ff_psnr_init_x86(&s->dsp, desc->comp[0].depth_minus1 + 1);
return 0;
}
开发者ID:nanflower,项目名称:FFmpeg,代码行数:48,代码来源:vf_psnr.c
示例12: smv_img_pnt
/* smvjpegdec: point dst_data at a sub-picture `nlines` lines into each
 * plane of src_data, without copying pixel data. Chroma planes are offset
 * by the subsampled line count; palette data (if any) is shared. */
static inline void smv_img_pnt(uint8_t *dst_data[4], uint8_t *src_data[4],
const int src_linesizes[4],
enum PixelFormat pix_fmt, int width, int height,
int nlines)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
int i, planes_nb = 0;
/* Hardware-accelerated formats carry no CPU-accessible planes. */
if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
return;
/* Number of planes = highest plane index used by any component + 1. */
for (i = 0; i < desc->nb_components; i++)
planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);
for (i = 0; i < planes_nb; i++) {
int h = height;
/* Chroma planes (1 and 2) use the subsampled height, rounded up. */
if (i == 1 || i == 2) {
h = FF_CEIL_RSHIFT(height, desc->log2_chroma_h);
}
smv_img_pnt_plane(&dst_data[i], src_data[i],
src_linesizes[i], h, nlines);
}
/* Paletted formats keep the palette in plane 1; share it as-is. */
if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
dst_data[1] = src_data[1];
}
开发者ID:r-type,项目名称:vice-libretro,代码行数:26,代码来源:smvjpegdec.c
示例13: chr_convert
/* swscale: convert one slice of chroma input to the internal YV12-style
 * representation (or read planar RGB chroma), writing into the
 * destination slice's plane 1/2 line buffers.
 * Returns the number of lines processed (sliceH). */
static int chr_convert(SwsContext *c, SwsFilterDescriptor *desc, int sliceY, int sliceH)
{
/* Chroma width = luma width shifted by horizontal subsampling, ceil. */
int srcW = FF_CEIL_RSHIFT(desc->src->width, desc->src->h_chr_sub_sample);
ColorContext * instance = desc->instance;
uint32_t * pal = instance->pal;
/* sp0 maps the chroma slice position back to luma-plane line indices
 * (plane 0/3); sp1 indexes directly into the chroma line buffers. */
int sp0 = (sliceY - (desc->src->plane[0].sliceY >> desc->src->v_chr_sub_sample)) << desc->src->v_chr_sub_sample;
int sp1 = sliceY - desc->src->plane[1].sliceY;
int i;
/* Publish the destination slice window before filling it. */
desc->dst->plane[1].sliceY = sliceY;
desc->dst->plane[1].sliceH = sliceH;
desc->dst->plane[2].sliceY = sliceY;
desc->dst->plane[2].sliceH = sliceH;
for (i = 0; i < sliceH; ++i) {
const uint8_t * src[4] = { desc->src->plane[0].line[sp0+i],
desc->src->plane[1].line[sp1+i],
desc->src->plane[2].line[sp1+i],
desc->src->plane[3].line[sp0+i]};
uint8_t * dst1 = desc->dst->plane[1].line[i];
uint8_t * dst2 = desc->dst->plane[2].line[i];
/* Packed/paletted input goes through chrToYV12; planar RGB through
 * readChrPlanar. If neither is set the line is left untouched. */
if (c->chrToYV12) {
c->chrToYV12(dst1, dst2, src[0], src[1], src[2], srcW, pal);
} else if (c->readChrPlanar) {
c->readChrPlanar(dst1, dst2, src, srcW, c->input_rgb2yuv_table);
}
}
return sliceH;
}
开发者ID:LittleKey,项目名称:FFmpeg,代码行数:32,代码来源:hscale.c
示例14: config_input
/* vf_vectorscope (newer variant): input-configuration callback.
 * Determines RGB-ness and bit depth, picks the displayed plane, the 8/16
 * bit scope implementation, the background color, and per-plane sizes. */
static int config_input(AVFilterLink *inlink)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
VectorscopeContext *s = inlink->dst->priv;
s->is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB);
/* size = number of code values per component; mult scales 8-bit
 * constants up to the actual depth. */
s->size = 1 << desc->comp[0].depth;
s->mult = s->size / 256;
/* The displayed plane is the one not used as a scope axis; in GRAY
 * mode on YUV input, always the luma plane. Unrecognized x/y pairs
 * leave s->pd unchanged. */
if (s->mode == GRAY && s->is_yuv)
s->pd = 0;
else {
if ((s->x == 1 && s->y == 2) || (s->x == 2 && s->y == 1))
s->pd = 0;
else if ((s->x == 0 && s->y == 2) || (s->x == 2 && s->y == 0))
s->pd = 1;
else if ((s->x == 0 && s->y == 1) || (s->x == 1 && s->y == 0))
s->pd = 2;
}
/* 8-bit formats use the byte implementation, deeper ones 16-bit. */
if (s->size == 256)
s->vectorscope = vectorscope8;
else
s->vectorscope = vectorscope16;
/* RGB-planar formats get an RGB black background; everything else YUV. */
switch (inlink->format) {
case AV_PIX_FMT_GBRP10:
case AV_PIX_FMT_GBRP9:
case AV_PIX_FMT_GBRAP:
case AV_PIX_FMT_GBRP:
s->bg_color = black_gbrp_color;
break;
default:
s->bg_color = black_yuva_color;
}
s->hsub = desc->log2_chroma_w;
s->vsub = desc->log2_chroma_h;
/* Chroma planes 1/2 may be subsampled; 0/3 are full size. */
s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
s->planeheight[0] = s->planeheight[3] = inlink->h;
s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
s->planewidth[0] = s->planewidth[3] = inlink->w;
return 0;
}
开发者ID:Crawping,项目名称:chromium_extract,代码行数:45,代码来源:vf_vectorscope.c
示例15: config_out_props
/* vf_tinterlace: configure the output link.
 * Sets the output dimensions/time base/frame rate according to the
 * selected mode, allocates and fills a black padding picture for
 * MODE_PAD, and wires up the optional vertical low-pass filter. */
static int config_out_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = outlink->src->inputs[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
TInterlaceContext *tinterlace = ctx->priv;
tinterlace->vsub = desc->log2_chroma_h;
outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
outlink->w = inlink->w;
/* Merge and pad modes stack two fields, doubling the output height. */
outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
inlink->h*2 : inlink->h;
if (tinterlace->mode == MODE_PAD) {
/* Limited-range YUV black; full-range formats use 0 for luma/alpha. */
uint8_t black[4] = { 16, 128, 128, 16 };
int i, ret;
if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
black[0] = black[3] = 0;
ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
outlink->w, outlink->h, outlink->format, 1);
if (ret < 0)
return ret;
/* fill black picture with black */
for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
/* Chroma planes (1 and 2) use the subsampled height, rounded up. */
int h = i == 1 || i == 2 ? FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
memset(tinterlace->black_data[i], black[i],
tinterlace->black_linesize[i] * h);
}
}
/* Low-pass filtering only makes sense in the interleave modes. */
if ((tinterlace->flags & TINTERLACE_FLAG_VLPF)
&& !(tinterlace->mode == MODE_INTERLEAVE_TOP
|| tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d\n",
tinterlace->mode);
tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;
}
/* interlacex2 doubles the frame rate; other non-pad modes halve it. */
if (tinterlace->mode == MODE_INTERLACEX2) {
outlink->time_base.num = inlink->time_base.num;
outlink->time_base.den = inlink->time_base.den * 2;
outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
} else if (tinterlace->mode != MODE_PAD) {
outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
}
if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
tinterlace->lowpass_line = lowpass_line_c;
if (ARCH_X86)
ff_tinterlace_init_x86(tinterlace);
}
av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n",
tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off",
inlink->h, outlink->h);
return 0;
}
开发者ID:Brainiarc7,项目名称:ffmpeg-nvenc-plain,代码行数:57,代码来源:vf_tinterlace.c
示例16: filter_frame
/* vf_hqdn3d: denoise one frame.
 * Runs in place when the input is writable and the filter is enabled,
 * otherwise into a newly allocated frame. When the filter is disabled the
 * denoise pass still runs (to keep the temporal state in frame_prev
 * up to date) but the untouched input is forwarded instead. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
HQDN3DContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFrame *out;
int direct, c;
if (av_frame_is_writable(in) && !ctx->is_disabled) {
direct = 1;
out = in;
} else {
direct = 0;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
}
/* Process luma (c==0, no subsampling) and both chroma planes;
 * !!c is 0 for luma and 1 for chroma, selecting the subsample shift. */
for (c = 0; c < 3; c++) {
denoise(s, in->data[c], out->data[c],
s->line, &s->frame_prev[c],
FF_CEIL_RSHIFT(in->width, (!!c * s->hsub)),
FF_CEIL_RSHIFT(in->height, (!!c * s->vsub)),
in->linesize[c], out->linesize[c],
s->coefs[c ? CHROMA_SPATIAL : LUMA_SPATIAL],
s->coefs[c ? CHROMA_TMP : LUMA_TMP]);
}
/* Disabled: discard the denoised copy and pass the original through. */
if (ctx->is_disabled) {
av_frame_free(&out);
return ff_filter_frame(outlink, in);
}
if (!direct)
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
开发者ID:18565773346,项目名称:android-h264-decoder,代码行数:43,代码来源:vf_hqdn3d.c
示例17: request_frame
/* vsrc_mptestsrc: produce the next test-pattern frame.
 * Clears the frame to black (luma 0, chroma 128), then draws the pattern
 * selected by the test mode; TEST_ALL cycles through all patterns,
 * inserting a black frame every 30 frames. */
static int request_frame(AVFilterLink *outlink)
{
MPTestContext *test = outlink->src->priv;
AVFrame *picref;
int w = WIDTH, h = HEIGHT,
/* Chroma dimensions after subsampling, rounded up. */
cw = FF_CEIL_RSHIFT(w, test->hsub), ch = FF_CEIL_RSHIFT(h, test->vsub);
unsigned int frame = outlink->frame_count;
enum test_type tt = test->test;
int i;
/* Stop once the configured duration has elapsed. */
if (test->max_pts >= 0 && test->pts > test->max_pts)
return AVERROR_EOF;
picref = ff_get_video_buffer(outlink, w, h);
if (!picref)
return AVERROR(ENOMEM);
picref->pts = test->pts++;
// clean image
for (i = 0; i < h; i++)
memset(picref->data[0] + i*picref->linesize[0], 0, w);
for (i = 0; i < ch; i++) {
memset(picref->data[1] + i*picref->linesize[1], 128, cw);
memset(picref->data[2] + i*picref->linesize[2], 128, cw);
}
if (tt == TEST_ALL && frame%30) /* draw a black frame at the beginning of each test */
tt = (frame/30)%(TEST_NB-1);
/* Each pattern evolves over a 30-frame cycle (frame%30). */
switch (tt) {
case TEST_DC_LUMA: dc_test(picref->data[0], picref->linesize[0], 256, 256, frame%30); break;
case TEST_DC_CHROMA: dc_test(picref->data[1], picref->linesize[1], 256, 256, frame%30); break;
case TEST_FREQ_LUMA: freq_test(picref->data[0], picref->linesize[0], frame%30); break;
case TEST_FREQ_CHROMA: freq_test(picref->data[1], picref->linesize[1], frame%30); break;
case TEST_AMP_LUMA: amp_test(picref->data[0], picref->linesize[0], frame%30); break;
case TEST_AMP_CHROMA: amp_test(picref->data[1], picref->linesize[1], frame%30); break;
case TEST_CBP: cbp_test(picref->data , picref->linesize , frame%30); break;
case TEST_MV: mv_test(picref->data[0], picref->linesize[0], frame%30); break;
case TEST_RING1: ring1_test(picref->data[0], picref->linesize[0], frame%30); break;
case TEST_RING2: ring2_test(picref->data[0], picref->linesize[0], frame%30); break;
}
return ff_filter_frame(outlink, picref);
}
开发者ID:r-type,项目名称:vice-libretro,代码行数:43,代码来源:vsrc_mptestsrc.c
示例18: process_frame
/* vf_stack: framesync callback that composites all synchronized input
 * frames into one output, stacking them vertically or horizontally by
 * copying each plane at a running offset. */
static int process_frame(FFFrameSync *fs)
{
AVFilterContext *ctx = fs->parent;
AVFilterLink *outlink = ctx->outputs[0];
StackContext *s = fs->opaque;
AVFrame **in = s->frames;
AVFrame *out;
/* offset[p] tracks, per plane, how far into the output the next input
 * goes: in lines for vertical stacking, in bytes for horizontal. */
int i, p, ret, offset[4] = { 0 };
for (i = 0; i < s->nb_inputs; i++) {
if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
return ret;
}
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out)
return AVERROR(ENOMEM);
out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);
for (i = 0; i < s->nb_inputs; i++) {
AVFilterLink *inlink = ctx->inputs[i];
int linesize[4];
int height[4];
if ((ret = av_image_fill_linesizes(linesize, inlink->format, inlink->w)) < 0) {
av_frame_free(&out);
return ret;
}
/* Chroma planes 1/2 may be subsampled; 0/3 are full height. */
height[1] = height[2] = FF_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h);
height[0] = height[3] = inlink->h;
for (p = 0; p < s->nb_planes; p++) {
if (s->is_vertical) {
/* Vertical: advance by whole lines in the destination. */
av_image_copy_plane(out->data[p] + offset[p] * out->linesize[p],
out->linesize[p],
in[i]->data[p],
in[i]->linesize[p],
linesize[p], height[p]);
offset[p] += height[p];
} else {
/* Horizontal: advance by the copied width in bytes. */
av_image_copy_plane(out->data[p] + offset[p],
out->linesize[p],
in[i]->data[p],
in[i]->linesize[p],
linesize[p], height[p]);
offset[p] += linesize[p];
}
}
}
return ff_filter_frame(outlink, out);
}
开发者ID:LinuxCao,项目名称:ffmpeg-2.8.4-for-x86-linux,代码行数:53,代码来源:vf_stack.c
示例19: config_input
/* vf_pullup: input-configuration callback.
 * Validates that the metric plane exists, caches per-plane dimensions,
 * derives the metric window (in 8x8 blocks, excluding the configured junk
 * borders), allocates the field queue, and installs the metric kernels. */
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
PullupContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int mp = s->metric_plane;
s->nb_planes = av_pix_fmt_count_planes(inlink->format);
if (mp + 1 > s->nb_planes) {
av_log(ctx, AV_LOG_ERROR, "input format does not have such plane\n");
return AVERROR(EINVAL);
}
/* Chroma planes 1/2 may be subsampled; 0/3 are full size. */
s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
s->planeheight[0] = s->planeheight[3] = inlink->h;
s->planewidth[1] = s->planewidth[2] = FF_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
s->planewidth[0] = s->planewidth[3] = inlink->w;
/* Metric window in 8x8 blocks: junk_left/right are in 8-pixel units,
 * junk_top/bottom in 2-line units. */
s->metric_w = (s->planewidth[mp] - ((s->junk_left + s->junk_right) << 3)) >> 3;
s->metric_h = (s->planeheight[mp] - ((s->junk_top + s->junk_bottom) << 1)) >> 3;
s->metric_offset = (s->junk_left << 3) + (s->junk_top << 1) * s->planewidth[mp];
s->metric_length = s->metric_w * s->metric_h;
av_log(ctx, AV_LOG_DEBUG, "w: %d h: %d\n", s->metric_w, s->metric_h);
av_log(ctx, AV_LOG_DEBUG, "offset: %d length: %d\n", s->metric_offset, s->metric_length);
/* Queue of 8 fields for the pulldown-pattern detection. */
s->head = make_field_queue(s, 8);
if (!s->head)
return AVERROR(ENOMEM);
/* C reference kernels; x86 may replace them with SIMD versions. */
s->diff = diff_c;
s->comb = comb_c;
s->var = var_c;
if (ARCH_X86)
ff_pullup_init_x86(s);
return 0;
}
开发者ID:Bjelijah,项目名称:EcamTurnH265,代码行数:39,代码来源:vf_pullup.c
示例20: get_video_buffer
/* libavutil frame.c: allocate the data buffers for a video AVFrame.
 * Computes aligned linesizes (unless the caller pre-set them), allocates
 * one buffer per plane with over-allocation for stride alignment, and a
 * separate 1024-byte palette buffer for paletted formats.
 *
 * @param frame frame with format/width/height set
 * @param align requested linesize alignment (power of two)
 * @return 0 on success, a negative AVERROR on failure
 */
static int get_video_buffer(AVFrame *frame, int align)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
int ret, i;
if (!desc)
return AVERROR(EINVAL);
if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
return ret;
if (!frame->linesize[0]) {
/* Grow the width padding (i = 1, 2, 4, ...) until the resulting
 * linesize satisfies the requested alignment. */
for(i=1; i<=align; i+=i) {
ret = av_image_fill_linesizes(frame->linesize, frame->format,
FFALIGN(frame->width, i));
if (ret < 0)
return ret;
if (!(frame->linesize[0] & (align-1)))
break;
}
for (i = 0; i < 4 && frame->linesize[i]; i++)
frame->linesize[i] = FFALIGN(frame->linesize[i], align);
}
for (i = 0; i < 4 && frame->linesize[i]; i++) {
/* Height padded to 32 lines; chroma planes use the subsampled
 * (rounded-up) height. Extra bytes cover stride alignment slack. */
int h = FFALIGN(frame->height, 32);
if (i == 1 || i == 2)
h = FF_CEIL_RSHIFT(h, desc->log2_chroma_h);
frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
if (!frame->buf[i])
goto fail;
frame->data[i] = frame->buf[i]->data;
}
/* Paletted formats store the AVPALETTE in data[1]; replace whatever
 * was allocated there with a dedicated 1024-byte buffer. */
if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
av_buffer_unref(&frame->buf[1]);
frame->buf[1] = av_buffer_alloc(1024);
if (!frame->buf[1])
goto fail;
frame->data[1] = frame->buf[1]->data;
}
frame->extended_data = frame->data;
return 0;
fail:
/* Release any partially allocated buffers. */
av_frame_unref(frame);
return AVERROR(ENOMEM);
}
开发者ID:venkatarajasekhar,项目名称:Qt,代码行数:51,代码来源:frame.c
注:本文中的FF_CEIL_RSHIFT函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论