本文整理汇总了C++中FFALIGN函数的典型用法代码示例。如果您正苦于以下问题:C++ FFALIGN函数的具体用法?C++ FFALIGN怎么用?C++ FFALIGN使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了FFALIGN函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: av_samples_get_buffer_size
/**
 * Compute the required buffer size for a block of audio samples.
 *
 * @param linesize    if non-NULL, receives the computed per-plane line size
 * @param nb_channels number of audio channels (must be > 0)
 * @param nb_samples  number of samples per channel (must be > 0)
 * @param sample_fmt  sample format
 * @param align       buffer size alignment in bytes (0 selects the default)
 * @return total buffer size in bytes on success, a negative AVERROR on failure
 */
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
                               enum AVSampleFormat sample_fmt, int align)
{
    int line_size;
    int sample_size = av_get_bytes_per_sample(sample_fmt);
    int planar      = av_sample_fmt_is_planar(sample_fmt);

    /* validate parameter ranges */
    if (!sample_size || nb_samples <= 0 || nb_channels <= 0)
        return AVERROR(EINVAL);

    /* The API allows align == 0 to mean "default alignment"; without
     * this guard the overflow check below divides by zero (UB). */
    if (!align)
        align = 1;

    /* check for integer overflow */
    if (nb_channels > INT_MAX / align ||
        (int64_t)nb_channels * nb_samples > (INT_MAX - (align * nb_channels)) / sample_size)
        return AVERROR(EINVAL);

    /* planar: one aligned line per channel; packed: all channels
     * interleaved into a single aligned line */
    line_size = planar ? FFALIGN(nb_samples * sample_size, align) :
                         FFALIGN(nb_samples * sample_size * nb_channels, align);
    if (linesize)
        *linesize = line_size;

    return planar ? line_size * nb_channels : line_size;
}
开发者ID:mcodegeeks,项目名称:OpenKODE-Framework,代码行数:23,代码来源:samplefmt.cpp
示例2: dxv_init
/* Decoder init: validate the frame dimensions, publish the 16x16-aligned
 * coded size, set up the texture DSP and pick the slice count. */
static int dxv_init(AVCodecContext *avctx)
{
    DXVContext *ctx = avctx->priv_data;
    int err = av_image_check_size(avctx->width, avctx->height, 0, avctx);

    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return err;
    }

    /* Codec requires 16x16 alignment. */
    avctx->coded_width  = FFALIGN(avctx->width,  16);
    avctx->coded_height = FFALIGN(avctx->height, 16);

    avctx->pix_fmt = AV_PIX_FMT_RGBA;
    ff_texturedsp_init(&ctx->texdsp);

    /* one slice per texture-block row, bounded by the thread count */
    ctx->slice_count = av_clip(avctx->thread_count, 1,
                               avctx->coded_height / TEXTURE_BLOCK_H);

    return 0;
}
开发者ID:Rodeo314,项目名称:tim-libav,代码行数:23,代码来源:dxv.c
示例3: backward_filter
/**
 * Backward synthesis filter, find the LPC coefficients from past speech data.
 *
 * @param ractx     RA288 decoder context (provides the float DSP)
 * @param hist      history buffer; shifted down by n entries on return
 * @param rec       recursive part of the hybrid window state
 * @param window    window applied to the history samples
 * @param lpc       output LPC coefficients
 * @param tab       per-coefficient scale table multiplied into lpc
 * @param order     LPC filter order
 * @param n         number of new samples
 * @param non_rec   number of non-recursive window samples
 * @param move_size number of history entries to move down
 */
static void backward_filter(RA288Context *ractx,
                            float *hist, float *rec, const float *window,
                            float *lpc, const float *tab,
                            int order, int n, int non_rec, int move_size)
{
    float temp[MAX_BACKWARD_FILTER_ORDER+1];

    do_hybrid_window(ractx, order, n, non_rec, temp, hist, rec, window);

    /* scale the coefficients only when compute_lpc_coefs() returns zero
     * (presumably its success code -- confirm in lpc.c); length is
     * rounded up to 16 for the vector multiply */
    if (!compute_lpc_coefs(temp, order, lpc, 0, 1, 1))
        ractx->fdsp->vector_fmul(lpc, lpc, tab, FFALIGN(order, 16));

    /* slide the history window forward by n samples */
    memmove(hist, hist + n, move_size*sizeof(*hist));
}
开发者ID:0day-ci,项目名称:FFmpeg,代码行数:17,代码来源:ra288.c
示例4: ffmal_copy_frame
/**
 * Transfer a decoded MMAL buffer into an AVFrame.
 *
 * For AV_PIX_FMT_MMAL output the frame only takes a reference to the
 * opaque MMAL buffer; for software formats the pixel data is copied
 * into a newly allocated frame.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int ffmal_copy_frame(AVCodecContext *avctx, AVFrame *frame,
                            MMAL_BUFFER_HEADER_T *buffer)
{
    MMALDecodeContext *ctx = avctx->priv_data;
    int ret = 0;

    if (avctx->pix_fmt == AV_PIX_FMT_MMAL) {
        if (!ctx->pool_out)
            return AVERROR_UNKNOWN; // format change code failed with OOM previously

        if ((ret = ff_decode_frame_props(avctx, frame)) < 0)
            goto done;

        if ((ret = ffmmal_set_ref(frame, ctx->pool_out, buffer)) < 0)
            goto done;
    } else {
        /* padded source dimensions: presumably MMAL pads to 32-wide /
         * 16-high macroblock boundaries -- TODO confirm against the
         * MMAL port format setup */
        int w = FFALIGN(avctx->width, 32);
        int h = FFALIGN(avctx->height, 16);
        uint8_t *src[4];
        int linesize[4];

        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
            goto done;

        /* describe the padded source layout, then copy only the
         * visible width x height region into the output frame */
        av_image_fill_arrays(src, linesize,
                             buffer->data + buffer->type->video.offset[0],
                             avctx->pix_fmt, w, h, 1);
        av_image_copy(frame->data, frame->linesize, src, linesize,
                      avctx->pix_fmt, avctx->width, avctx->height);
    }

    /* MMAL may report an unknown timestamp; map it to AV_NOPTS_VALUE */
    frame->pkt_pts = buffer->pts == MMAL_TIME_UNKNOWN ? AV_NOPTS_VALUE : buffer->pts;
    frame->pkt_dts = AV_NOPTS_VALUE;

done:
    return ret;
}
开发者ID:0xheart0,项目名称:FFmpeg,代码行数:37,代码来源:mmaldec.c
示例5: msrle_decode_frame
/* Decode one MS RLE frame (legacy decode_frame signature with
 * AVCodecContext-managed buffers and palctrl palette handling).
 * NOTE(review): this example is truncated by the page it was scraped
 * from -- the function continues beyond the last line shown here. */
static int msrle_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MsrleContext *s = avctx->priv_data;
    /* stride of the stored image: rows padded to 32-bit boundaries */
    int istride = FFALIGN(avctx->width*avctx->bits_per_coded_sample, 32) / 8;

    s->buf = buf;
    s->size = buf_size;

    s->frame.reference = 1;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    if (s->avctx->palctrl) {
        /* make the palette available */
        memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
        if (s->avctx->palctrl->palette_changed) {
            s->frame.palette_has_changed = 1;
            s->avctx->palctrl->palette_changed = 0;
        }
    }

    /* FIXME how to correctly detect RLE ??? */
    if (avctx->height * istride == avpkt->size) { /* assume uncompressed */
        int linesize = avctx->width * avctx->bits_per_coded_sample / 8;
        uint8_t *ptr = s->frame.data[0];
        /* stored image is bottom-up: start reading at the last row */
        uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
        int i, j;

        for (i = 0; i < avctx->height; i++) {
            if (avctx->bits_per_coded_sample == 4) {
                /* unpack two 4-bit palette indices per input byte */
                for (j = 0; j < avctx->width - 1; j += 2) {
                    ptr[j+0] = buf[j>>1] >> 4;
                    ptr[j+1] = buf[j>>1] & 0xF;
                }
                /* odd width: the final byte carries one pixel */
                if (avctx->width & 1)
                    ptr[j+0] = buf[j>>1] >> 4;
            } else {
                memcpy(ptr, buf, linesize);
            }
            buf -= istride;
            ptr += s->frame.linesize[0];
        }
开发者ID:hhool,项目名称:tcpmp-android,代码行数:49,代码来源:msrle.c
示例6: w64_read_header
/* Parse a Sony Wave64 header: verify the riff/wave GUIDs, read the fmt
 * chunk into the stream's codec context and leave the stream positioned
 * at the start of the data chunk.
 * Returns 0 on success, -1 or AVERROR(ENOMEM) on failure. */
static int w64_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    int64_t size;
    ByteIOContext *pb = s->pb;
    WAVContext *wav = s->priv_data;
    AVStream *st;
    uint8_t guid[16];

    get_buffer(pb, guid, 16);
    if (memcmp(guid, guid_riff, 16))
        return -1;

    /* minimum plausible file size */
    if (get_le64(pb) < 16 + 8 + 16 + 8 + 16 + 8) /* riff + wave + fmt + sizes */
        return -1;

    get_buffer(pb, guid, 16);
    if (memcmp(guid, guid_wave, 16)) {
        av_log(s, AV_LOG_ERROR, "could not find wave guid\n");
        return -1;
    }

    size = find_guid(pb, guid_fmt);
    if (size < 0) {
        av_log(s, AV_LOG_ERROR, "could not find fmt guid\n");
        return -1;
    }

    st = av_new_stream(s, 0);
    if (!st)
        return AVERROR(ENOMEM);

    /* subtract chunk header size - normal wav file doesn't count it */
    ff_get_wav_header(pb, st->codec, size - 24);
    /* w64 chunks are 8-byte aligned: skip any trailing padding */
    url_fskip(pb, FFALIGN(size, INT64_C(8)) - size);

    st->need_parsing = AVSTREAM_PARSE_FULL;

    av_set_pts_info(st, 64, 1, st->codec->sample_rate);

    size = find_guid(pb, guid_data);
    if (size < 0) {
        av_log(s, AV_LOG_ERROR, "could not find data guid\n");
        return -1;
    }
    /* size includes the 24-byte chunk header; exclude it from the end */
    wav->data_end = url_ftell(pb) + size - 24;
    wav->w64 = 1;

    return 0;
}
开发者ID:Akuaksh,项目名称:FFmpeg-alsenc,代码行数:49,代码来源:wav.c
示例7: play
// Filter data through filter
static af_data_t* play(struct af_instance_s* af, af_data_t* data)
{
af_resample_t *s = af->setup;
int ret;
int8_t *in = (int8_t*)data->audio;
int8_t *out;
int chans = data->nch;
int in_len = data->len;
int out_len = in_len * af->mul + 10;
if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
return NULL;
av_fast_malloc(&s->tmp[0], &s->tmp_alloc, FFALIGN(out_len,32));
if(s->tmp[0] == NULL) return NULL;
out= (int8_t*)af->data->audio;
out_len= FFMIN(out_len, af->data->len);
av_fast_malloc(&s->in[0], &s->in_alloc, FFALIGN(in_len,32));
if(s->in[0] == NULL) return NULL;
memcpy(s->in[0], in, in_len);
ret = swr_convert(s->swrctx, &s->tmp[0], out_len/chans/2, &s->in[0], in_len/chans/2);
if (ret < 0) return NULL;
out_len= ret*chans*2;
memcpy(out, s->tmp[0], out_len);
data->audio = af->data->audio;
data->len = out_len;
data->rate = af->data->rate;
return data;
}
开发者ID:basinilya,项目名称:mplayer,代码行数:37,代码来源:af_lavcresample.c
示例8: codec_reinit
/* (Re)initialize the NUV decoder for a new size and/or quality.
 * Returns 1 if the decompression buffer was reallocated, 0 if nothing
 * major changed, a negative error code on failure. */
static int codec_reinit(AVCodecContext *avctx, int width, int height,
                        int quality)
{
    NuvContext *c = avctx->priv_data;
    int ret;

    /* codec operates on even dimensions */
    width  = FFALIGN(width,  2);
    height = FFALIGN(height, 2);

    if (quality >= 0)
        get_quant_quality(c, quality);
    if (width != c->width || height != c->height) {
        // also reserve space for a possible additional header
        int buf_size = height * width * 3 / 2
                     + FFMAX(AV_LZO_OUTPUT_PADDING, FF_INPUT_BUFFER_PADDING_SIZE)
                     + RTJPEG_HEADER_SIZE;
        if (buf_size > INT_MAX/8)
            return -1;
        /* NOTE(review): av_image_check_size() is declared as
         * (width, height, ...); height and width appear swapped here --
         * verify whether the check is symmetric or this needs fixing. */
        if ((ret = av_image_check_size(height, width, 0, avctx)) < 0)
            return ret;
        avctx->width  = c->width  = width;
        avctx->height = c->height = height;
        av_fast_malloc(&c->decomp_buf, &c->decomp_size,
                       buf_size);
        if (!c->decomp_buf) {
            av_log(avctx, AV_LOG_ERROR,
                   "Can't allocate decompression buffer.\n");
            return AVERROR(ENOMEM);
        }
        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);
        /* drop any reference to a frame with the old geometry */
        av_frame_unref(c->pic);
        return 1;
    } else if (quality != c->quality)
        /* same size, new quality: just re-init the RTJpeg tables */
        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);

    return 0;
}
开发者ID:venkatarajasekhar,项目名称:Qt,代码行数:36,代码来源:nuv.c
示例9: put_meta
/**
 * Write one AIFF metadata chunk if the given key exists in the muxer's
 * metadata dictionary: chunk id, big-endian size rounded up to an even
 * value, the string value, and a pad byte when the length is odd.
 *
 * @param s   format context providing the metadata and output pb
 * @param key dictionary key to look up
 * @param id  32-bit chunk id to emit
 */
static void put_meta(AVFormatContext *s, const char *key, uint32_t id)
{
    AVDictionaryEntry *tag;
    AVIOContext *pb = s->pb;

    /* extra parentheses: assignment deliberately used as condition */
    if ((tag = av_dict_get(s->metadata, key, NULL, 0))) {
        int size = strlen(tag->value);

        avio_wl32(pb, id);
        avio_wb32(pb, FFALIGN(size, 2));
        avio_write(pb, tag->value, size);
        /* IFF-style chunks are padded to even length */
        if (size & 1)
            avio_w8(pb, 0);
    }
}
开发者ID:0day-ci,项目名称:FFmpeg,代码行数:15,代码来源:aiffenc.c
示例10: allocate_xvimage
/* Allocate XvImage slot `foo`, preferring MIT-SHM shared memory when the
 * display is local and the extension is available, falling back to a
 * plain XvImage with a malloc'ed data buffer. The buffer is cleared so
 * uninitialized padding is never displayed. */
static void allocate_xvimage(struct vo *vo, int foo)
{
    struct xvctx *ctx = vo->priv;
    struct vo_x11_state *x11 = vo->x11;
    // align it for faster OSD rendering (draw_bmp.c swscale usage)
    int aligned_w = FFALIGN(ctx->image_width, 32);
#ifdef HAVE_SHM
    if (x11->display_is_local && XShmQueryExtension(x11->display)) {
        ctx->Shmem_Flag = 1;
        x11->ShmCompletionEvent = XShmGetEventBase(x11->display)
                                + ShmCompletion;
    } else {
        ctx->Shmem_Flag = 0;
        MP_INFO(vo, "Shared memory not supported\nReverting to normal Xv.\n");
    }
    if (ctx->Shmem_Flag) {
        ctx->xvimage[foo] =
            (XvImage *) XvShmCreateImage(x11->display, ctx->xv_port,
                                         ctx->xv_format, NULL,
                                         aligned_w, ctx->image_height,
                                         &ctx->Shminfo[foo]);
        /* NOTE(review): shmget()/shmat() failures are not checked here --
         * confirm callers tolerate a NULL/-1 mapping. */
        ctx->Shminfo[foo].shmid = shmget(IPC_PRIVATE,
                                         ctx->xvimage[foo]->data_size,
                                         IPC_CREAT | 0777);
        ctx->Shminfo[foo].shmaddr = (char *) shmat(ctx->Shminfo[foo].shmid, 0,
                                                   0);
        ctx->Shminfo[foo].readOnly = False;
        ctx->xvimage[foo]->data = ctx->Shminfo[foo].shmaddr;
        XShmAttach(x11->display, &ctx->Shminfo[foo]);
        XSync(x11->display, False);
        /* mark the segment for deletion; it is freed once detached */
        shmctl(ctx->Shminfo[foo].shmid, IPC_RMID, 0);
    } else
#endif
    {
        ctx->xvimage[foo] =
            (XvImage *) XvCreateImage(x11->display, ctx->xv_port,
                                      ctx->xv_format, NULL, aligned_w,
                                      ctx->image_height);
        ctx->xvimage[foo]->data = av_malloc(ctx->xvimage[foo]->data_size);
        XSync(x11->display, False);
    }
    /* clear the freshly allocated image */
    struct mp_image img = get_xv_buffer(vo, foo);
    img.w = aligned_w;
    mp_image_clear(&img, 0, 0, img.w, img.h);
    return;
}
开发者ID:benf,项目名称:mpv,代码行数:48,代码来源:vo_xv.c
示例11: msrle_decode_frame
/* Decode one MS RLE frame (modern decode_frame signature with
 * side-data palette handling).
 * NOTE(review): this example is truncated by the page it was scraped
 * from -- the function continues beyond the last line shown here. */
static int msrle_decode_frame(AVCodecContext *avctx,
                              void *data, int *got_frame,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MsrleContext *s = avctx->priv_data;
    /* stride of the stored image: rows padded to 32-bit boundaries */
    int istride = FFALIGN(avctx->width*avctx->bits_per_coded_sample, 32) / 8;
    int ret;

    s->buf = buf;
    s->size = buf_size;

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    if (avctx->bits_per_coded_sample > 1 && avctx->bits_per_coded_sample <= 8) {
        /* pick up a new palette delivered as packet side data */
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);

        if (pal) {
            s->frame->palette_has_changed = 1;
            memcpy(s->pal, pal, AVPALETTE_SIZE);
        }
        /* make the palette available */
        memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE);
    }

    /* FIXME how to correctly detect RLE ??? */
    if (avctx->height * istride == avpkt->size) { /* assume uncompressed */
        int linesize = (avctx->width * avctx->bits_per_coded_sample + 7) / 8;
        uint8_t *ptr = s->frame->data[0];
        /* stored image is bottom-up: start reading at the last row */
        uint8_t *buf = avpkt->data + (avctx->height-1)*istride;
        int i, j;

        for (i = 0; i < avctx->height; i++) {
            if (avctx->bits_per_coded_sample == 4) {
                /* unpack two 4-bit palette indices per input byte */
                for (j = 0; j < avctx->width - 1; j += 2) {
                    ptr[j+0] = buf[j>>1] >> 4;
                    ptr[j+1] = buf[j>>1] & 0xF;
                }
                /* odd width: the final byte carries one pixel */
                if (avctx->width & 1)
                    ptr[j+0] = buf[j>>1] >> 4;
            } else {
                memcpy(ptr, buf, linesize);
            }
            buf -= istride;
            ptr += s->frame->linesize[0];
        }
开发者ID:Ancaro,项目名称:stepmania,代码行数:48,代码来源:msrle.c
示例12: ff_image_copy_plane_uc_from_x86
/* Uncached-load plane copy dispatcher: use the SSE4.1 implementation
 * when the CPU supports it and both linesizes can absorb the width
 * rounded up to 64 bytes; otherwise report that no accelerated path
 * is available (AVERROR(ENOSYS)). */
int ff_image_copy_plane_uc_from_x86(uint8_t *dst, ptrdiff_t dst_linesize,
                                    const uint8_t *src, ptrdiff_t src_linesize,
                                    ptrdiff_t bytewidth, int height)
{
    const int flags = av_get_cpu_flags();
    const ptrdiff_t padded_width = FFALIGN(bytewidth, 64);

    if (!EXTERNAL_SSE4(flags) ||
        padded_width > dst_linesize || padded_width > src_linesize)
        return AVERROR(ENOSYS);

    ff_image_copy_plane_uc_from_sse4(dst, dst_linesize, src, src_linesize,
                                     padded_width, height);
    return 0;
}
开发者ID:Rodeo314,项目名称:tim-libav,代码行数:16,代码来源:imgutils_init.c
示例13: find_guid
/** Find chunk with w64 GUID by skipping over other chunks
 * @return the size of the found chunk, or -1 on EOF / malformed size
 */
static int64_t find_guid(ByteIOContext *pb, const uint8_t guid1[16])
{
    uint8_t chunk_guid[16];

    while (!url_feof(pb)) {
        int64_t chunk_size;

        get_buffer(pb, chunk_guid, 16);
        chunk_size = get_le64(pb);

        /* a chunk can never be smaller than its own 24-byte header */
        if (chunk_size <= 24)
            return -1;
        if (memcmp(chunk_guid, guid1, 16) == 0)
            return chunk_size;

        /* sizes include the header; chunks are 8-byte aligned */
        url_fskip(pb, FFALIGN(chunk_size, INT64_C(8)) - 24);
    }
    return -1;
}
开发者ID:Akuaksh,项目名称:FFmpeg-alsenc,代码行数:19,代码来源:wav.c
示例14: find_guid
/** Find chunk with w64 GUID by skipping over other chunks
 * @return the size of the found chunk, or -1 on EOF / malformed size
 */
static int64_t find_guid(AVIOContext *pb, const uint8_t guid1[16])
{
    uint8_t chunk_guid[16];

    while (!url_feof(pb)) {
        int64_t chunk_size;

        avio_read(pb, chunk_guid, 16);
        chunk_size = avio_rl64(pb);

        /* a chunk can never be smaller than its own 24-byte header */
        if (chunk_size <= 24)
            return -1;
        if (memcmp(chunk_guid, guid1, 16) == 0)
            return chunk_size;

        /* sizes include the header; chunks are 8-byte aligned */
        avio_skip(pb, FFALIGN(chunk_size, INT64_C(8)) - 24);
    }
    return -1;
}
开发者ID:Aldwych,项目名称:buildroot-linux,代码行数:19,代码来源:wav.c
示例15: get_video_buffer
/* Allocate the data planes for a video AVFrame whose format, width and
 * height are already set, rounding each line size up to `align` bytes.
 * Returns 0 on success, a negative AVERROR on failure. */
static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i;

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        /* no caller-provided linesizes: derive them from the pixel
         * format and align each one */
        ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                      frame->width);
        if (ret < 0)
            return ret;

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4 && frame->linesize[i]; i++) {
        int h = frame->height;

        /* planes 1 and 2 are chroma: divide the height by the chroma
         * subsampling factor, rounding up (the double-negation trick) */
        if (i == 1 || i == 2)
            h = -((-h) >> desc->log2_chroma_h);

        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h);
        if (!frame->buf[i])
            goto fail;

        frame->data[i] = frame->buf[i]->data;
    }
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        /* paletted formats store a 256-entry 32-bit palette in plane 1;
         * replace whatever was allocated above */
        av_buffer_unref(&frame->buf[1]);
        frame->buf[1] = av_buffer_alloc(1024);
        if (!frame->buf[1])
            goto fail;
        frame->data[1] = frame->buf[1]->data;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    /* release any planes allocated before the failure */
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}
开发者ID:JackDanger,项目名称:libav,代码行数:47,代码来源:frame.c
示例16: ff_mediacodec_sw_buffer_copy_yuv420_semi_planar
/* Copy a MediaCodec YUV420 semi-planar (NV12-style) output buffer into
 * an AVFrame, honoring the codec-reported stride, slice height and crop
 * window. Plane 0 is luma; plane 1 is the interleaved chroma plane. */
void ff_mediacodec_sw_buffer_copy_yuv420_semi_planar(AVCodecContext *avctx,
                                                     MediaCodecDecContext *s,
                                                     uint8_t *data,
                                                     size_t size,
                                                     FFAMediaCodecBufferInfo *info,
                                                     AVFrame *frame)
{
    int i;
    uint8_t *src = NULL;

    for (i = 0; i < 2; i++) {
        int height;

        src = data + info->offset;
        if (i == 0) {
            height = avctx->height;

            /* skip cropped rows/columns at the top-left of the luma */
            src += s->crop_top * s->stride;
            src += s->crop_left;
        } else if (i == 1) {
            height = avctx->height / 2;

            /* chroma starts after slice_height luma rows */
            src += s->slice_height * s->stride;
            src += s->crop_top * s->stride;
            src += s->crop_left;
        }

        if (frame->linesize[i] == s->stride) {
            /* strides match: one bulk copy for the whole plane */
            memcpy(frame->data[i], src, height * s->stride);
        } else {
            int j, width;
            uint8_t *dst = frame->data[i];

            if (i == 0) {
                width = avctx->width;
            } else if (i == 1) {
                /* interleaved U/V: one byte pair per two pixels, width
                 * rounded up to even, bounded by the destination row */
                width = FFMIN(frame->linesize[i], FFALIGN(avctx->width, 2));
            }

            /* row-by-row copy with differing strides */
            for (j = 0; j < height; j++) {
                memcpy(dst, src, width);
                src += s->stride;
                dst += frame->linesize[i];
            }
        }
    }
}
示例17: cuda_frames_init
/* Initialize a CUDA AVHWFramesContext: verify the software pixel format
 * is supported and, when the caller supplied no buffer pool, create an
 * internal one sized for the format with the width rounded up to
 * CUDA_FRAME_ALIGNMENT. Returns 0 or a negative AVERROR. */
static int cuda_frames_init(AVHWFramesContext *ctx)
{
    CUDAFramesContext *priv = ctx->internal->priv;
    int aligned_width = FFALIGN(ctx->width, CUDA_FRAME_ALIGNMENT);
    int i;

    /* linear scan of the supported-format table */
    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
        if (ctx->sw_format == supported_formats[i])
            break;
    }
    if (i == FF_ARRAY_ELEMS(supported_formats)) {
        av_log(ctx, AV_LOG_ERROR, "Pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(ctx->sw_format));
        return AVERROR(ENOSYS);
    }

    av_pix_fmt_get_chroma_sub_sample(ctx->sw_format, &priv->shift_width, &priv->shift_height);

    if (!ctx->pool) {
        int size;

        /* total bytes per frame across all planes for each format */
        switch (ctx->sw_format) {
        case AV_PIX_FMT_NV12:
        case AV_PIX_FMT_YUV420P:
            size = aligned_width * ctx->height * 3 / 2;
            break;
        case AV_PIX_FMT_YUV444P:
        case AV_PIX_FMT_P010:
        case AV_PIX_FMT_P016:
            size = aligned_width * ctx->height * 3;
            break;
        case AV_PIX_FMT_YUV444P16:
            size = aligned_width * ctx->height * 6;
            break;
        default:
            /* unreachable if supported_formats and this switch agree */
            av_log(ctx, AV_LOG_ERROR, "BUG: Pixel format missing from size calculation.");
            return AVERROR_BUG;
        }

        ctx->internal->pool_internal = av_buffer_pool_init2(size, ctx, cuda_pool_alloc, NULL);
        if (!ctx->internal->pool_internal)
            return AVERROR(ENOMEM);
    }

    return 0;
}
开发者ID:Diagonactic,项目名称:plex-new-transcoder,代码行数:46,代码来源:hwcontext_cuda.c
示例18: draw_alpha
/* Blend a w x h OSD/subtitle alpha bitmap at (x0,y0) into the mga_vid
 * frame memory, dispatching on the configured hardware pixel format. */
static void draw_alpha(int x0,int y0, int w,int h, unsigned char* src, unsigned char *srca, int stride){
    /* hardware rows are padded to a multiple of 32 pixels */
    uint32_t bespitch = FFALIGN(mga_vid_config.src_width, 32);
    /* shift x to compensate for horizontal panscan cropping */
    x0+=mga_vid_config.src_width*(vo_panscan_x>>1)/(vo_dwidth+vo_panscan_x);
    switch(mga_vid_config.format){
    case MGA_VID_FORMAT_YV12:
    case MGA_VID_FORMAT_IYUV:
    case MGA_VID_FORMAT_I420:
        /* planar 4:2:0: 1 byte per luma pixel */
        vo_draw_alpha_yv12(w,h,src,srca,stride,vid_data+bespitch*y0+x0,bespitch);
        break;
    case MGA_VID_FORMAT_YUY2:
        /* packed 4:2:2, 2 bytes per pixel, luma at even byte offsets */
        vo_draw_alpha_yuy2(w,h,src,srca,stride,vid_data+2*(bespitch*y0+x0),2*bespitch);
        break;
    case MGA_VID_FORMAT_UYVY:
        /* packed 4:2:2 with luma at odd byte offsets, hence the +1 */
        vo_draw_alpha_yuy2(w,h,src,srca,stride,vid_data+2*(bespitch*y0+x0)+1,2*bespitch);
        break;
    }
}
开发者ID:NeeMeese,项目名称:mplayer-ce,代码行数:17,代码来源:mga_template.c
示例19: y41p_decode_frame
/* Decode one Y41P frame (packed YUV 4:1:1): every 12-byte input group
 * carries 8 luma samples, 2 U samples and 2 V samples.
 * Returns the consumed packet size or a negative AVERROR. */
static int y41p_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    AVFrame *pic = data;
    uint8_t *src = avpkt->data;
    uint8_t *y, *u, *v;
    int i, j, ret;

    /* 12 bytes per 8 pixels = 3/2 bytes per pixel, width padded to 8 */
    if (avpkt->size < 3LL * avctx->height * FFALIGN(avctx->width, 8) / 2) {
        av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
        return AVERROR(EINVAL);
    }

    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    /* every frame is a standalone intra frame */
    pic->key_frame = 1;
    pic->pict_type = AV_PICTURE_TYPE_I;

    /* input rows are stored bottom-up: fill output rows last-to-first */
    for (i = avctx->height - 1; i >= 0 ; i--) {
        y = &pic->data[0][i * pic->linesize[0]];
        u = &pic->data[1][i * pic->linesize[1]];
        v = &pic->data[2][i * pic->linesize[2]];

        /* one 12-byte group per 8 output pixels:
         * U0 Y0 V0 Y1  U1 Y2 V1 Y3  Y4 Y5 Y6 Y7 */
        for (j = 0; j < avctx->width; j += 8) {
            *(u++) = *src++;
            *(y++) = *src++;
            *(v++) = *src++;
            *(y++) = *src++;
            *(u++) = *src++;
            *(y++) = *src++;
            *(v++) = *src++;
            *(y++) = *src++;
            *(y++) = *src++;
            *(y++) = *src++;
            *(y++) = *src++;
            *(y++) = *src++;
        }
    }

    *got_frame = 1;

    return avpkt->size;
}
开发者ID:DeHackEd,项目名称:FFmpeg,代码行数:45,代码来源:y41pdec.c
示例20: output_frame
/**
 * Read samples from the input FIFOs, mix, and write to the output link.
 *
 * @param outlink    output link of the amix filter graph node
 * @param nb_samples number of samples to consume from every active input
 * @return the result of ff_filter_frame(), or a negative AVERROR
 */
static int output_frame(AVFilterLink *outlink, int nb_samples)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    AVFrame *out_buf, *in_buf;
    int i;

    /* refresh per-input scale factors for this block */
    calculate_scales(s, nb_samples);

    out_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!out_buf)
        return AVERROR(ENOMEM);

    /* scratch buffer, reused for each input's samples */
    in_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!in_buf) {
        av_frame_free(&out_buf);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] == INPUT_ON) {
            int planes, plane_size, p;

            av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
                               nb_samples);

            planes     = s->planar ? s->nb_channels : 1;
            plane_size = nb_samples * (s->planar ? 1 : s->nb_channels);
            /* round up to a multiple of 16 floats for the SIMD fmac;
             * presumably the audio buffers are padded to absorb the
             * overshoot -- TODO confirm against ff_get_audio_buffer() */
            plane_size = FFALIGN(plane_size, 16);

            /* accumulate: out += in * scale, per plane */
            for (p = 0; p < planes; p++) {
                s->fdsp->vector_fmac_scalar((float *)out_buf->extended_data[p],
                                            (float *) in_buf->extended_data[p],
                                            s->input_scale[i], plane_size);
            }
        }
    }
    av_frame_free(&in_buf);

    out_buf->pts = s->next_pts;
    /* pts advances in samples (time base presumed 1/sample_rate) */
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += nb_samples;

    return ff_filter_frame(outlink, out_buf);
}
开发者ID:Acidburn0zzz,项目名称:FFmpeg,代码行数:48,代码来源:af_amix.c
注:本文中的FFALIGN函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论