This article collects and organizes typical usage examples of FF_ARRAY_ELEMS in C++ code. (Strictly speaking, FF_ARRAY_ELEMS is a preprocessor macro from FFmpeg's libavutil rather than a function; the examples below are drawn from FFmpeg and FFmpeg-based projects.) If you have been wondering what exactly FF_ARRAY_ELEMS does, how to use it, and what it looks like in real code, the hand-picked examples here should help.
The following shows 20 code examples of FF_ARRAY_ELEMS, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
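Before looking at the examples, it helps to know what the macro actually expands to. FF_ARRAY_ELEMS is defined in FFmpeg's libavutil (libavutil/macros.h in current trees) as a compile-time element count for a true array. Below is a minimal, self-contained sketch of that definition and of the classic pitfall that it must never be applied to a pointer; everything in it besides the macro definition itself is illustrative.

#include <stdio.h>

/* FFmpeg's definition: whole-array size divided by the size of one element */
#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
    int table[13] = { 0 };
    int *p = table;

    /* prints 13: the count is computed by the compiler, so it always
     * tracks the declaration of table[] */
    printf("%zu\n", FF_ARRAY_ELEMS(table));

    /* Pitfall (wrong on purpose): on a pointer this is
     * sizeof(int *) / sizeof(int), not an element count.
     * Compilers typically warn (-Wsizeof-pointer-div). */
    printf("%zu\n", FF_ARRAY_ELEMS(p));

    return 0;
}

Every example below is a variation on the same theme: deriving a loop bound or a table size from the array declaration itself, so the count can never drift out of sync with the code that uses it.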
Example 1: ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
{
    int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
    int best_clock_code = 1;
    int best_divisor    = 60;
    int best_error      = INT_MAX;

    if (s->h263_plus) {
        for (i = 0; i < 2; i++) {
            int div, error;
            div = (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den);
            div = av_clip(div, 1, 127);
            error = FFABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div);
            if (error < best_error) {
                best_error      = error;
                best_divisor    = div;
                best_clock_code = i;
            }
        }
    }
    s->custom_pcf = best_clock_code != 1 || best_divisor != 60;
    coded_frame_rate      = 1800000;
    coded_frame_rate_base = (1000 + best_clock_code) * best_divisor;

    avpriv_align_put_bits(&s->pb);

    /* Update the pointer to last GOB */
    s->ptr_lastgob = put_bits_ptr(&s->pb);
    put_bits(&s->pb, 22, 0x20); /* PSC */
    temp_ref = s->picture_number * (int64_t)coded_frame_rate * s->avctx->time_base.num / // FIXME use timestamp
               (coded_frame_rate_base * (int64_t)s->avctx->time_base.den);
    put_sbits(&s->pb, 8, temp_ref); /* TemporalReference */

    put_bits(&s->pb, 1, 1); /* marker */
    put_bits(&s->pb, 1, 0); /* H.263 id */
    put_bits(&s->pb, 1, 0); /* split screen off */
    put_bits(&s->pb, 1, 0); /* camera off */
    put_bits(&s->pb, 1, 0); /* freeze picture release off */

    format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);
    if (!s->h263_plus) {
        /* H.263v1 */
        put_bits(&s->pb, 3, format);
        put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P));
        /* UMV is currently disabled for H.263v1, since the H.263v1 UMV
         * restrictions require checking the predicted MV after the current
         * MB has been computed to see whether we are at the limits. */
        put_bits(&s->pb, 1, 0);       /* Unrestricted Motion Vector: off */
        put_bits(&s->pb, 1, 0);       /* SAC: off */
        put_bits(&s->pb, 1, s->obmc); /* Advanced Prediction */
        put_bits(&s->pb, 1, 0);       /* only I/P frames, no PB frame */
        put_bits(&s->pb, 5, s->qscale);
        put_bits(&s->pb, 1, 0);       /* Continuous Presence Multipoint mode: off */
    } else {
        int ufep = 1;
        /* H.263v2 */
        /* H.263 Plus PTYPE */
        put_bits(&s->pb, 3, 7);
        put_bits(&s->pb, 3, ufep); /* Update Full Extended PTYPE */
        if (format == 8)
            put_bits(&s->pb, 3, 6); /* Custom Source Format */
        else
            put_bits(&s->pb, 3, format);
        put_bits(&s->pb, 1, s->custom_pcf);
        put_bits(&s->pb, 1, s->umvplus);               /* Unrestricted Motion Vector */
        put_bits(&s->pb, 1, 0);                        /* SAC: off */
        put_bits(&s->pb, 1, s->obmc);                  /* Advanced Prediction Mode */
        put_bits(&s->pb, 1, s->h263_aic);              /* Advanced Intra Coding */
        put_bits(&s->pb, 1, s->loop_filter);           /* Deblocking Filter */
        put_bits(&s->pb, 1, s->h263_slice_structured); /* Slice Structured */
        put_bits(&s->pb, 1, 0);                        /* Reference Picture Selection: off */
        put_bits(&s->pb, 1, 0);                        /* Independent Segment Decoding: off */
        put_bits(&s->pb, 1, s->alt_inter_vlc);         /* Alternative Inter VLC */
        put_bits(&s->pb, 1, s->modified_quant);        /* Modified Quantization */
        put_bits(&s->pb, 1, 1);                        /* "1" to prevent start code emulation */
        put_bits(&s->pb, 3, 0);                        /* Reserved */

        put_bits(&s->pb, 3, s->pict_type == AV_PICTURE_TYPE_P);

        put_bits(&s->pb, 1, 0);              /* Reference Picture Resampling: off */
        put_bits(&s->pb, 1, 0);              /* Reduced-Resolution Update: off */
        put_bits(&s->pb, 1, s->no_rounding); /* Rounding Type */
        put_bits(&s->pb, 2, 0);              /* Reserved */
        put_bits(&s->pb, 1, 1);              /* "1" to prevent start code emulation */

        /* This should be here if PLUSPTYPE */
        put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */

        if (format == 8) {
            /* Custom Picture Format (CPFMT) */
            s->aspect_ratio_info = ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);
            put_bits(&s->pb, 4, s->aspect_ratio_info);
            put_bits(&s->pb, 9, (s->width >> 2) - 1);
            put_bits(&s->pb, 1, 1); /* "1" to prevent start code emulation */
            put_bits(&s->pb, 9, (s->height >> 2));
            if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) {
                put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
//......... the rest of this example is omitted .........
Developer ID: 18565773346, Project: android-h264-decoder, Lines: 101, Source: ituh263enc.c
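In Example 1 the macro supplies the size of the static ff_h263_format table to the lookup helper ff_match_2uint16, which maps the frame dimensions to an H.263 source-format code. The sketch below re-creates that calling pattern in isolation; match_2uint16_sketch and the cut-down formats table are hypothetical stand-ins for FFmpeg's ff_match_2uint16 and ff_h263_format, not their actual implementations.

#include <stdint.h>
#include <stdio.h>

#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-in for ff_match_2uint16(): index of the first
 * {w, h} pair that matches, or 0 (the "no standard format" slot). */
static int match_2uint16_sketch(const uint16_t (*tab)[2], int size, int w, int h)
{
    for (int i = 0; i < size; i++)
        if (tab[i][0] == w && tab[i][1] == h)
            return i;
    return 0;
}

int main(void)
{
    /* Cut-down stand-in for the ff_h263_format table: {width, height}. */
    static const uint16_t formats[][2] = {
        {   0,   0 }, /* forbidden / custom */
        { 128,  96 }, /* sub-QCIF */
        { 176, 144 }, /* QCIF */
        { 352, 288 }, /* CIF */
    };

    /* Same calling pattern as the encoder: table plus FF_ARRAY_ELEMS(table),
     * so adding a row to formats[] never requires touching this call. */
    int fmt = match_2uint16_sketch(formats, FF_ARRAY_ELEMS(formats), 176, 144);
    printf("format index: %d\n", fmt); /* 2 */
    return 0;
}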
Example 2: ff_h264_build_ref_list
//......... the beginning of this example is omitted .........
            case 1: {
                const unsigned int abs_diff_pic_num = val + 1;
                int frame_num;

                if (abs_diff_pic_num > sl->max_pic_num) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "abs_diff_pic_num overflow\n");
                    return AVERROR_INVALIDDATA;
                }

                if (modification_of_pic_nums_idc == 0)
                    pred -= abs_diff_pic_num;
                else
                    pred += abs_diff_pic_num;
                pred &= sl->max_pic_num - 1;

                frame_num = pic_num_extract(h, pred, &pic_structure);

                for (i = h->short_ref_count - 1; i >= 0; i--) {
                    ref = h->short_ref[i];
                    assert(ref->reference);
                    assert(!ref->long_ref);
                    if (ref->frame_num == frame_num &&
                        (ref->reference & pic_structure))
                        break;
                }
                if (i >= 0)
                    ref->pic_id = pred;
                break;
            }
            case 2: {
                int long_idx;
                pic_id = val; // long_term_pic_idx

                long_idx = pic_num_extract(h, pic_id, &pic_structure);

                if (long_idx > 31U) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "long_term_pic_idx overflow\n");
                    return AVERROR_INVALIDDATA;
                }
                ref = h->long_ref[long_idx];
                assert(!(ref && !ref->reference));
                if (ref && (ref->reference & pic_structure)) {
                    ref->pic_id = pic_id;
                    assert(ref->long_ref);
                    i = 0;
                } else {
                    i = -1;
                }
                break;
            }
            default:
                av_assert0(0);
            }

            if (i < 0) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "reference picture missing during reorder\n");
                memset(&sl->ref_list[list][index], 0, sizeof(sl->ref_list[0][0])); // FIXME
            } else {
                for (i = index; i + 1 < sl->ref_count[list]; i++) {
                    if (sl->ref_list[list][i].parent &&
                        ref->long_ref == sl->ref_list[list][i].parent->long_ref &&
                        ref->pic_id   == sl->ref_list[list][i].pic_id)
                        break;
                }
                for (; i > index; i--) {
                    sl->ref_list[list][i] = sl->ref_list[list][i - 1];
                }
                ref_from_h264pic(&sl->ref_list[list][index], ref);
                if (FIELD_PICTURE(h)) {
                    pic_as_field(&sl->ref_list[list][index], pic_structure);
                }
            }
        }
    }

    for (list = 0; list < sl->list_count; list++) {
        for (index = 0; index < sl->ref_count[list]; index++) {
            if (   !sl->ref_list[list][index].parent
                || (!FIELD_PICTURE(h) && (sl->ref_list[list][index].reference & 3) != 3)) {
                int i;
                av_log(h->avctx, AV_LOG_ERROR, "Missing reference picture, default is %d\n", h->default_ref[list].poc);
                for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
                    h->last_pocs[i] = INT_MIN;
                if (h->default_ref[list].parent
                    && !(!FIELD_PICTURE(h) && (h->default_ref[list].reference & 3) != 3))
                    sl->ref_list[list][index] = h->default_ref[list];
                else
                    return -1;
            }
            av_assert0(av_buffer_get_ref_count(sl->ref_list[list][index].parent->f->buf[0]) > 0);
        }
    }

    if (FRAME_MBAFF(h))
        h264_fill_mbaff_ref_list(sl);

    return 0;
}
Developer ID: Bilibili, Project: FFmpeg, Lines: 101, Source: h264_refs.c
Example 3: av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channels       = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = frame_copy_props(dst, src, 0);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        ret = av_frame_copy(dst, src);
        if (ret < 0)
            av_frame_unref(dst);

        return ret;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
                                             src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        CHECK_CHANNELS_CONSISTENCY(src);

        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}
Developer ID: DaveDaCoda, Project: mythtv, Lines: 85, Source: frame.c
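The loop over src->buf in Example 3 is the most common shape of the macro in FFmpeg: bounding iteration over a fixed-size member array (AVFrame.buf has AV_NUM_DATA_POINTERS slots). Here is a minimal sketch of the same idiom on a toy struct; frame_sketch and count_bufs are illustrative names, not FFmpeg API.

#include <stddef.h>
#include <stdio.h>

#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

/* Toy stand-in for AVFrame: a fixed-size array of buffer pointers. */
struct frame_sketch {
    void *buf[8];
};

/* Count the used slots. The bound comes from the struct definition,
 * so resizing buf[] can never desynchronize this loop. */
static int count_bufs(const struct frame_sketch *f)
{
    int n = 0;
    for (size_t i = 0; i < FF_ARRAY_ELEMS(f->buf); i++)
        if (f->buf[i])
            n++;
    return n;
}

int main(void)
{
    int a, b;
    struct frame_sketch f = { .buf = { &a, &b } };
    printf("%d\n", count_bufs(&f)); /* 2 */
    return 0;
}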
Example 4: asf_write_header1
//......... the beginning of this example is omitted .........
                avio_wl16(pb, n + 1);
                avio_wl16(pb, 26); // name_len
                avio_wl16(pb, 3);  // value_type
                avio_wl32(pb, 4);  // value_len
                avio_put_str16le(pb, "AspectRatioX");
                avio_wl32(pb, sar.num);

                avio_wl16(pb, 0);
                // the stream number is set like this below
                avio_wl16(pb, n + 1);
                avio_wl16(pb, 26); // name_len
                avio_wl16(pb, 3);  // value_type
                avio_wl32(pb, 4);  // value_len
                avio_put_str16le(pb, "AspectRatioY");
                avio_wl32(pb, sar.den);
            }
        }
        end_header(pb, hpos2);
    } else {
        avio_wl32(pb, 0);
    }
    end_header(pb, hpos);

    /* title and other info */
    if (has_title) {
        int len;
        uint8_t *buf;
        AVIOContext *dyn_buf;

        if (avio_open_dyn_buf(&dyn_buf) < 0)
            return AVERROR(ENOMEM);

        hpos = put_header(pb, &ff_asf_comment_header);

        for (n = 0; n < FF_ARRAY_ELEMS(tags); n++) {
            len = tags[n] ? avio_put_str16le(dyn_buf, tags[n]->value) : 0;
            avio_wl16(pb, len);
        }
        len = avio_close_dyn_buf(dyn_buf, &buf);
        avio_write(pb, buf, len);
        av_freep(&buf);
        end_header(pb, hpos);
    }
    if (metadata_count) {
        AVDictionaryEntry *tag = NULL;

        hpos = put_header(pb, &ff_asf_extended_content_header);
        avio_wl16(pb, metadata_count);

        while ((tag = av_dict_get(s->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
            put_str16(pb, tag->key);
            avio_wl16(pb, 0);
            put_str16(pb, tag->value);
        }
        end_header(pb, hpos);
    }
    /* chapters using ASF markers */
    if (!asf->is_streamed && s->nb_chapters) {
        int ret;
        if (ret = asf_write_markers(s))
            return ret;
    }

    /* stream headers */
    for (n = 0; n < s->nb_streams; n++) {
        int64_t es_pos;
        //  ASFStream *stream = &asf->streams[n];

        enc                 = s->streams[n]->codec;
        asf->streams[n].num = n + 1;
Developer ID: 26mansi, Project: FFmpeg, Lines: 67, Source: asfenc.c
Example 5: add_codec
/* add a codec and set the default parameters */
static void add_codec(FFServerStream *stream, AVCodecContext *av,
                      FFServerConfig *config)
{
    AVStream *st;
    AVDictionary **opts, *recommended = NULL;
    char *enc_config;

    if (stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
        return;

    opts = av->codec_type == AVMEDIA_TYPE_AUDIO ?
           &config->audio_opts : &config->video_opts;
    av_dict_copy(&recommended, *opts, 0);
    av_opt_set_dict2(av->priv_data, opts, AV_OPT_SEARCH_CHILDREN);
    av_opt_set_dict2(av, opts, AV_OPT_SEARCH_CHILDREN);

    if (av_dict_count(*opts))
        av_log(NULL, AV_LOG_WARNING,
               "Something is wrong, %d options are not set!\n",
               av_dict_count(*opts));

    if (!config->stream_use_defaults) {
        switch (av->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (av->bit_rate == 0)
                report_config_error(config->filename, config->line_num,
                                    AV_LOG_ERROR, &config->errors,
                                    "audio bit rate is not set\n");
            if (av->sample_rate == 0)
                report_config_error(config->filename, config->line_num,
                                    AV_LOG_ERROR, &config->errors,
                                    "audio sample rate is not set\n");
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (av->width == 0 || av->height == 0)
                report_config_error(config->filename, config->line_num,
                                    AV_LOG_ERROR, &config->errors,
                                    "video size is not set\n");
            break;
        default:
            av_assert0(0);
        }
        goto done;
    }

    /* stream_use_defaults = true */

    /* compute default parameters */
    switch (av->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (!av_dict_get(recommended, "b", NULL, 0)) {
            av->bit_rate = 64000;
            av_dict_set_int(&recommended, "b", av->bit_rate, 0);
            WARNING("Setting default value for audio bit rate = %d. "
                    "Use NoDefaults to disable it.\n",
                    av->bit_rate);
        }
        if (!av_dict_get(recommended, "ar", NULL, 0)) {
            av->sample_rate = 22050;
            av_dict_set_int(&recommended, "ar", av->sample_rate, 0);
            WARNING("Setting default value for audio sample rate = %d. "
                    "Use NoDefaults to disable it.\n",
                    av->sample_rate);
        }
        if (!av_dict_get(recommended, "ac", NULL, 0)) {
            av->channels = 1;
            av_dict_set_int(&recommended, "ac", av->channels, 0);
            WARNING("Setting default value for audio channel count = %d. "
                    "Use NoDefaults to disable it.\n",
                    av->channels);
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (!av_dict_get(recommended, "b", NULL, 0)) {
            av->bit_rate = 64000;
            av_dict_set_int(&recommended, "b", av->bit_rate, 0);
            WARNING("Setting default value for video bit rate = %d. "
                    "Use NoDefaults to disable it.\n",
                    av->bit_rate);
        }
        if (!av_dict_get(recommended, "time_base", NULL, 0)) {
            av->time_base.den = 5;
            av->time_base.num = 1;
            av_dict_set(&recommended, "time_base", "1/5", 0);
            WARNING("Setting default value for video frame rate = %d. "
                    "Use NoDefaults to disable it.\n",
                    av->time_base.den);
        }
        if (!av_dict_get(recommended, "video_size", NULL, 0)) {
            av->width  = 160;
            av->height = 128;
            av_dict_set(&recommended, "video_size", "160x128", 0);
            WARNING("Setting default value for video size = %dx%d. "
                    "Use NoDefaults to disable it.\n",
                    av->width, av->height);
        }
        /* Bitrate tolerance is less for streaming */
        if (!av_dict_get(recommended, "bt", NULL, 0)) {
            av->bit_rate_tolerance = FFMAX(av->bit_rate / 4,
//......... the rest of this example is omitted .........
Developer ID: rcombs, Project: FFmpeg, Lines: 101, Source: ffserver_config.c
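Example 5 opens with a capacity guard: add_codec() refuses to append once stream->nb_streams reaches FF_ARRAY_ELEMS(stream->streams), so the bound follows the array declared in FFServerStream. Below is a minimal sketch of that guard on a hypothetical fixed-capacity struct; stream_list and add_stream are illustrative names, not the ffserver API.

#include <stdio.h>

#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical fixed-capacity container in the spirit of FFServerStream. */
struct stream_list {
    int nb_streams;
    int streams[4];
};

/* Append with a capacity guard derived from the array itself. */
static int add_stream(struct stream_list *s, int v)
{
    if (s->nb_streams >= (int)FF_ARRAY_ELEMS(s->streams))
        return -1; /* full: refuse, as add_codec() silently does */
    s->streams[s->nb_streams++] = v;
    return 0;
}

int main(void)
{
    struct stream_list s = { 0 };
    for (int i = 0; i < 6; i++)
        if (add_stream(&s, i) < 0)
            printf("stream %d rejected: capacity %zu reached\n",
                   i, FF_ARRAY_ELEMS(s.streams));
    return 0;
}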
Example 6: swap_channel_layouts_on_filter
static void swap_channel_layouts_on_filter(AVFilterContext *filter)
{
    AVFilterLink *link = NULL;
    int i, j, k;

    for (i = 0; i < filter->nb_inputs; i++) {
        link = filter->inputs[i];

        if (link->type == AVMEDIA_TYPE_AUDIO &&
            link->out_channel_layouts->nb_channel_layouts == 1)
            break;
    }
    if (i == filter->nb_inputs)
        return;

    for (i = 0; i < filter->nb_outputs; i++) {
        AVFilterLink *outlink = filter->outputs[i];
        int best_idx = -1, best_score = INT_MIN, best_count_diff = INT_MAX;

        if (outlink->type != AVMEDIA_TYPE_AUDIO ||
            outlink->in_channel_layouts->nb_channel_layouts < 2)
            continue;

        for (j = 0; j < outlink->in_channel_layouts->nb_channel_layouts; j++) {
            uint64_t  in_chlayout = link->out_channel_layouts->channel_layouts[0];
            uint64_t out_chlayout = outlink->in_channel_layouts->channel_layouts[j];
            int  in_channels  = av_get_channel_layout_nb_channels(in_chlayout);
            int  out_channels = av_get_channel_layout_nb_channels(out_chlayout);
            int  count_diff   = out_channels - in_channels;
            int  matched_channels, extra_channels;
            int  score = 100000;

            if (FF_LAYOUT2COUNT(in_chlayout) || FF_LAYOUT2COUNT(out_chlayout)) {
                /* Compute the score in case the input or output layout encodes
                   a channel count; in this case the score is not altered by
                   the computation afterwards, as in_chlayout and
                   out_chlayout have both been set to 0 */
                if (FF_LAYOUT2COUNT(in_chlayout))
                    in_channels = FF_LAYOUT2COUNT(in_chlayout);
                if (FF_LAYOUT2COUNT(out_chlayout))
                    out_channels = FF_LAYOUT2COUNT(out_chlayout);
                score -= 10000 + FFABS(out_channels - in_channels) +
                         (in_channels > out_channels ? 10000 : 0);
                in_chlayout = out_chlayout = 0;
                /* Let the remaining computation run, even if the score
                   value is not altered */
            }

            /* channel substitution */
            for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) {
                uint64_t cmp0 = ch_subst[k][0];
                uint64_t cmp1 = ch_subst[k][1];
                if (( in_chlayout & cmp0) && (!(out_chlayout & cmp0)) &&
                    (out_chlayout & cmp1) && (!( in_chlayout & cmp1))) {
                    in_chlayout  &= ~cmp0;
                    out_chlayout &= ~cmp1;
                    /* add score for channel match, minus a deduction for
                       having to do the substitution */
                    score += 10 * av_get_channel_layout_nb_channels(cmp1) - 2;
                }
            }

            /* no penalty for LFE channel mismatch */
            if ( (in_chlayout & AV_CH_LOW_FREQUENCY) &&
                (out_chlayout & AV_CH_LOW_FREQUENCY))
                score += 10;
            in_chlayout  &= ~AV_CH_LOW_FREQUENCY;
            out_chlayout &= ~AV_CH_LOW_FREQUENCY;

            matched_channels = av_get_channel_layout_nb_channels(in_chlayout &
                                                                 out_chlayout);
            extra_channels   = av_get_channel_layout_nb_channels(out_chlayout &
                                                                 (~in_chlayout));
            score += 10 * matched_channels - 5 * extra_channels;

            if (score > best_score ||
                (count_diff < best_count_diff && score == best_score)) {
                best_score      = score;
                best_idx        = j;
                best_count_diff = count_diff;
            }
        }
        av_assert0(best_idx >= 0);
        FFSWAP(uint64_t, outlink->in_channel_layouts->channel_layouts[0],
               outlink->in_channel_layouts->channel_layouts[best_idx]);
    }
}
Developer ID: Ivnz, Project: iFrameExtracotrWithFFMPEG, Lines: 88, Source: avfiltergraph.c
Example 7: cuda_transfer_data_from
static int cuda_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
                                   const AVFrame *src)
{
    CUDAFramesContext *priv = ctx->internal->priv;
    AVCUDADeviceContext *device_hwctx = ctx->device_ctx->hwctx;
    CudaFunctions *cu = device_hwctx->internal->cuda_dl;

    CUcontext dummy;
    CUresult err;
    int i;

    err = cu->cuCtxPushCurrent(device_hwctx->cuda_ctx);
    if (err != CUDA_SUCCESS)
        return AVERROR_UNKNOWN;

    for (i = 0; i < FF_ARRAY_ELEMS(src->data) && src->data[i]; i++) {
        CUDA_MEMCPY2D cpy = {
            .srcMemoryType = CU_MEMORYTYPE_DEVICE,
            .dstMemoryType = CU_MEMORYTYPE_HOST,
            .srcDevice     = (CUdeviceptr)src->data[i],
            .dstHost       = dst->data[i],
            .srcPitch      = src->linesize[i],
            .dstPitch      = dst->linesize[i],
            .WidthInBytes  = FFMIN(src->linesize[i], dst->linesize[i]),
            .Height        = src->height >> (i ? priv->shift_height : 0),
        };

        err = cu->cuMemcpy2D(&cpy);
        if (err != CUDA_SUCCESS) {
            av_log(ctx, AV_LOG_ERROR, "Error transferring the data from the CUDA frame\n");
            return AVERROR_UNKNOWN;
        }
    }

    cu->cuCtxPopCurrent(&dummy);

    return 0;
}

static int cuda_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
                                 const AVFrame *src)
{
    CUDAFramesContext *priv = ctx->internal->priv;
    AVCUDADeviceContext *device_hwctx = ctx->device_ctx->hwctx;
    CudaFunctions *cu = device_hwctx->internal->cuda_dl;

    CUcontext dummy;
    CUresult err;
    int i;

    err = cu->cuCtxPushCurrent(device_hwctx->cuda_ctx);
    if (err != CUDA_SUCCESS)
        return AVERROR_UNKNOWN;

    for (i = 0; i < FF_ARRAY_ELEMS(src->data) && src->data[i]; i++) {
        CUDA_MEMCPY2D cpy = {
            .srcMemoryType = CU_MEMORYTYPE_HOST,
            .dstMemoryType = CU_MEMORYTYPE_DEVICE,
            .srcHost       = src->data[i],
            .dstDevice     = (CUdeviceptr)dst->data[i],
            .srcPitch      = src->linesize[i],
            .dstPitch      = dst->linesize[i],
            .WidthInBytes  = FFMIN(src->linesize[i], dst->linesize[i]),
            .Height        = src->height >> (i ? priv->shift_height : 0),
        };

        err = cu->cuMemcpy2D(&cpy);
        if (err != CUDA_SUCCESS) {
            av_log(ctx, AV_LOG_ERROR, "Error transferring the data to the CUDA frame\n");
            return AVERROR_UNKNOWN;
        }
    }

    cu->cuCtxPopCurrent(&dummy);

    return 0;
}

static void cuda_device_uninit(AVHWDeviceContext *ctx)
{
    AVCUDADeviceContext *hwctx = ctx->hwctx;

    if (hwctx->internal) {
        if (hwctx->internal->is_allocated && hwctx->cuda_ctx) {
            hwctx->internal->cuda_dl->cuCtxDestroy(hwctx->cuda_ctx);
            hwctx->cuda_ctx = NULL;
        }
        cuda_free_functions(&hwctx->internal->cuda_dl);
    }

    av_freep(&hwctx->internal);
}
Developer ID: Hero2000, Project: CainCamera, Lines: 92, Source: hwcontext_cuda.c
Example 8: opus_decode_packet
static int opus_decode_packet(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
{
    OpusContext *c      = avctx->priv_data;
    AVFrame *frame      = data;
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    int coded_samples   = 0;
    int decoded_samples = INT_MAX;
    int delayed_samples = 0;
    int i, ret;

    /* calculate the number of delayed samples */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];
        s->out[0] =
        s->out[1] = NULL;
        delayed_samples = FFMAX(delayed_samples,
                                s->delayed_samples + av_audio_fifo_size(c->sync_buffers[i]));
    }

    /* decode the header of the first sub-packet to find out the sample count */
    if (buf) {
        OpusPacket *pkt = &c->streams[0].packet;

        ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
            return ret;
        }

        coded_samples += pkt->frame_count * pkt->frame_duration;
        c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
    }

    frame->nb_samples = coded_samples + delayed_samples;

    /* no input or buffered data => nothing to do */
    if (!frame->nb_samples) {
        *got_frame_ptr = 0;
        return 0;
    }

    /* setup the data buffers */
    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;
    frame->nb_samples = 0;

    memset(c->out, 0, c->nb_streams * 2 * sizeof(*c->out));
    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];
        if (!map->copy)
            c->out[2 * map->stream_idx + map->channel_idx] = (float*)frame->extended_data[i];
    }

    /* read the data from the sync buffers */
    for (i = 0; i < c->nb_streams; i++) {
        float **out   = c->out + 2 * i;
        int sync_size = av_audio_fifo_size(c->sync_buffers[i]);

        float sync_dummy[32];
        int   out_dummy = (!out[0]) | ((!out[1]) << 1);

        if (!out[0])
            out[0] = sync_dummy;
        if (!out[1])
            out[1] = sync_dummy;
        if (out_dummy && sync_size > FF_ARRAY_ELEMS(sync_dummy))
            return AVERROR_BUG;

        ret = av_audio_fifo_read(c->sync_buffers[i], (void**)out, sync_size);
        if (ret < 0)
            return ret;

        if (out_dummy & 1)
            out[0] = NULL;
        else
            out[0] += ret;
        if (out_dummy & 2)
            out[1] = NULL;
        else
            out[1] += ret;

        c->out_size[i] = frame->linesize[0] - ret * sizeof(float);
    }

    /* decode each sub-packet */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        if (i && buf) {
            ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
                return ret;
            }
            if (coded_samples != s->packet.frame_count * s->packet.frame_duration) {
                av_log(avctx, AV_LOG_ERROR,
                       "Mismatching coded sample count in substream %d.\n", i);
                return AVERROR_INVALIDDATA;
            }
//......... the rest of this example is omitted .........
Developer ID: KangLin, Project: FFmpeg, Lines: 101, Source: opusdec.c
Example 9: fill_slice_long
static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
                            unsigned position, unsigned size)
{
    const H264Context *h = avctx->priv_data;
    struct dxva_context *ctx = avctx->hwaccel_context;
    unsigned list;

    memset(slice, 0, sizeof(*slice));
    slice->BSNALunitDataLocation = position;
    slice->SliceBytesInBuffer    = size;
    slice->wBadSliceChopping     = 0;

    slice->first_mb_in_slice    = (h->mb_y >> FIELD_OR_MBAFF_PICTURE) * h->mb_width + h->mb_x;
    slice->NumMbsForSlice       = 0; /* XXX it is set once we have all slices */
    slice->BitOffsetToSliceData = get_bits_count(&h->gb);
    slice->slice_type           = ff_h264_get_slice_type(h);
    if (h->slice_type_fixed)
        slice->slice_type += 5;
    slice->luma_log2_weight_denom   = h->luma_log2_weight_denom;
    slice->chroma_log2_weight_denom = h->chroma_log2_weight_denom;
    if (h->list_count > 0)
        slice->num_ref_idx_l0_active_minus1 = h->ref_count[0] - 1;
    if (h->list_count > 1)
        slice->num_ref_idx_l1_active_minus1 = h->ref_count[1] - 1;
    slice->slice_alpha_c0_offset_div2 = h->slice_alpha_c0_offset / 2 - 26;
    slice->slice_beta_offset_div2     = h->slice_beta_offset     / 2 - 26;
    slice->Reserved8Bits              = 0;

    for (list = 0; list < 2; list++) {
        unsigned i;
        for (i = 0; i < FF_ARRAY_ELEMS(slice->RefPicList[list]); i++) {
            if (list < h->list_count && i < h->ref_count[list]) {
                const Picture *r = &h->ref_list[list][i];
                unsigned plane;
                fill_picture_entry(&slice->RefPicList[list][i],
                                   ff_dxva2_get_surface_index(ctx, r),
                                   r->f.reference == PICT_BOTTOM_FIELD);
                for (plane = 0; plane < 3; plane++) {
                    int w, o;
                    if (plane == 0 && h->luma_weight_flag[list]) {
                        w = h->luma_weight[i][list][0];
                        o = h->luma_weight[i][list][1];
                    } else if (plane >= 1 && h->chroma_weight_flag[list]) {
                        w = h->chroma_weight[i][list][plane-1][0];
                        o = h->chroma_weight[i][list][plane-1][1];
                    } else {
                        w = 1 << (plane == 0 ? h->luma_log2_weight_denom :
                                               h->chroma_log2_weight_denom);
                        o = 0;
                    }
                    slice->Weights[list][i][plane][0] = w;
                    slice->Weights[list][i][plane][1] = o;
                }
            } else {
                unsigned plane;
                slice->RefPicList[list][i].bPicEntry = 0xff;
                for (plane = 0; plane < 3; plane++) {
                    slice->Weights[list][i][plane][0] = 0;
                    slice->Weights[list][i][plane][1] = 0;
                }
            }
        }
    }

    slice->slice_qs_delta    = 0; /* XXX not implemented by Libav */
    slice->slice_qp_delta    = h->qscale - h->pps.init_qp;
    slice->redundant_pic_cnt = h->redundant_pic_count;
    if (h->slice_type == AV_PICTURE_TYPE_B)
        slice->direct_spatial_mv_pred_flag = h->direct_spatial_mv_pred;
    slice->cabac_init_idc = h->pps.cabac ? h->cabac_init_idc : 0;
    if (h->deblocking_filter < 2)
        slice->disable_deblocking_filter_idc = 1 - h->deblocking_filter;
    else
        slice->disable_deblocking_filter_idc = h->deblocking_filter;
    slice->slice_id = h->current_slice - 1;
}
Developer ID: dwbuiten, Project: libav, Lines: 75, Source: dxva2_h264.c
Example 10: vaapi_encode_h264_init_sequence_params
static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
{
    VAAPIEncodeContext                 *ctx = avctx->priv_data;
    VAEncSequenceParameterBufferH264  *vseq = ctx->codec_sequence_params;
    VAEncPictureParameterBufferH264   *vpic = ctx->codec_picture_params;
    VAAPIEncodeH264Context            *priv = ctx->priv_data;
    VAAPIEncodeH264MiscSequenceParams *mseq = &priv->misc_sequence_params;
    int i;

    {
        vseq->seq_parameter_set_id = 0;

        vseq->level_idc = avctx->level;

        vseq->max_num_ref_frames = 2;

        vseq->picture_width_in_mbs  = priv->mb_width;
        vseq->picture_height_in_mbs = priv->mb_height;

        vseq->seq_fields.bits.chroma_format_idc         = 1;
        vseq->seq_fields.bits.frame_mbs_only_flag       = 1;
        vseq->seq_fields.bits.direct_8x8_inference_flag = 1;
        vseq->seq_fields.bits.log2_max_frame_num_minus4 = 4;
        vseq->seq_fields.bits.pic_order_cnt_type        = 0;

        if (ctx->input_width  != ctx->aligned_width ||
            ctx->input_height != ctx->aligned_height) {
            vseq->frame_cropping_flag = 1;

            vseq->frame_crop_left_offset   = 0;
            vseq->frame_crop_right_offset  =
                (ctx->aligned_width - ctx->input_width) / 2;
            vseq->frame_crop_top_offset    = 0;
            vseq->frame_crop_bottom_offset =
                (ctx->aligned_height - ctx->input_height) / 2;
        } else {
            vseq->frame_cropping_flag = 0;
        }

        vseq->bits_per_second = avctx->bit_rate;
        if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
            vseq->num_units_in_tick = avctx->framerate.num;
            vseq->time_scale        = 2 * avctx->framerate.den;
        } else {
            vseq->num_units_in_tick = avctx->time_base.num;
            vseq->time_scale        = 2 * avctx->time_base.den;
        }

        vseq->intra_period     = ctx->p_per_i * (ctx->b_per_p + 1);
        vseq->intra_idr_period = vseq->intra_period;
        vseq->ip_period        = ctx->b_per_p + 1;
    }

    {
        vpic->CurrPic.picture_id = VA_INVALID_ID;
        vpic->CurrPic.flags      = VA_PICTURE_H264_INVALID;

        for (i = 0; i < FF_ARRAY_ELEMS(vpic->ReferenceFrames); i++) {
            vpic->ReferenceFrames[i].picture_id = VA_INVALID_ID;
            vpic->ReferenceFrames[i].flags      = VA_PICTURE_H264_INVALID;
        }

        vpic->coded_buf = VA_INVALID_ID;

        vpic->pic_parameter_set_id = 0;
        vpic->seq_parameter_set_id = 0;

        vpic->num_ref_idx_l0_active_minus1 = 0;
        vpic->num_ref_idx_l1_active_minus1 = 0;

        vpic->pic_fields.bits.entropy_coding_mode_flag =
            ((avctx->profile & 0xff) != 66);
        vpic->pic_fields.bits.weighted_pred_flag  = 0;
        vpic->pic_fields.bits.weighted_bipred_idc = 0;
        vpic->pic_fields.bits.transform_8x8_mode_flag =
            ((avctx->profile & 0xff) >= 100);

        vpic->pic_init_qp = priv->fixed_qp_idr;
    }

    {
        mseq->profile_idc = avctx->profile & 0xff;

        if (avctx->profile & FF_PROFILE_H264_CONSTRAINED)
            mseq->constraint_set1_flag = 1;
        if (avctx->profile & FF_PROFILE_H264_INTRA)
            mseq->constraint_set3_flag = 1;
    }

    return 0;
}
Developer ID: tkoeppe, Project: libav, Lines: 91, Source: vaapi_encode_h264.c
Example 11: vaapi_encode_h264_init_slice_params
static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
                                               VAAPIEncodePicture *pic,
                                               VAAPIEncodeSlice *slice)
{
    VAAPIEncodeContext                *ctx = avctx->priv_data;
    VAEncSequenceParameterBufferH264 *vseq = ctx->codec_sequence_params;
    VAEncPictureParameterBufferH264  *vpic = pic->codec_picture_params;
    VAEncSliceParameterBufferH264  *vslice = slice->codec_slice_params;
    VAAPIEncodeH264Context           *priv = ctx->priv_data;
    VAAPIEncodeH264Slice           *pslice;
    VAAPIEncodeH264MiscSliceParams *mslice;
    int i;

    slice->priv_data = av_mallocz(sizeof(*pslice));
    if (!slice->priv_data)
        return AVERROR(ENOMEM);
    pslice = slice->priv_data;
    mslice = &pslice->misc_slice_params;

    if (pic->type == PICTURE_TYPE_IDR)
        mslice->nal_unit_type = NAL_IDR_SLICE;
    else
        mslice->nal_unit_type = NAL_SLICE;

    switch (pic->type) {
    case PICTURE_TYPE_IDR:
        vslice->slice_type  = SLICE_TYPE_I;
        mslice->nal_ref_idc = 3;
        break;
    case PICTURE_TYPE_I:
        vslice->slice_type  = SLICE_TYPE_I;
        mslice->nal_ref_idc = 2;
        break;
    case PICTURE_TYPE_P:
        vslice->slice_type  = SLICE_TYPE_P;
        mslice->nal_ref_idc = 1;
        break;
    case PICTURE_TYPE_B:
        vslice->slice_type  = SLICE_TYPE_B;
        mslice->nal_ref_idc = 0;
        break;
    default:
        av_assert0(0 && "invalid picture type");
    }

    // Only one slice per frame.
    vslice->macroblock_address = 0;
    vslice->num_macroblocks    = priv->mb_width * priv->mb_height;

    vslice->macroblock_info = VA_INVALID_ID;

    vslice->pic_parameter_set_id = vpic->pic_parameter_set_id;
    vslice->idr_pic_id           = priv->idr_pic_count++;

    vslice->pic_order_cnt_lsb = pic->display_order &
        ((1 << (4 + vseq->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4)) - 1);

    for (i = 0; i < FF_ARRAY_ELEMS(vslice->RefPicList0); i++) {
        vslice->RefPicList0[i].picture_id = VA_INVALID_ID;
        vslice->RefPicList0[i].flags      = VA_PICTURE_H264_INVALID;
        vslice->RefPicList1[i].picture_id = VA_INVALID_ID;
        vslice->RefPicList1[i].flags      = VA_PICTURE_H264_INVALID;
    }

    av_assert0(pic->nb_refs <= 2);
    if (pic->nb_refs >= 1) {
        // Backward reference for P- or B-frame.
        av_assert0(pic->type == PICTURE_TYPE_P ||
                   pic->type == PICTURE_TYPE_B);

        vslice->num_ref_idx_l0_active_minus1 = 0;
        vslice->RefPicList0[0] = vpic->ReferenceFrames[0];
    }
    if (pic->nb_refs >= 2) {
        // Forward reference for B-frame.
        av_assert0(pic->type == PICTURE_TYPE_B);

        vslice->num_ref_idx_l1_active_minus1 = 0;
        vslice->RefPicList1[0] = vpic->ReferenceFrames[1];
    }

    if (pic->type == PICTURE_TYPE_B)
        vslice->slice_qp_delta = priv->fixed_qp_b   - vpic->pic_init_qp;
    else if (pic->type == PICTURE_TYPE_P)
        vslice->slice_qp_delta = priv->fixed_qp_p   - vpic->pic_init_qp;
    else
        vslice->slice_qp_delta = priv->fixed_qp_idr - vpic->pic_init_qp;

    vslice->direct_spatial_mv_pred_flag = 1;

    return 0;
}
Developer ID: tkoeppe, Project: libav, Lines: 92, Source: vaapi_encode_h264.c
Example 12: dxva2_init_pool
static int dxva2_init_pool(AVHWFramesContext *ctx)
{
    AVDXVA2FramesContext *frames_hwctx = ctx->hwctx;
    AVDXVA2DeviceContext *device_hwctx = ctx->device_ctx->hwctx;
    DXVA2FramesContext *s = ctx->internal->priv;

    int decode = (frames_hwctx->surface_type == DXVA2_VideoDecoderRenderTarget);
    int i;
    HRESULT hr;

    if (ctx->initial_pool_size <= 0)
        return 0;

    hr = IDirect3DDeviceManager9_OpenDeviceHandle(device_hwctx->devmgr, &s->device_handle);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Failed to open device handle\n");
        return AVERROR_UNKNOWN;
    }

    hr = IDirect3DDeviceManager9_GetVideoService(device_hwctx->devmgr,
                                                 s->device_handle,
                                                 decode ? &video_decoder_service : &video_processor_service,
                                                 (void **)&s->service);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create the video service\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
        if (ctx->sw_format == supported_formats[i].pix_fmt) {
            s->format = supported_formats[i].d3d_format;
            break;
        }
    }
    if (i == FF_ARRAY_ELEMS(supported_formats)) {
        av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n",
               av_get_pix_fmt_name(ctx->sw_format));
        return AVERROR(EINVAL);
    }

    s->surfaces_internal = av_mallocz_array(ctx->initial_pool_size,
                                            sizeof(*s->surfaces_internal));
    if (!s->surfaces_internal)
        return AVERROR(ENOMEM);

    hr = IDirectXVideoAccelerationService_CreateSurface(s->service,
                                                        ctx->width, ctx->height,
                                                        ctx->initial_pool_size - 1,
                                                        s->format, D3DPOOL_DEFAULT, 0,
                                                        frames_hwctx->surface_type,
                                                        s->surfaces_internal, NULL);
    if (FAILED(hr)) {
        av_log(ctx, AV_LOG_ERROR, "Could not create the surfaces\n");
        return AVERROR_UNKNOWN;
    }

    ctx->internal->pool_internal = av_buffer_pool_init2(sizeof(*s->surfaces_internal),
                                                        ctx, dxva2_pool_alloc, NULL);
    if (!ctx->internal->pool_internal)
        return AVERROR(ENOMEM);

    frames_hwctx->surfaces    = s->surfaces_internal;
    frames_hwctx->nb_surfaces = ctx->initial_pool_size;

    return 0;
}
Developer ID: AddictXQ, Project: FFmpeg, Lines: 66, Source: hwcontext_dxva2.c
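Example 12 shows the "ran off the end" idiom: the search loop leaves i equal to FF_ARRAY_ELEMS(supported_formats) exactly when no table entry matched, and the code tests for that afterwards. Below is a self-contained sketch of the same test, with a hypothetical fmt_map table standing in for supported_formats.

#include <stdio.h>

#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical format map in the spirit of supported_formats[]. */
static const struct { int pix_fmt; int d3d_format; } fmt_map[] = {
    { 0 /* stands in for e.g. NV12 */, 100 },
    { 1 /* stands in for e.g. P010 */, 101 },
};

int main(void)
{
    int wanted = 2; /* not present in the table */
    size_t i;

    for (i = 0; i < FF_ARRAY_ELEMS(fmt_map); i++)
        if (fmt_map[i].pix_fmt == wanted)
            break;

    /* The loop index equals the table size iff nothing matched. */
    if (i == FF_ARRAY_ELEMS(fmt_map))
        printf("unsupported format\n");
    else
        printf("d3d format: %d\n", fmt_map[i].d3d_format);
    return 0;
}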
Example 13: ff_h264_fill_default_ref_list
int ff_h264_fill_default_ref_list(H264Context *h)
{
    int i, len;

    if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
        H264Picture *sorted[32];
        int cur_poc, list;
        int lens[2];

        if (FIELD_PICTURE(h))
            cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD];
        else
            cur_poc = h->cur_pic_ptr->poc;

        for (list = 0; list < 2; list++) {
            len  = add_sorted(sorted,       h->short_ref, h->short_ref_count, cur_poc, 1 ^ list);
            len += add_sorted(sorted + len, h->short_ref, h->short_ref_count, cur_poc, 0 ^ list);
            av_assert0(len <= 32);

            len  = build_def_list(h->default_ref_list[list], FF_ARRAY_ELEMS(h->default_ref_list[0]),
                                  sorted, len, 0, h->picture_structure);
            len += build_def_list(h->default_ref_list[list] + len,
                                  FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
                                  h->long_ref, 16, 1, h->picture_structure);
            av_assert0(len <= 32);

            if (len < h->ref_count[list])
                memset(&h->default_ref_list[list][len], 0, sizeof(H264Picture) * (h->ref_count[list] - len));
            lens[list] = len;
        }

        if (lens[0] == lens[1] && lens[1] > 1) {
            for (i = 0; i < lens[0] &&
                        h->default_ref_list[0][i].f.buf[0]->buffer ==
                        h->default_ref_list[1][i].f.buf[0]->buffer; i++);
            if (i == lens[0]) {
                H264Picture tmp;
                COPY_PICTURE(&tmp, &h->default_ref_list[1][0]);
                COPY_PICTURE(&h->default_ref_list[1][0], &h->default_ref_list[1][1]);
                COPY_PICTURE(&h->default_ref_list[1][1], &tmp);
            }
        }
    } else {
        len  = build_def_list(h->default_ref_list[0], FF_ARRAY_ELEMS(h->default_ref_list[0]),
                              h->short_ref, h->short_ref_count, 0, h->picture_structure);
        len += build_def_list(h->default_ref_list[0] + len,
                              FF_ARRAY_ELEMS(h->default_ref_list[0]) - len,
                              h->long_ref, 16, 1, h->picture_structure);
//......... the rest of this example is omitted .........