This article collects typical usage examples of GST_BUFFER_TIMESTAMP in C/C++ (it is a GStreamer macro rather than a function). If you are wondering what GST_BUFFER_TIMESTAMP does in practice, how to use it, or where to find real-world examples of it, the hand-picked code samples below may help.
A total of 20 code examples of GST_BUFFER_TIMESTAMP are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
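Before the individual examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) of the basic pattern. GST_BUFFER_TIMESTAMP() is an lvalue macro, so it can be both read and assigned; in GStreamer 1.x it resolves to GST_BUFFER_PTS(). The helper name make_stamped_buffer and the assumption of mono 16-bit samples are illustrative only.
#include <gst/gst.h>
/* Build a buffer and stamp it from a running sample counter, the way most of
 * the examples below do, so that rounding errors do not accumulate. */
static GstBuffer *
make_stamped_buffer (guint64 sample_offset, gint rate, gsize size)
{
  GstBuffer *buf = gst_buffer_new_and_alloc (size);
  /* Timestamp: position of the first sample, converted to nanoseconds. */
  GST_BUFFER_TIMESTAMP (buf) =
      gst_util_uint64_scale_int (sample_offset, GST_SECOND, rate);
  /* Duration: number of samples in the buffer, converted to nanoseconds
   * (assumes mono, 16 bits per sample). */
  GST_BUFFER_DURATION (buf) =
      gst_util_uint64_scale_int (size / sizeof (gint16), GST_SECOND, rate);
  if (!GST_BUFFER_TIMESTAMP_IS_VALID (buf))
    GST_WARNING ("buffer has no valid timestamp");
  return buf;
}
Reading works the same way: GstClockTime ts = GST_BUFFER_TIMESTAMP (buf); combined with GST_CLOCK_TIME_IS_VALID (ts) to guard against GST_CLOCK_TIME_NONE, which is the pattern most of the examples below follow.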
Example 1: audioresample_do_output
static GstFlowReturn
audioresample_do_output (GstAudioresample * audioresample, GstBuffer * outbuf)
{
int outsize;
int outsamples;
ResampleState *r;
r = audioresample->resample;
outsize = resample_get_output_size (r);
GST_LOG_OBJECT (audioresample, "audioresample can give me %d bytes", outsize);
/* protect against mem corruption */
if (outsize > GST_BUFFER_SIZE (outbuf)) {
GST_WARNING_OBJECT (audioresample,
"overriding audioresample's outsize %d with outbuffer's size %d",
outsize, GST_BUFFER_SIZE (outbuf));
outsize = GST_BUFFER_SIZE (outbuf);
}
/* catch possibly wrong size differences */
if (GST_BUFFER_SIZE (outbuf) - outsize > r->sample_size) {
GST_WARNING_OBJECT (audioresample,
"audioresample's outsize %d too far from outbuffer's size %d",
outsize, GST_BUFFER_SIZE (outbuf));
}
outsize = resample_get_output_data (r, GST_BUFFER_DATA (outbuf), outsize);
outsamples = outsize / r->sample_size;
GST_LOG_OBJECT (audioresample, "resample gave me %d bytes or %d samples",
outsize, outsamples);
GST_BUFFER_OFFSET (outbuf) = audioresample->offset;
GST_BUFFER_TIMESTAMP (outbuf) = audioresample->next_ts;
if (audioresample->ts_offset != -1) {
audioresample->offset += outsamples;
audioresample->ts_offset += outsamples;
audioresample->next_ts =
gst_util_uint64_scale_int (audioresample->ts_offset, GST_SECOND,
audioresample->o_rate);
GST_BUFFER_OFFSET_END (outbuf) = audioresample->offset;
/* we calculate DURATION as the difference between "next" timestamp
* and current timestamp so we ensure a contiguous stream, instead of
* having rounding errors. */
GST_BUFFER_DURATION (outbuf) = audioresample->next_ts -
GST_BUFFER_TIMESTAMP (outbuf);
} else {
/* no valid offset known, we can still sort of calculate the duration though */
GST_BUFFER_DURATION (outbuf) =
gst_util_uint64_scale_int (outsamples, GST_SECOND,
audioresample->o_rate);
}
/* check for possible mem corruption */
if (outsize > GST_BUFFER_SIZE (outbuf)) {
/* this is an error that when it happens, would need fixing in the
* resample library; we told it we wanted only GST_BUFFER_SIZE (outbuf),
* and it gave us more ! */
GST_WARNING_OBJECT (audioresample,
"audioresample, you memory corrupting bastard. "
"you gave me outsize %d while my buffer was size %d",
outsize, GST_BUFFER_SIZE (outbuf));
return GST_FLOW_ERROR;
}
/* catch possibly wrong size differences */
if (GST_BUFFER_SIZE (outbuf) - outsize > r->sample_size) {
GST_WARNING_OBJECT (audioresample,
"audioresample's written outsize %d too far from outbuffer's size %d",
outsize, GST_BUFFER_SIZE (outbuf));
}
GST_BUFFER_SIZE (outbuf) = outsize;
if (G_UNLIKELY (audioresample->need_discont)) {
GST_DEBUG_OBJECT (audioresample,
"marking this buffer with the DISCONT flag");
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
audioresample->need_discont = FALSE;
}
GST_LOG_OBJECT (audioresample, "transformed to buffer of %d bytes, ts %"
GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT ", offset %"
G_GINT64_FORMAT ", offset_end %" G_GINT64_FORMAT,
outsize, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)),
GST_BUFFER_OFFSET (outbuf), GST_BUFFER_OFFSET_END (outbuf));
return GST_FLOW_OK;
}
Developer ID: eta-im-dev, Project: media, Lines of code: 90, Source file: audioresample_static.c
Example 2: gst_audio_fx_base_fir_filter_transform
static GstFlowReturn
gst_audio_fx_base_fir_filter_transform (GstBaseTransform * base,
GstBuffer * inbuf, GstBuffer * outbuf)
{
GstAudioFXBaseFIRFilter *self = GST_AUDIO_FX_BASE_FIR_FILTER (base);
GstClockTime timestamp, expected_timestamp;
gint channels = GST_AUDIO_FILTER_CAST (self)->format.channels;
gint rate = GST_AUDIO_FILTER_CAST (self)->format.rate;
gint width = GST_AUDIO_FILTER_CAST (self)->format.width / 8;
guint input_samples = (GST_BUFFER_SIZE (inbuf) / width) / channels;
guint output_samples = (GST_BUFFER_SIZE (outbuf) / width) / channels;
guint generated_samples;
guint64 output_offset;
gint64 diff = 0;
GstClockTime stream_time;
timestamp = GST_BUFFER_TIMESTAMP (outbuf);
if (!GST_CLOCK_TIME_IS_VALID (timestamp)
&& !GST_CLOCK_TIME_IS_VALID (self->start_ts)) {
GST_ERROR_OBJECT (self, "Invalid timestamp");
return GST_FLOW_ERROR;
}
stream_time =
gst_segment_to_stream_time (&base->segment, GST_FORMAT_TIME, timestamp);
GST_DEBUG_OBJECT (self, "sync to %" GST_TIME_FORMAT,
GST_TIME_ARGS (timestamp));
if (GST_CLOCK_TIME_IS_VALID (stream_time))
gst_object_sync_values (G_OBJECT (self), stream_time);
g_return_val_if_fail (self->kernel != NULL, GST_FLOW_ERROR);
g_return_val_if_fail (channels != 0, GST_FLOW_ERROR);
if (GST_CLOCK_TIME_IS_VALID (self->start_ts))
expected_timestamp =
self->start_ts + gst_util_uint64_scale_int (self->nsamples_in,
GST_SECOND, rate);
else
expected_timestamp = GST_CLOCK_TIME_NONE;
/* Reset the residue if already existing on discont buffers */
if (GST_BUFFER_IS_DISCONT (inbuf)
|| (GST_CLOCK_TIME_IS_VALID (expected_timestamp)
&& (ABS (GST_CLOCK_DIFF (timestamp,
expected_timestamp) > 5 * GST_MSECOND)))) {
GST_DEBUG_OBJECT (self, "Discontinuity detected - flushing");
if (GST_CLOCK_TIME_IS_VALID (expected_timestamp))
gst_audio_fx_base_fir_filter_push_residue (self);
self->buffer_fill = 0;
g_free (self->buffer);
self->buffer = NULL;
self->start_ts = timestamp;
self->start_off = GST_BUFFER_OFFSET (inbuf);
self->nsamples_out = 0;
self->nsamples_in = 0;
} else if (!GST_CLOCK_TIME_IS_VALID (self->start_ts)) {
self->start_ts = timestamp;
self->start_off = GST_BUFFER_OFFSET (inbuf);
}
self->nsamples_in += input_samples;
generated_samples =
self->process (self, GST_BUFFER_DATA (inbuf), GST_BUFFER_DATA (outbuf),
input_samples);
g_assert (generated_samples <= output_samples);
self->nsamples_out += generated_samples;
if (generated_samples == 0)
return GST_BASE_TRANSFORM_FLOW_DROPPED;
/* Calculate the number of samples we can push out now without outputting
* latency zeros in the beginning */
diff = ((gint64) self->nsamples_out) - ((gint64) self->latency);
if (diff < 0) {
return GST_BASE_TRANSFORM_FLOW_DROPPED;
} else if (diff < generated_samples) {
gint64 tmp = diff;
diff = generated_samples - diff;
generated_samples = tmp;
GST_BUFFER_DATA (outbuf) += diff * width * channels;
}
GST_BUFFER_SIZE (outbuf) = generated_samples * width * channels;
output_offset = self->nsamples_out - self->latency - generated_samples;
GST_BUFFER_TIMESTAMP (outbuf) =
self->start_ts + gst_util_uint64_scale_int (output_offset, GST_SECOND,
rate);
GST_BUFFER_DURATION (outbuf) =
gst_util_uint64_scale_int (output_samples, GST_SECOND, rate);
if (self->start_off != GST_BUFFER_OFFSET_NONE) {
GST_BUFFER_OFFSET (outbuf) = self->start_off + output_offset;
GST_BUFFER_OFFSET_END (outbuf) =
GST_BUFFER_OFFSET (outbuf) + generated_samples;
} else {
GST_BUFFER_OFFSET (outbuf) = GST_BUFFER_OFFSET_NONE;
GST_BUFFER_OFFSET_END (outbuf) = GST_BUFFER_OFFSET_NONE;
//......... part of the code omitted .........
Developer ID: pli3, Project: gst-plugins-good, Lines of code: 101, Source file: audiofxbasefirfilter.c
Example 3: dvdspu_handle_vid_buffer
static GstFlowReturn
dvdspu_handle_vid_buffer (GstDVDSpu * dvdspu, GstBuffer * buf)
{
GstClockTime new_ts;
GstFlowReturn ret;
gboolean using_ref = FALSE;
DVD_SPU_LOCK (dvdspu);
if (buf == NULL) {
GstClockTime next_ts = dvdspu->video_seg.position;
next_ts += gst_util_uint64_scale_int (GST_SECOND,
dvdspu->spu_state.info.fps_d, dvdspu->spu_state.info.fps_n);
/* NULL buffer was passed - use the reference frame and update the timestamp,
* or else there's nothing to draw, and just return GST_FLOW_OK */
if (dvdspu->ref_frame == NULL) {
dvdspu->video_seg.position = next_ts;
goto no_ref_frame;
}
buf = gst_buffer_copy (dvdspu->ref_frame);
#if 0
g_print ("Duping frame %" GST_TIME_FORMAT " with new TS %" GST_TIME_FORMAT
"\n", GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
GST_TIME_ARGS (next_ts));
#endif
GST_BUFFER_TIMESTAMP (buf) = next_ts;
using_ref = TRUE;
}
if (GST_BUFFER_TIMESTAMP_IS_VALID (buf)) {
dvdspu->video_seg.position = GST_BUFFER_TIMESTAMP (buf);
}
new_ts = gst_segment_to_running_time (&dvdspu->video_seg, GST_FORMAT_TIME,
dvdspu->video_seg.position);
#if 0
g_print ("TS %" GST_TIME_FORMAT " running: %" GST_TIME_FORMAT "\n",
GST_TIME_ARGS (dvdspu->video_seg.position), GST_TIME_ARGS (new_ts));
#endif
gst_dvd_spu_advance_spu (dvdspu, new_ts);
/* If we have an active SPU command set, we store a copy of the frame in case
* we hit a still and need to draw on it. Otherwise, a reference is
* sufficient in case we later encounter a still */
if ((dvdspu->spu_state.flags & SPU_STATE_FORCED_DSP) ||
((dvdspu->spu_state.flags & SPU_STATE_FORCED_ONLY) == 0 &&
(dvdspu->spu_state.flags & SPU_STATE_DISPLAY))) {
if (using_ref == FALSE) {
GstBuffer *copy;
/* Take a copy in case we hit a still frame and need the pristine
* frame around */
copy = gst_buffer_copy (buf);
gst_buffer_replace (&dvdspu->ref_frame, copy);
gst_buffer_unref (copy);
}
/* Render the SPU overlay onto the buffer */
buf = gst_buffer_make_writable (buf);
gstspu_render (dvdspu, buf);
} else {
if (using_ref == FALSE) {
/* Not going to draw anything on this frame, just store a reference
* in case we hit a still frame and need it */
gst_buffer_replace (&dvdspu->ref_frame, buf);
}
}
if (dvdspu->spu_state.flags & SPU_STATE_STILL_FRAME) {
GST_DEBUG_OBJECT (dvdspu, "Outputting buffer with TS %" GST_TIME_FORMAT
"from chain while in still",
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));
}
DVD_SPU_UNLOCK (dvdspu);
/* just push out the incoming buffer without touching it */
ret = gst_pad_push (dvdspu->srcpad, buf);
return ret;
no_ref_frame:
DVD_SPU_UNLOCK (dvdspu);
return GST_FLOW_OK;
}
Developer ID: drothlis, Project: gst-plugins-bad, Lines of code: 95, Source file: gstdvdspu.c
Example 4: test_reuse
void test_reuse()
{
GstElement *audioresample;
GstEvent *newseg;
GstBuffer *inbuffer;
GstCaps *caps;
xmlfile = "test_reuse";
std_log(LOG_FILENAME_LINE, "Test Started test_reuse");
audioresample = setup_audioresample (1, 9343, 48000, 16, FALSE);
caps = gst_pad_get_negotiated_caps (mysrcpad);
fail_unless (gst_caps_is_fixed (caps));
fail_unless (gst_element_set_state (audioresample,
GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
"could not set to playing");
newseg = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, -1, 0);
fail_unless (gst_pad_push_event (mysrcpad, newseg) != FALSE);
inbuffer = gst_buffer_new_and_alloc (9343 * 4);
memset (GST_BUFFER_DATA (inbuffer), 0, GST_BUFFER_SIZE (inbuffer));
GST_BUFFER_DURATION (inbuffer) = GST_SECOND;
GST_BUFFER_TIMESTAMP (inbuffer) = 0;
GST_BUFFER_OFFSET (inbuffer) = 0;
gst_buffer_set_caps (inbuffer, caps);
/* pushing gives away my reference ... */
fail_unless (gst_pad_push (mysrcpad, inbuffer) == GST_FLOW_OK);
/* ... but it ends up being collected on the global buffer list */
fail_unless_equals_int (g_list_length (buffers), 1);
/* now reset and try again ... */
fail_unless (gst_element_set_state (audioresample,
GST_STATE_NULL) == GST_STATE_CHANGE_SUCCESS, "could not set to NULL");
fail_unless (gst_element_set_state (audioresample,
GST_STATE_PLAYING) == GST_STATE_CHANGE_SUCCESS,
"could not set to playing");
newseg = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0, -1, 0);
fail_unless (gst_pad_push_event (mysrcpad, newseg) != FALSE);
inbuffer = gst_buffer_new_and_alloc (9343 * 4);
memset (GST_BUFFER_DATA (inbuffer), 0, GST_BUFFER_SIZE (inbuffer));
GST_BUFFER_DURATION (inbuffer) = GST_SECOND;
GST_BUFFER_TIMESTAMP (inbuffer) = 0;
GST_BUFFER_OFFSET (inbuffer) = 0;
gst_buffer_set_caps (inbuffer, caps);
fail_unless (gst_pad_push (mysrcpad, inbuffer) == GST_FLOW_OK);
/* ... it also ends up being collected on the global buffer list. If we
* now have more than 2 buffers, then audioresample probably didn't clean
* up its internal buffer properly and tried to push the remaining samples
* when it got the second NEWSEGMENT event */
fail_unless_equals_int (g_list_length (buffers), 2);
cleanup_audioresample (audioresample);
gst_caps_unref (caps);
std_log(LOG_FILENAME_LINE, "Test Successful");
create_xml(0);
}
Developer ID: kuailexs, Project: symbiandump-mw1, Lines of code: 64, Source file: audioresample.c
Example 5: gst_ffmpegmux_collected
//......... part of the code omitted .........
/* now open the mux format */
if (avformat_write_header (ffmpegmux->context, NULL) < 0) {
GST_ELEMENT_ERROR (ffmpegmux, LIBRARY, SETTINGS, (NULL),
("Failed to write file header - check codec settings"));
return GST_FLOW_ERROR;
}
/* we're now opened */
ffmpegmux->opened = TRUE;
/* flush the header so it will be used as streamheader */
avio_flush (ffmpegmux->context->pb);
}
/* take the one with earliest timestamp,
* and push it forward */
best_pad = NULL;
best_time = GST_CLOCK_TIME_NONE;
for (collected = ffmpegmux->collect->data; collected;
collected = g_slist_next (collected)) {
GstFFMpegMuxPad *collect_pad = (GstFFMpegMuxPad *) collected->data;
GstBuffer *buffer = gst_collect_pads_peek (ffmpegmux->collect,
(GstCollectData *) collect_pad);
/* if there's no buffer, just continue */
if (buffer == NULL) {
continue;
}
/* if we have no buffer yet, just use the first one */
if (best_pad == NULL) {
best_pad = collect_pad;
best_time = GST_BUFFER_TIMESTAMP (buffer);
goto next_pad;
}
/* if we do have one, only use this one if it's older */
if (GST_BUFFER_TIMESTAMP (buffer) < best_time) {
best_time = GST_BUFFER_TIMESTAMP (buffer);
best_pad = collect_pad;
}
next_pad:
gst_buffer_unref (buffer);
/* Mux buffers with invalid timestamp first */
if (!GST_CLOCK_TIME_IS_VALID (best_time))
break;
}
/* now handle the buffer, or signal EOS if we have
* no buffers left */
if (best_pad != NULL) {
GstBuffer *buf;
AVPacket pkt;
gboolean need_free = FALSE;
GstMapInfo map;
/* push out current buffer */
buf =
gst_collect_pads_pop (ffmpegmux->collect, (GstCollectData *) best_pad);
ffmpegmux->context->streams[best_pad->padnum]->codec->frame_number++;
/* set time */
Developer ID: ranjankumar23, Project: gst-libav, Lines of code: 67, Source file: gstavmux.c
Example 6: gst_bml_transform_transform_mono_to_stereo
static GstFlowReturn
gst_bml_transform_transform_mono_to_stereo (GstBaseTransform * base,
GstBuffer * inbuf, GstBuffer * outbuf)
{
GstMapInfo infoi, infoo;
GstBMLTransform *bml_transform = GST_BML_TRANSFORM (base);
GstBMLTransformClass *klass = GST_BML_TRANSFORM_GET_CLASS (bml_transform);
GstBML *bml = GST_BML (bml_transform);
GstBMLClass *bml_class = GST_BML_CLASS (klass);
BMLData *datai, *datao, *seg_datai, *seg_datao;
gpointer bm = bml->bm;
guint todo, seg_size, samples_per_buffer;
gboolean has_data;
guint mode = 3; /*WM_READWRITE */
bml->running_time =
gst_segment_to_stream_time (&base->segment, GST_FORMAT_TIME,
GST_BUFFER_TIMESTAMP (inbuf));
if (GST_BUFFER_FLAG_IS_SET (outbuf, GST_BUFFER_FLAG_DISCONT)) {
bml->subtick_count = (!bml->reverse) ? bml->subticks_per_tick : 1;
}
if (bml->subtick_count >= bml->subticks_per_tick) {
bml (gstbml_reset_triggers (bml, bml_class));
bml (gstbml_sync_values (bml, bml_class, GST_BUFFER_TIMESTAMP (outbuf)));
bml (tick (bm));
bml->subtick_count = 1;
} else {
bml->subtick_count++;
}
/* don't process data in passthrough-mode */
if (gst_base_transform_is_passthrough (base)) {
// we would actually need to convert mono to stereo here
// but this is not even called
GST_WARNING_OBJECT (bml_transform, "m2s in passthrough mode");
//return GST_FLOW_OK;
}
if (!gst_buffer_map (inbuf, &infoi, GST_MAP_READ)) {
GST_WARNING_OBJECT (base, "unable to map input buffer for read");
return GST_FLOW_ERROR;
}
datai = (BMLData *) infoi.data;
samples_per_buffer = infoi.size / sizeof (BMLData);
if (!gst_buffer_map (outbuf, &infoo, GST_MAP_READ | GST_MAP_WRITE)) {
GST_WARNING_OBJECT (base, "unable to map output buffer for read & write");
return GST_FLOW_ERROR;
}
datao = (BMLData *) infoo.data;
// some buzzmachines expect a cleared buffer
//for(i=0;i<samples_per_buffer*2;i++) datao[i]=0.0f;
memset (datao, 0, samples_per_buffer * 2 * sizeof (BMLData));
/* if buffer has only silence process with different mode */
if (GST_BUFFER_FLAG_IS_SET (outbuf, GST_BUFFER_FLAG_GAP)) {
mode = 2; /* WM_WRITE */
} else {
gfloat fc = 32768.0;
orc_scalarmultiply_f32_ns (datai, datai, fc, samples_per_buffer);
}
GST_DEBUG_OBJECT (bml_transform, " calling work_m2s(%d,%d)",
samples_per_buffer, mode);
todo = samples_per_buffer;
seg_datai = datai;
seg_datao = datao;
has_data = FALSE;
while (todo) {
// 256 is MachineInterface.h::MAX_BUFFER_LENGTH
seg_size = (todo > 256) ? 256 : todo;
has_data |= bml (work_m2s (bm, seg_datai, seg_datao, (int) seg_size, mode));
seg_datai = &seg_datai[seg_size];
seg_datao = &seg_datao[seg_size * 2];
todo -= seg_size;
}
if (gstbml_fix_data ((GstElement *) bml_transform, &infoo, has_data)) {
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);
} else {
GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_GAP);
}
gst_buffer_unmap (inbuf, &infoi);
gst_buffer_unmap (outbuf, &infoo);
return GST_FLOW_OK;
}
Developer ID: Buzztrax, Project: buzztrax, Lines of code: 88, Source file: gstbmltransform.c
Example 7: gst_pad_probe_info_get_buffer
GstPadProbeReturn GstEnginePipeline::HandoffCallback(GstPad*,
GstPadProbeInfo* info,
gpointer self) {
GstEnginePipeline* instance = reinterpret_cast<GstEnginePipeline*>(self);
GstBuffer* buf = gst_pad_probe_info_get_buffer(info);
QList<BufferConsumer*> consumers;
{
QMutexLocker l(&instance->buffer_consumers_mutex_);
consumers = instance->buffer_consumers_;
}
for (BufferConsumer* consumer : consumers) {
gst_buffer_ref(buf);
consumer->ConsumeBuffer(buf, instance->id());
}
// Calculate the end time of this buffer so we can stop playback if it's
// after the end time of this song.
if (instance->end_offset_nanosec_ > 0) {
quint64 start_time = GST_BUFFER_TIMESTAMP(buf) - instance->segment_start_;
quint64 duration = GST_BUFFER_DURATION(buf);
quint64 end_time = start_time + duration;
if (end_time > instance->end_offset_nanosec_) {
if (instance->has_next_valid_url()) {
if (instance->next_url_ == instance->url_ &&
instance->next_beginning_offset_nanosec_ ==
instance->end_offset_nanosec_) {
// The "next" song is actually the next segment of this file - so
// cheat and keep on playing, but just tell the Engine we've moved on.
instance->end_offset_nanosec_ = instance->next_end_offset_nanosec_;
instance->next_url_ = QUrl();
instance->next_beginning_offset_nanosec_ = 0;
instance->next_end_offset_nanosec_ = 0;
// GstEngine will try to seek to the start of the new section, but
// we're already there so ignore it.
instance->ignore_next_seek_ = true;
emit instance->EndOfStreamReached(instance->id(), true);
} else {
// We have a next song but we can't cheat, so move to it normally.
instance->TransitionToNext();
}
} else {
// There's no next song
emit instance->EndOfStreamReached(instance->id(), false);
}
}
}
if (instance->emit_track_ended_on_time_discontinuity_) {
if (GST_BUFFER_FLAG_IS_SET(buf, GST_BUFFER_FLAG_DISCONT) ||
GST_BUFFER_OFFSET(buf) < instance->last_buffer_offset_) {
qLog(Debug) << "Buffer discontinuity - emitting EOS";
instance->emit_track_ended_on_time_discontinuity_ = false;
emit instance->EndOfStreamReached(instance->id(), true);
}
}
instance->last_buffer_offset_ = GST_BUFFER_OFFSET(buf);
return GST_PAD_PROBE_OK;
}
Developer ID: shaowei-wang, Project: Clementine, Lines of code: 64, Source file: gstenginepipeline.cpp
Example 8: gst_rtp_qdm2_depay_process
static GstBuffer *
gst_rtp_qdm2_depay_process (GstRTPBaseDepayload * depayload, GstBuffer * buf)
{
GstRtpQDM2Depay *rtpqdm2depay;
GstBuffer *outbuf = NULL;
guint16 seq;
GstRTPBuffer rtp = { NULL };
rtpqdm2depay = GST_RTP_QDM2_DEPAY (depayload);
{
gint payload_len;
guint8 *payload;
guint avail;
guint pos = 0;
gst_rtp_buffer_map (buf, GST_MAP_READ, &rtp);
payload_len = gst_rtp_buffer_get_payload_len (&rtp);
if (payload_len < 3)
goto bad_packet;
payload = gst_rtp_buffer_get_payload (&rtp);
seq = gst_rtp_buffer_get_seq (&rtp);
if (G_UNLIKELY (seq != rtpqdm2depay->nextseq)) {
GST_DEBUG ("GAP in sequence number, Resetting data !");
/* Flush previous data */
flush_data (rtpqdm2depay);
/* And store new timestamp */
rtpqdm2depay->ptimestamp = rtpqdm2depay->timestamp;
rtpqdm2depay->timestamp = GST_BUFFER_TIMESTAMP (buf);
/* And that previous data will be pushed at the bottom */
}
rtpqdm2depay->nextseq = seq + 1;
GST_DEBUG ("Payload size %d 0x%x sequence:%d", payload_len, payload_len,
seq);
GST_MEMDUMP ("Incoming payload", payload, payload_len);
while (pos < payload_len) {
switch (payload[pos]) {
case 0x80:{
GST_DEBUG ("Unrecognized 0x80 marker, skipping 12 bytes");
pos += 12;
}
break;
case 0xff:
/* HEADERS */
GST_DEBUG ("Headers");
/* Store the incoming timestamp */
rtpqdm2depay->ptimestamp = rtpqdm2depay->timestamp;
rtpqdm2depay->timestamp = GST_BUFFER_TIMESTAMP (buf);
/* flush the internal data if needed */
flush_data (rtpqdm2depay);
if (G_UNLIKELY (!rtpqdm2depay->configured)) {
guint8 *ourdata;
GstBuffer *codecdata;
GstMapInfo cmap;
GstCaps *caps;
/* First bytes are unknown */
GST_MEMDUMP ("Header", payload + pos, 32);
ourdata = payload + pos + 10;
pos += 10;
rtpqdm2depay->channs = GST_READ_UINT32_BE (payload + pos + 4);
rtpqdm2depay->samplerate = GST_READ_UINT32_BE (payload + pos + 8);
rtpqdm2depay->bitrate = GST_READ_UINT32_BE (payload + pos + 12);
rtpqdm2depay->blocksize = GST_READ_UINT32_BE (payload + pos + 16);
rtpqdm2depay->framesize = GST_READ_UINT32_BE (payload + pos + 20);
rtpqdm2depay->packetsize = GST_READ_UINT32_BE (payload + pos + 24);
/* 16 bit empty block (0x02 0x00) */
pos += 30;
GST_DEBUG
("channs:%d, samplerate:%d, bitrate:%d, blocksize:%d, framesize:%d, packetsize:%d",
rtpqdm2depay->channs, rtpqdm2depay->samplerate,
rtpqdm2depay->bitrate, rtpqdm2depay->blocksize,
rtpqdm2depay->framesize, rtpqdm2depay->packetsize);
/* Caps */
codecdata = gst_buffer_new_and_alloc (48);
gst_buffer_map (codecdata, &cmap, GST_MAP_WRITE);
memcpy (cmap.data, headheader, 20);
memcpy (cmap.data + 20, ourdata, 28);
gst_buffer_unmap (codecdata, &cmap);
caps = gst_caps_new_simple ("audio/x-qdm2",
"samplesize", G_TYPE_INT, 16,
"rate", G_TYPE_INT, rtpqdm2depay->samplerate,
"channels", G_TYPE_INT, rtpqdm2depay->channs,
"codec_data", GST_TYPE_BUFFER, codecdata, NULL);
gst_pad_set_caps (GST_RTP_BASE_DEPAYLOAD_SRCPAD (depayload), caps);
gst_caps_unref (caps);
rtpqdm2depay->configured = TRUE;
} else {
GST_DEBUG ("Already configured, skipping headers");
pos += 40;
}
break;
default:{
/* Shuffled packet contents */
//......... part of the code omitted .........
Developer ID: lubing521, Project: gst-embedded-builder, Lines of code: 101, Source file: gstrtpqdmdepay.c
Example 9: gst_rtp_dtmf_depay_process
static GstBuffer *
gst_rtp_dtmf_depay_process (GstBaseRTPDepayload * depayload, GstBuffer * buf)
{
GstRtpDTMFDepay *rtpdtmfdepay = NULL;
GstBuffer *outbuf = NULL;
gint payload_len;
guint8 *payload = NULL;
guint32 timestamp;
GstRTPDTMFPayload dtmf_payload;
gboolean marker;
GstStructure *structure = NULL;
GstMessage *dtmf_message = NULL;
rtpdtmfdepay = GST_RTP_DTMF_DEPAY (depayload);
if (!gst_rtp_buffer_validate (buf))
goto bad_packet;
payload_len = gst_rtp_buffer_get_payload_len (buf);
payload = gst_rtp_buffer_get_payload (buf);
if (payload_len != sizeof (GstRTPDTMFPayload))
goto bad_packet;
memcpy (&dtmf_payload, payload, sizeof (GstRTPDTMFPayload));
if (dtmf_payload.event > MAX_EVENT)
goto bad_packet;
marker = gst_rtp_buffer_get_marker (buf);
timestamp = gst_rtp_buffer_get_timestamp (buf);
dtmf_payload.duration = g_ntohs (dtmf_payload.duration);
/* clip to whole units of unit_time */
if (rtpdtmfdepay->unit_time) {
guint unit_time_clock =
(rtpdtmfdepay->unit_time * depayload->clock_rate) / 1000;
if (dtmf_payload.duration % unit_time_clock) {
/* Make sure we don't overflow the duration */
if (dtmf_payload.duration < G_MAXUINT16 - unit_time_clock)
dtmf_payload.duration += unit_time_clock -
(dtmf_payload.duration % unit_time_clock);
else
dtmf_payload.duration -= dtmf_payload.duration % unit_time_clock;
}
}
/* clip to max duration */
if (rtpdtmfdepay->max_duration) {
guint max_duration_clock =
(rtpdtmfdepay->max_duration * depayload->clock_rate) / 1000;
if (max_duration_clock < G_MAXUINT16 &&
dtmf_payload.duration > max_duration_clock)
dtmf_payload.duration = max_duration_clock;
}
GST_DEBUG_OBJECT (depayload, "Received new RTP DTMF packet : "
"marker=%d - timestamp=%u - event=%d - duration=%d",
marker, timestamp, dtmf_payload.event, dtmf_payload.duration);
GST_DEBUG_OBJECT (depayload,
"Previous information : timestamp=%u - duration=%d",
rtpdtmfdepay->previous_ts, rtpdtmfdepay->previous_duration);
/* First packet */
if (marker || rtpdtmfdepay->previous_ts != timestamp) {
rtpdtmfdepay->sample = 0;
rtpdtmfdepay->previous_ts = timestamp;
rtpdtmfdepay->previous_duration = dtmf_payload.duration;
rtpdtmfdepay->first_gst_ts = GST_BUFFER_TIMESTAMP (buf);
structure = gst_structure_new ("dtmf-event",
"number", G_TYPE_INT, dtmf_payload.event,
"volume", G_TYPE_INT, dtmf_payload.volume,
"type", G_TYPE_INT, 1, "method", G_TYPE_INT, 1, NULL);
if (structure) {
dtmf_message =
gst_message_new_element (GST_OBJECT (depayload), structure);
if (dtmf_message) {
if (!gst_element_post_message (GST_ELEMENT (depayload), dtmf_message)) {
GST_ERROR_OBJECT (depayload,
"Unable to send dtmf-event message to bus");
}
} else {
GST_ERROR_OBJECT (depayload, "Unable to create dtmf-event message");
}
} else {
GST_ERROR_OBJECT (depayload, "Unable to create dtmf-event structure");
}
} else {
guint16 duration = dtmf_payload.duration;
dtmf_payload.duration -= rtpdtmfdepay->previous_duration;
/* If late buffer, ignore */
if (duration > rtpdtmfdepay->previous_duration)
rtpdtmfdepay->previous_duration = duration;
//......... part of the code omitted .........
Developer ID: dylansong77, Project: gstreamer, Lines of code: 101, Source file: gstrtpdtmfdepay.c
Example 10: mpegpsmux_choose_best_stream
static MpegPsPadData *
mpegpsmux_choose_best_stream (MpegPsMux * mux)
{
/* Choose from which stream to mux with */
MpegPsPadData *best = NULL;
GstCollectData *c_best = NULL;
GSList *walk;
for (walk = mux->collect->data; walk != NULL; walk = g_slist_next (walk)) {
GstCollectData *c_data = (GstCollectData *) walk->data;
MpegPsPadData *ps_data = (MpegPsPadData *) walk->data;
if (ps_data->eos == FALSE) {
if (ps_data->queued_buf == NULL) {
GstBuffer *buf;
ps_data->queued_buf = buf =
gst_collect_pads_peek (mux->collect, c_data);
if (buf != NULL) {
if (ps_data->prepare_func) {
buf = ps_data->prepare_func (buf, ps_data, mux);
if (buf) { /* Take the prepared buffer instead */
gst_buffer_unref (ps_data->queued_buf);
ps_data->queued_buf = buf;
} else { /* If data preparation returned NULL, use unprepared one */
buf = ps_data->queued_buf;
}
}
if (GST_BUFFER_TIMESTAMP (buf) != GST_CLOCK_TIME_NONE) {
/* Ignore timestamps that go backward for now. FIXME: Handle all
* incoming PTS */
if (ps_data->last_ts == GST_CLOCK_TIME_NONE ||
ps_data->last_ts < GST_BUFFER_TIMESTAMP (buf)) {
ps_data->cur_ts = ps_data->last_ts =
gst_segment_to_running_time (&c_data->segment,
GST_FORMAT_TIME, GST_BUFFER_TIMESTAMP (buf));
} else {
GST_DEBUG_OBJECT (mux, "Ignoring PTS that has gone backward");
}
} else
ps_data->cur_ts = GST_CLOCK_TIME_NONE;
GST_DEBUG_OBJECT (mux, "Pulled buffer with ts %" GST_TIME_FORMAT
" (uncorrected ts %" GST_TIME_FORMAT " %" G_GUINT64_FORMAT
") for PID 0x%04x",
GST_TIME_ARGS (ps_data->cur_ts),
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
GST_BUFFER_TIMESTAMP (buf), ps_data->stream_id);
/* Choose a stream we've never seen a timestamp for to ensure
* we push enough buffers from it to reach a timestamp */
if (ps_data->last_ts == GST_CLOCK_TIME_NONE) {
best = ps_data;
c_best = c_data;
}
} else {
ps_data->eos = TRUE;
continue;
}
}
/* If we don't yet have a best pad, take this one, otherwise take
* whichever has the oldest timestamp */
if (best != NULL) {
if (ps_data->last_ts != GST_CLOCK_TIME_NONE &&
best->last_ts != GST_CLOCK_TIME_NONE &&
ps_data->last_ts < best->last_ts) {
best = ps_data;
c_best = c_data;
}
} else {
best = ps_data;
c_best = c_data;
}
}
}
if (c_best) {
gst_buffer_unref (gst_collect_pads_pop (mux->collect, c_best));
}
return best;
}
Developer ID: oxcsnicho, Project: mpegpsmux, Lines of code: 84, Source file: mpegpsmux.c
Example 11: gst_vader_chain
static GstFlowReturn
gst_vader_chain(GstPad * pad, GstBuffer * buf)
{
GstVader *filter;
gint16 *in_data;
guint num_samples;
gint i, vote;
guint power, rms;
g_return_val_if_fail(pad != NULL, GST_FLOW_ERROR);
g_return_val_if_fail(GST_IS_PAD(pad), GST_FLOW_ERROR);
g_return_val_if_fail(buf != NULL, GST_FLOW_ERROR);
filter = GST_VADER(GST_OBJECT_PARENT(pad));
g_return_val_if_fail(filter != NULL, GST_FLOW_ERROR);
g_return_val_if_fail(GST_IS_VADER(filter), GST_FLOW_ERROR);
in_data = (gint16 *) GST_BUFFER_DATA(buf);
num_samples = GST_BUFFER_SIZE(buf) / 2;
/* Enter a critical section. */
g_static_rec_mutex_lock(&filter->mtx);
filter->silent_prev = filter->silent;
/* If we are in auto-threshold mode, check to see if we have
* enough data to estimate a threshold. (FIXME: we should be
* estimating at the sample level rather than the frame level,
* probably) */
if (filter->threshold_level == -1) {
if (filter->silence_frames > 5) {
filter->silence_mean /= filter->silence_frames;
filter->silence_stddev /= filter->silence_frames;
filter->silence_stddev -= filter->silence_mean * filter->silence_mean;
filter->silence_stddev = fixpoint_bogus_sqrt(filter->silence_stddev);
/* Set threshold three standard deviations from the mean. */
filter->threshold_level = filter->silence_mean + 3 * filter->silence_stddev;
GST_DEBUG_OBJECT(filter, "silence_mean %d stddev %d auto_threshold %d\n",
filter->silence_mean, filter->silence_stddev,
filter->threshold_level);
}
}
/* Divide buffer into reasonably sized parts. */
for (i = 0; i < num_samples; i += VADER_FRAME) {
gint frame_len, j;
frame_len = MIN(num_samples - i, VADER_FRAME);
power = compute_normed_power(in_data + i, frame_len, &filter->prior_sample);
rms = fixpoint_sqrt_q15(power);
/* If we are in auto-threshold mode, don't do any voting etc. */
if (filter->threshold_level == -1) {
filter->silence_mean += rms;
filter->silence_stddev += rms * rms;
filter->silence_frames += 1;
GST_DEBUG_OBJECT(filter, "silence_mean_acc %d silence_stddev_acc %d frames %d\n",
filter->silence_mean, filter->silence_stddev, filter->silence_frames);
continue;
}
/* Shift back window values. */
memmove(filter->window, filter->window + 1,
(VADER_WINDOW - 1) * sizeof(*filter->window));
/* Decide if this buffer is silence or not. */
if (rms > filter->threshold_level)
filter->window[VADER_WINDOW-1] = TRUE;
else
filter->window[VADER_WINDOW-1] = FALSE;
/* Vote on whether we have entered a region of non-silence. */
vote = 0;
for (j = 0; j < VADER_WINDOW; ++j)
vote += filter->window[j];
GST_DEBUG_OBJECT(filter, "frame_len %d rms power %d threshold %d vote %d\n",
frame_len, rms, filter->threshold_level, vote);
if (vote > VADER_WINDOW / 2) {
filter->silent_run_length = 0;
filter->silent = FALSE;
}
else {
filter->silent_run_length
+= gst_audio_duration_from_pad_buffer(filter->sinkpad, buf);
}
if (filter->silent_run_length > filter->threshold_length)
/* it has been silent long enough, flag it */
filter->silent = TRUE;
}
/* Handle transitions between silence and non-silence. */
if (filter->silent != filter->silent_prev) {
gst_vader_transition(filter, GST_BUFFER_TIMESTAMP(buf));
}
/* Handling of silence detection is done. */
g_static_rec_mutex_unlock(&filter->mtx);
/* now check if we have to send the new buffer to the internal buffer cache
* or to the srcpad */
if (filter->silent) {
//......... part of the code omitted .........
Developer ID: AaronZhangL, Project: pocketsphinx.js, Lines of code: 101, Source file: gstvader.c
Example 12: gst_vader_transition
static void
gst_vader_transition(GstVader *filter, GstClockTime ts)
{
/* NOTE: This function MUST be called with filter->mtx held! */
/* has the silent status changed ? if so, send right signal
* and, if from silent -> not silent, flush pre_record buffer
*/
if (filter->silent) {
/* Sound to silence transition. */
GstMessage *m =
gst_vader_message_new(filter, FALSE, ts);
GstEvent *e =
gst_vader_event_new(filter, GST_EVENT_VADER_STOP, ts);
GST_DEBUG_OBJECT(filter, "signaling CUT_STOP");
gst_element_post_message(GST_ELEMENT(filter), m);
/* Insert a custom event in the stream to mark the end of a cut. */
/* This will block if the pipeline is paused so we have to unlock. */
g_static_rec_mutex_unlock(&filter->mtx);
gst_pad_push_event(filter->srcpad, e);
g_static_rec_mutex_lock(&filter->mtx);
/* FIXME: That event's timestamp is wrong... as is this one. */
g_signal_emit(filter, gst_vader_signals[SIGNAL_VADER_STOP], 0, ts);
/* Stop dumping audio */
if (filter->dumpfile) {
fclose(filter->dumpfile);
filter->dumpfile = NULL;
++filter->dumpidx;
}
} else {
/* Silence to sound transition. */
gint count = 0;
GstMessage *m;
GstEvent *e;
GST_DEBUG_OBJECT(filter, "signaling CUT_START");
/* Use the first pre_buffer's timestamp for the signal if possible. */
if (filter->pre_buffer) {
GstBuffer *prebuf;
prebuf = (g_list_first(filter->pre_buffer))->data;
ts = GST_BUFFER_TIMESTAMP(prebuf);
}
g_signal_emit(filter, gst_vader_signals[SIGNAL_VADER_START],
0, ts);
m = gst_vader_message_new(filter, TRUE, ts);
e = gst_vader_event_new(filter, GST_EVENT_VADER_START, ts);
gst_element_post_message(GST_ELEMENT(filter), m);
/* Insert a custom event in the stream to mark the beginning of a cut. */
/* This will block if the pipeline is paused so we have to unlock. */
g_static_rec_mutex_unlock(&filter->mtx);
gst_pad_push_event(filter->srcpad, e);
g_static_rec_mutex_lock(&filter->mtx);
/* Start dumping audio */
if (filter->dumpdir) {
gchar *filename = g_strdup_printf("%s/%08d.raw", filter->dumpdir,
filter->dumpidx);
filter->dumpfile = fopen(filename, "wb");
g_free(filename);
}
/* first of all, flush current buffer */
GST_DEBUG_OBJECT(filter, "flushing buffer of length %" GST_TIME_FORMAT,
GST_TIME_ARGS(filter->pre_run_length));
while (filter->pre_buffer) {
GstBuffer *prebuf;
prebuf = (g_list_first(filter->pre_buffer))->data;
filter->pre_buffer = g_list_remove(filter->pre_buffer, prebuf);
if (filter->dumpfile)
fwrite(GST_BUFFER_DATA(prebuf), 1, GST_BUFFER_SIZE(prebuf),
filter->dumpfile);
/* This will block if the pipeline is paused so we have to unlock. */
g_static_rec_mutex_unlock(&filter->mtx);
gst_pad_push(filter->srcpad, prebuf);
g_static_rec_mutex_lock(&filter->mtx);
++count;
}
GST_DEBUG_OBJECT(filter, "flushed %d buffers", count);
filter->pre_run_length = 0;
}
}
Developer ID: AaronZhangL, Project: pocketsphinx.js, Lines of code: 84, Source file: gstvader.c
Example 13: audioresample_transform
static GstFlowReturn
audioresample_transform (GstBaseTransform * base, GstBuffer * inbuf,
GstBuffer * outbuf)
{
GstAudioresample *audioresample;
ResampleState *r;
guchar *data, *datacopy;
gulong size;
GstClockTime timestamp;
audioresample = GST_AUDIORESAMPLE (base);
r = audioresample->resample;
data = GST_BUFFER_DATA (inbuf);
size = GST_BUFFER_SIZE (inbuf);
timestamp = GST_BUFFER_TIMESTAMP (inbuf);
GST_LOG_OBJECT (audioresample, "transforming buffer of %ld bytes, ts %"
GST_TIME_FORMAT ", duration %" GST_TIME_FORMAT ", offset %"
G_GINT64_FORMAT ", offset_end %" G_GINT64_FORMAT,
size, GST_TIME_ARGS (timestamp),
GST_TIME_ARGS (GST_BUFFER_DURATION (inbuf)),
GST_BUFFER_OFFSET (inbuf), GST_BUFFER_OFFSET_END (inbuf));
/* check for timestamp discontinuities and flush/reset if needed */
if (G_UNLIKELY (audioresample_check_discont (audioresample, timestamp))) {
/* Flush internal samples */
audioresample_pushthrough (audioresample);
/* Inform downstream element about discontinuity */
audioresample->need_discont = TRUE;
/* We want to recalculate the offset */
audioresample->ts_offset = -1;
}
if (audioresample->ts_offset == -1) {
/* if we don't know the initial offset yet, calculate it based on the
* input timestamp. */
if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
GstClockTime stime;
/* offset used to calculate the timestamps. We use the sample offset for
* this to make it more accurate. We want the first buffer to have the
* same timestamp as the incoming timestamp. */
audioresample->next_ts = timestamp;
audioresample->ts_offset =
gst_util_uint64_scale_int (timestamp, r->o_rate, GST_SECOND);
/* offset used to set as the buffer offset, this offset is always
* relative to the stream time, note that timestamp is not... */
stime = (timestamp - base->segment.start) + base->segment.time;
audioresample->offset =
gst_util_uint64_scale_int (stime, r->o_rate, GST_SECOND);
}
}
audioresample->prev_ts = timestamp;
audioresample->prev_duration = GST_BUFFER_DURATION (inbuf);
/* need to memdup, resample takes ownership. */
datacopy = g_memdup (data, size);
resample_add_input_data (r, datacopy, size, g_free, datacopy);
return audioresample_do_output (audioresample, outbuf);
}
Developer ID: eta-im-dev, Project: media, Lines of code: 62, Source file: audioresample_static.c
Example 14: gst_a52dec_chain
static GstFlowReturn
gst_a52dec_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
GstA52Dec *a52dec = GST_A52DEC (parent);
GstFlowReturn ret = GST_FLOW_OK;
gint first_access;
if (a52dec->dvdmode) {
gsize size;
guint8 data[2];
gint offset;
gint len;
GstBuffer *subbuf;
size = gst_buffer_get_size (buf);
if (size < 2)
goto not_enough_data;
gst_buffer_extract (buf, 0, data, 2);
first_access = (data[0] << 8) | data[1];
/* Skip the first_access header */
offset = 2;
if (first_access > 1) {
/* Length of data before first_access */
len = first_access - 1;
if (len <= 0 || offset + len > size)
goto bad_first_access_parameter;
subbuf = gst_buffer_copy_region (buf, GST_BUFFER_COPY_ALL, offset, len);
GST_BUFFER_TIMESTAMP (subbuf) = GST_CLOCK_TIME_NONE;
ret = a52dec->base_chain (pad, parent, subbuf);
if (ret != GST_FLOW_OK) {
gst_buffer_unref (buf);
goto done;
}
offset += len;
len = size - offset;
if (len > 0) {
subbuf = gst_buffer_copy_region (buf, GST_BUFFER_COPY_ALL, offset, len);
GST_BUFFER_TIMESTAMP (subbuf) = GST_BUFFER_TIMESTAMP (buf);
ret = a52dec->base_chain (pad, parent, subbuf);
}
gst_buffer_unref (buf);
} else {
/* first_access = 0 or 1, so if there's a timestamp it applies to the first byte */
subbuf =
gst_buffer_copy_region (buf, GST_BUFFER_COPY_ALL, offset,
size - offset);
GST_BUFFER_TIMESTAMP (subbuf) = GST_BUFFER_TIMESTAMP (buf);
gst_buffer_unref (buf);
ret = a52dec->base_chain (pad, parent, subbuf);
}
} else {
ret = a52dec->base_chain (pad, parent, buf);
}
done:
return ret;
/* ERRORS */
not_enough_data:
{
GST_ELEMENT_ERROR (GST_ELEMENT (a52dec), STREAM, DECODE, (NULL),
("Insufficient data in buffer. Can't determine first_acess"));
gst_buffer_unref (buf);
return GST_FLOW_ERROR;
//......... part of the code omitted .........