本文整理汇总了C++中VideoDecoder类的典型用法代码示例。如果您正苦于以下问题:C++ VideoDecoder类的具体用法?C++ VideoDecoder怎么用?C++ VideoDecoder使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了VideoDecoder类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: readFunction
static int readFunction(void* opaque, uint8_t* buffer, int bufferSize)
{
VideoDecoder* decoder = (VideoDecoder*) opaque;
//Call implemented function
return decoder->getFillFileBufferFunc()(decoder->getCustomFileBufferFuncData(), buffer, bufferSize);
}
开发者ID:Caresilabs,项目名称:gdx-video,代码行数:7,代码来源:VideoDecoder.cpp
示例2: vdec_flush
/* Flushes all frames queued inside the decoder core.
 * dec            - wrapper whose ->core holds the VideoDecoder instance
 * nFlushedFrames - out: number of frames discarded by the core
 * Returns VDEC_SUCCESS, or VDEC_EFAILED when dec or its core is NULL. */
Vdec_ReturnType vdec_flush(struct VDecoder* dec, int *nFlushedFrames)
{
    QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: flush \n");
    /* Guard dec itself before reading dec->core; the original dereferenced
     * a potentially NULL dec (the other vdec_* entry points do check). */
    if(!dec) return VDEC_EFAILED;
    VideoDecoder* pDec = (VideoDecoder*)(dec->core);
    if(!pDec) return VDEC_EFAILED;
    pDec->Flush( nFlushedFrames );
    return VDEC_SUCCESS;
}
开发者ID:armv6,项目名称:android_hardware_qcom_qdsp5,代码行数:8,代码来源:vdec.cpp
示例3: GetFormat
// Pixel-format negotiation callback handed to libavcodec.
// When the codec context carries an owning VideoDecoder in its opaque
// pointer, the choice is delegated to it; otherwise the default negotiation
// runs. Unusable arguments fall back to YUV420P.
static AVPixelFormat GetFormat(AVCodecContext *Context, const AVPixelFormat *Formats)
{
    if (Context && Formats)
    {
        VideoDecoder* owner = static_cast<VideoDecoder*>(Context->opaque);
        return owner ? owner->AgreePixelFormat(Context, Formats)
                     : GetFormatDefault(Context, Formats);
    }
    return AV_PIX_FMT_YUV420P;
}
开发者ID:,项目名称:,代码行数:11,代码来源:
示例4: gstvideo_has_codec
/* Returns TRUE when a GStreamer decoder can be built for codec_type.
 * Probes by actually instantiating a decoder and immediately tearing it
 * down, so the answer reflects the plugins available at runtime. */
G_GNUC_INTERNAL
gboolean gstvideo_has_codec(int codec_type)
{
    VideoDecoder *probe = create_gstreamer_decoder(codec_type, NULL);
    if (!probe)
        return FALSE;
    probe->destroy(probe);
    return TRUE;
}
开发者ID:fgouget,项目名称:spice-gtk,代码行数:13,代码来源:channel-display-gst.c
示例5: ReleaseBuffer
// Buffer-release callback registered with libavcodec.
// Frames allocated internally by avcodec go back through its default
// release path; everything else is handed to the VideoDecoder stored in
// the context's opaque pointer. A missing owner is logged as an error.
static void ReleaseBuffer(AVCodecContext *Context, AVFrame *Frame)
{
    if (Frame->type == FF_BUFFER_TYPE_INTERNAL)
        return avcodec_default_release_buffer(Context, Frame);

    VideoDecoder *owner = static_cast<VideoDecoder*>(Context->opaque);
    if (!owner)
    {
        LOG(VB_GENERAL, LOG_ERR, "Invalid context");
        return;
    }
    owner->ReleaseAVBuffer(Context, Frame);
}
开发者ID:,项目名称:,代码行数:14,代码来源:
示例6: GetBuffer
static int GetBuffer(struct AVCodecContext *Context, AVFrame *Frame)
{
if (!Context->codec)
return -1;
if (!(Context->codec->capabilities & CODEC_CAP_DR1))
return avcodec_default_get_buffer(Context, Frame);
VideoDecoder *parent = static_cast<VideoDecoder*>(Context->opaque);
if (parent)
return parent->GetAVBuffer(Context, Frame);
LOG(VB_GENERAL, LOG_ERR, "Invalid context");
return -1;
}
开发者ID:,项目名称:,代码行数:15,代码来源:
示例7: main
/* Decode-speed benchmark: decodes every video packet of a file with a
 * chosen decoder and reports frames decoded, elapsed time and average fps.
 * Command line: -f <file> (default "test.avi"), -vc <decoder> (default FFmpeg).
 * Returns 0 on success, 1 on setup failure. */
int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);
    QString file = "test.avi";
    int idx = a.arguments().indexOf("-f");
    // Guard idx + 1: the option may be the last token, and at() would assert.
    if (idx > 0 && idx + 1 < a.arguments().size())
        file = a.arguments().at(idx + 1);
    QString decName("FFmpeg");
    idx = a.arguments().indexOf("-vc");
    if (idx > 0 && idx + 1 < a.arguments().size())
        decName = a.arguments().at(idx + 1);
    VideoDecoderId cid = VideoDecoderFactory::id(decName.toStdString());
    if (cid <= 0) {
        qWarning("Can not find decoder: %s", decName.toUtf8().constData());
        return 1;
    }
    VideoDecoder *dec = VideoDecoderFactory::create(cid);
    // The factory can fail even for a known id; don't dereference NULL below.
    if (!dec) {
        qWarning("Can not find decoder: %s", decName.toUtf8().constData());
        return 1;
    }
    AVDemuxer demux;
    if (!demux.loadFile(file)) {
        qWarning("Failed to load file: %s", file.toUtf8().constData());
        return 1;
    }
    dec->setCodecContext(demux.videoCodecContext());
    dec->prepare();
    dec->open();
    QElapsedTimer timer;
    timer.start();
    int count = 0;
    VideoFrame frame;
    while (!demux.atEnd()) {
        if (!demux.readFrame())
            continue;
        if (dec->decode(demux.packet()->data)) {
            /*
             * TODO: may contains more than 1 frames
             * map from gpu or not?
             */
            //frame = dec->frame().clone();
            count++;
        }
    }
    // QElapsedTimer::elapsed() reports milliseconds; convert to whole seconds.
    qint64 elapsed = timer.elapsed();
    int msec = elapsed/1000LL;
    // Guard the division: a short clip can decode in under one second,
    // which made the original crash with a division by zero.
    qDebug("decoded frames: %d, time: %d, average speed: %d",
           count, msec, msec > 0 ? count/msec : count);
    return 0;
}
开发者ID:AlexSnet,项目名称:QtAV,代码行数:48,代码来源:main.cpp
示例8: vdec_release_frame
/* Returns a previously delivered output frame to the decoder core so its
 * buffer can be reused.
 * dec   - wrapper whose ->core holds the VideoDecoder instance
 * frame - output frame descriptor being recycled
 * Returns VDEC_SUCCESS, or VDEC_EFAILED on NULL parameters / missing core. */
Vdec_ReturnType vdec_release_frame(struct VDecoder *dec, struct vdec_frame *frame)
{
    QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: release_frame %p\n", frame);
    /* Validate BEFORE touching dec->core: the original read dec->core
     * first, which crashes on a NULL dec. */
    if (NULL == dec || NULL == frame)
    {
        QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: error: encountered NULL parameter vdec: 0x%x frame: 0x%x", (unsigned int)dec, (unsigned int)frame);
        return VDEC_EFAILED;
    }
    VideoDecoder *pDec = (VideoDecoder*)(dec->core);
    /* Match the guard style of the other vdec_* entry points. */
    if (!pDec)
        return VDEC_EFAILED;
    VDEC_FRAME vdecFrame;
    //vdecFrame.pBuf = (VDEC_BYTE*)frame->phys;
    vdecFrame.pBuf = (VDEC_BYTE*)frame->base;
    vdecFrame.timestamp = (unsigned long long)frame->timestamp;
    pDec->ReuseFrameBuffer(&vdecFrame);
    QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: released_frame with ptr: 0x%x", (unsigned int)vdecFrame.pBuf);
    return VDEC_SUCCESS;
}
开发者ID:armv6,项目名称:android_hardware_qcom_qdsp5,代码行数:18,代码来源:vdec.cpp
示例9: FFPlayer
// Builds the whole playback pipeline: demux stream, audio/video decoders,
// renderers and the scheduler, then wires the decode-finished and timer
// callbacks back into this player instance.
FFPlayer()
{
    stream_         = new FFStream();
    audio_decoder_  = new AudioDecoder();
    audio_renderer_ = new AudioRenderer();
    video_decoder_  = new VideoDecoder();
    video_renderer_ = new VideoRenderer();
    scheduler_      = new Scheduler();
    // Notify this player whenever a video frame finishes decoding.
    video_decoder_->setOnDecodeFinished(
        bind(&FFPlayer::video_decoder_OnDecodeFinished, this, placeholders::_1) );
    // Drive playback timing through the scheduler's timer callback.
    scheduler_->setOnTime( bind(&FFPlayer::scheduler_OnTimer, this) );
}
开发者ID:yangee,项目名称:ff-player,代码行数:14,代码来源:FFPlayer.hpp
示例10: onData
// Receives one payload from a live RTSP track and feeds it to the H.264
// decoder. Non-H.264 tracks are ignored. The decoder is created lazily on
// the first payload and primed with the track's out-of-band extradata
// (SPS/PPS) when present. Decoded frames are immediately released.
virtual void onData(LiveTrack* track, uint8_t* p_buffer, int i_size,
		int i_truncated_bytes, int64_t pts, int64_t dts) {
	int consumed = 0;
	if (track->getFormat().i_codec != VLC_CODEC_H264)
		return;
	if (!decoder) {
		decoder = new VideoDecoder();
		decoder->openCodec(0);
		if (track->getFormat().p_extra) {
			decoder->decode(track->getFormat().p_extra,
					track->getFormat().i_extra, consumed);
		}
	}
	uint8_t* tmp = p_buffer;
	int left = i_size;
	// `> 0` (not just truthy) so a decoder over-consuming past the buffer
	// end cannot leave `left` negative and spin forever.
	while (left > 0) {
		AVFrame* ret = decoder->decode(tmp, left, consumed);
		if (ret) {
			av_frame_unref(ret);
#ifdef TEST_MULTI_CLIENT
			std::cout << "client " << this << " got frame!!!\n";
#endif
		}
		// If the decoder made no progress, bail out instead of looping
		// forever on the same bytes (original bug).
		if (consumed <= 0)
			break;
		tmp += consumed;
		left -= consumed;
	}
}
开发者ID:nguyen-viet-hung,项目名称:live555,代码行数:36,代码来源:main.cpp
示例11: RecVideo
/****************************************
* RecVideo
* Obtiene los packetes y los muestra
*****************************************/
int MediaBridgeSession::RecVideo()
{
//Coders
VideoDecoder* decoder = NULL;
VideoEncoder* encoder = VideoCodecFactory::CreateEncoder(VideoCodec::SORENSON);
//Create new video frame
RTMPVideoFrame frame(0,262143);
//Set codec
frame.SetVideoCodec(RTMPVideoFrame::FLV1);
int width=0;
int height=0;
DWORD numpixels=0;
Log(">RecVideo\n");
//Mientras tengamos que capturar
while(receivingVideo)
{
///Obtenemos el paquete
RTPPacket* packet = rtpVideo.GetPacket();
//Check
if (!packet)
//Next
continue;
//Get type
VideoCodec::Type type = (VideoCodec::Type)packet->GetCodec();
if ((decoder==NULL) || (type!=decoder->type))
{
//Si habia uno nos lo cargamos
if (decoder!=NULL)
delete decoder;
//Creamos uno dependiendo del tipo
decoder = VideoCodecFactory::CreateDecoder(type);
//Check
if (!decoder)
{
delete(packet);
continue;
}
}
//Lo decodificamos
if(!decoder->DecodePacket(packet->GetMediaData(),packet->GetMediaLength(),0,packet->GetMark()))
{
delete(packet);
continue;
}
//Get mark
bool mark = packet->GetMark();
//Delete packet
delete(packet);
//Check if it is last one
if(!mark)
continue;
//Check size
if (decoder->GetWidth()!=width || decoder->GetHeight()!=height)
{
//Get dimension
width = decoder->GetWidth();
height = decoder->GetHeight();
//Set size
numpixels = width*height*3/2;
//Set also frame rate and bps
encoder->SetFrameRate(25,300,500);
//Set them in the encoder
encoder->SetSize(width,height);
}
//Encode next frame
VideoFrame *encoded = encoder->EncodeFrame(decoder->GetFrame(),numpixels);
//Check
if (!encoded)
break;
//Check size
if (frame.GetMaxMediaSize()<encoded->GetLength())
//Not enougth space
return Error("Not enought space to copy FLV encodec frame [frame:%d,encoded:%d",frame.GetMaxMediaSize(),encoded->GetLength());
//Get full frame
frame.SetVideoFrame(encoded->GetData(),encoded->GetLength());
//.........这里部分代码省略.........
开发者ID:crubia,项目名称:wt,代码行数:101,代码来源:mediabridgesession.cpp
示例12: main
int main(int argc, char *argv[])
{
#if 0
QCoreApplication a(argc, argv);
return a.exec();
#endif
VideoDecoder* videoDecoder = new VideoDecoder;
VideoEncoder* videoEncoder = 0;
AdaboostClassifier* openClassifier = new AdaboostClassifier;
AdaboostClassifier* closedClassifier = new AdaboostClassifier;
HandyTracker tracker;
if ( argc != 5 )
{
printf("Usage: %s <open classifier> <closed classifier> <input video> <output video>\n", argv[0]);
return 0;
}
if ( !openClassifier->Load(argv[1]) )
{
fprintf(stderr, "Failed loading open classifier\n", argv[1]);
return 1;
}
if ( !tracker.SetOpenClassifier(openClassifier) )
{
fprintf(stderr, "Failed setting open classifier\n");
return 1;
}
if ( !closedClassifier->Load(argv[2]) )
{
fprintf(stderr, "Failed loading closed classifier\n", argv[2]);
return 1;
}
if ( !tracker.SetClosedClassifier(closedClassifier) )
{
fprintf(stderr, "Failed setting closed classifier\n");
return 1;
}
videoDecoder->SetFilename(argv[3]);
if ( !videoDecoder->Load() )
{
fprintf(stderr, "Failed loading video <%s>\n", argv[3]);
return 1;
}
if ( !videoDecoder->UpdateFrame() )
{
fprintf(stderr, "Failed updating frame\n");
return 1;
}
int frameNumber = 0;
bool trackingInitialized = false;
Image* img = videoDecoder->GetFrame();
while ( img )
{
if ( !videoEncoder )
{
videoEncoder = new VideoEncoder;
if ( !videoEncoder->Open(argv[4], img->GetWidth(), img->GetHeight(), 25) )
{
fprintf(stderr, "Failed opening output video <%s>\n", argv[4]);
return 1;
}
}
ProcessFrame(img, &tracker, trackingInitialized, frameNumber);
if ( trackingInitialized )
DrawResults(img, &tracker, frameNumber);
videoEncoder->AddFrame(img);
if ( frameNumber > 1 )
tracker.PurgeRegion(frameNumber - 2);
frameNumber++;
videoDecoder->UpdateFrame();
img = videoDecoder->GetFrame();
}
videoEncoder->Close();
return 0;
}
开发者ID:yodergj,项目名称:HandDetection,代码行数:87,代码来源:main.cpp
示例13: vdec_post_input_buffer
/* Posts one compressed input buffer to the decoder core.
 * dec    - wrapper whose ->core holds the VideoDecoder instance
 * frame  - input descriptor (data pointer, length, timestamp, flags)
 * cookie - opaque client token returned through buffer_done()
 * A zero-length buffer with FRAME_FLAG_EOS signals end-of-stream.
 * Returns VDEC_SUCCESS, VDEC_EOUTOFBUFFERS when the core has no free
 * buffers, or VDEC_EFAILED on bad parameters / decode errors. */
Vdec_ReturnType vdec_post_input_buffer(struct VDecoder *dec, video_input_frame_info *frame, void *cookie)
{
#ifdef LOG_INPUT_BUFFERS
	static int take_input = 1;
#endif
	int fatal_err = 0;
	/*checkBufAvail flag is needed since we do not need to checkout
	 * YUV/Slice buffer incase the NAL corresponds to same frame.
	 * This is required for multiple NALs in one input buffer
	 */
	bool checkBufAvail = true;
	VDEC_INPUT_BUFFER input;
	VDEC_ERROR err = VDEC_ERR_EVERYTHING_FINE;
	/* Validate parameters BEFORE any dereference: the original logged
	 * frame->data and read dec->core ahead of this check, crashing when
	 * dec or frame is NULL. */
	if (NULL == dec || NULL == frame || NULL == frame->data )
	{
		/* Only dereference frame->data when frame itself is non-NULL. */
		QTV_MSG_PRIO3(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: error: encountered NULL parameter dec: 0x%x frame: 0x%x data: 0x%x\n",
			(unsigned int)dec,
			(unsigned int)frame,
			(unsigned int)(frame ? frame->data : NULL));
		return VDEC_EFAILED;
	}
	VideoDecoder *pDec = (VideoDecoder*)(dec->core);
	QTV_MSG_PRIO3(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: post_input data=%p len=%d cookie=%p\n", frame->data, frame->len, cookie);
	/* Describe the single-layer input buffer for the core. */
	input.buffer[0] = (unsigned char*)frame->data;
	input.timestamp[0] = (long long)frame->timestamp;
	input.buffer_size[0] = (unsigned long int)frame->len;
	input.buffer_pos[0] = 0;
	input.layers = 1;
	input.eOSIndicator[0]= false;
	QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: received ts: %lld", frame->timestamp);
	/* Out-of-order timestamps are only logged, not rejected. */
	if (frame->timestamp < timestamp )
	{
		QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: error: out of order stamp! %d < %d\n",
			(int)(frame->timestamp&0xFFFFFFFF), timestamp);
	}
	timestamp = (int)frame->timestamp;
	QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: vdec_core_post_input. buffer_size[0]: %ld frame->flags: 0x%x\n",
		input.buffer_size[0], frame->flags);
	/* Zero-length EOS buffer: flush the core and hand the cookie back. */
	if (input.buffer_size[0] == 0 && frame->flags & FRAME_FLAG_EOS)
	{
		QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Zero-length buffer with EOS bit set\n");
		input.eOSIndicator[0] = true;
		if(pDec)
			err = pDec->EOS( );
		else
			err = VDEC_ERR_NULL_STREAM_ID;
		if(VDEC_ERR_OUT_OF_BUFFERS == err) return VDEC_EOUTOFBUFFERS;
		vdec_decoder_info->ctxt->buffer_done(vdec_decoder_info->ctxt, cookie);
		if (VDEC_ERR_EVERYTHING_FINE == err) return VDEC_SUCCESS;
		return VDEC_EFAILED;
	}
	QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_core_post_input\n");
#ifdef LOG_INPUT_BUFFERS
	if (take_input)
	{
		fwritex((unsigned char*)frame->data, frame->len, pInputFile);
		QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: frame %d frame->len %d\n", counter++, frame->len);
	}
#endif
	/* Loop: one input buffer may hold several NALs; keep decoding until
	 * the core consumes everything or reports an error. Buffer checkout
	 * is only needed on the first pass (see checkBufAvail above). */
	do {
		QPERF_TIME(arm_decode, err = pDec->Decode( &input, checkBufAvail ));
		if (VDEC_ERR_EVERYTHING_FINE != err)
		{
			QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: vdec_decoder error: %d\n", (int)err);
			if(VDEC_ERR_UNSUPPORTED_DIMENSIONS == err) {
				fatal_err = 1;
				break;
			}
		}
		checkBufAvail = false;
	} while( ( VDEC_ERR_EVERYTHING_FINE == err ) && ( 0 != input.buffer_size[0] ) );
#ifdef LOG_INPUT_BUFFERS
	take_input = (err==14?0:1);
#endif
	if(VDEC_ERR_OUT_OF_BUFFERS == err) return VDEC_EOUTOFBUFFERS;
	vdec_input_buffer_release_cb_handler(pDec,&input,cookie);
	if(VDEC_ERR_EVERYTHING_FINE == err) return VDEC_SUCCESS;
	/* Unsupported dimensions: report a fatal frame to the client so it can
	 * tear down instead of waiting for output that will never arrive. */
	if(fatal_err) {
		static struct vdec_frame frame;
		memset(&frame, 0, sizeof(frame));
		frame.flags |= FRAME_FLAG_FATAL_ERROR;
		QPERF_END(frame_data);
		vdec_decoder_info->ctxt->frame_done(vdec_decoder_info->ctxt, &frame);
	}
	return VDEC_EFAILED;
}
开发者ID:armv6,项目名称:android_hardware_qcom_qdsp5,代码行数:97,代码来源:vdec.cpp
示例14: sizeof
struct VDecoder *vdec_open(struct vdec_context *ctxt)
{
struct VDecoder *dec = NULL;
VDEC_ERROR err = 0;
const VDEC_CONCURRENCY_CONFIG concurrencyConfig = VDEC_CONCURRENT_NONE;
VideoDecoder* pDec = NULL;
dec = (VDecoder*)calloc(1, sizeof(struct VDecoder));
if (!dec) {
return 0;
}
dec->ctxt = ctxt;
dec->width = ctxt->width;
dec->height = ctxt->height;
dec->ctxt->outputBuffer.numBuffers = ctxt->outputBuffer.numBuffers;
if(VDEC_SUCCESS != vdec_commit_memory(dec)) {
return 0;
}
QPERF_RESET(arm_decode);
QPERF_RESET(frame_data);
nFrameDoneCnt = 0;
nGoodFrameCnt = 0;
#ifdef PROFILE_DECODER
qperf_total_frame_cnt = 0;
#endif
vdec_output_frame_index = 0;
timestamp = 0;
int i;
VDEC_PARAMETER_DATA codeDetectionEnable;
codeDetectionEnable.startCodeDetection.bStartCodeDetectionEnable = false; // by default set to false; MPEG4 doesnt require it
QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_open(). width: %d, height: %d\n", dec->width, dec->height);
vdec_decoder_info = dec;
QTV_MSG_PRIO3(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_open(). width: %d, height: %d kind[%s]\n",
vdec_decoder_info->ctxt->width, vdec_decoder_info->ctxt->height,
vdec_decoder_info->ctxt->kind);
if(!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.avc"))
{
dec->core = reinterpret_cast<VDEC_STREAM_ID>(pCreateFnH264(&err));
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Creating H264 Decoder [%p]\n",dec->core);
VDEC_PARAMETER_DATA sizeOfNalLengthField;
sizeOfNalLengthField.sizeOfNalLengthField.sizeOfNalLengthField = dec->ctxt->size_of_nal_length_field;
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: NAL lenght [%d]\n",dec->ctxt->size_of_nal_length_field);
pDec = (VideoDecoder*)(dec->core);
if (0 == dec->ctxt->size_of_nal_length_field)
{
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: START CODE....\n");
codeDetectionEnable.startCodeDetection.bStartCodeDetectionEnable = true;
if(!pDec)
err = VDEC_ERR_NULL_STREAM_ID;
else
err = pDec->SetParameter(VDEC_PARM_START_CODE_DETECTION,&codeDetectionEnable);
if (VDEC_ERR_EVERYTHING_FINE != err)
{
// TBD- printx("[vdec_core] set start code detection parameter failed: %d", (int)err);
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"[vdec_core] set start code detection parameter failed: %d", (int)err);
goto fail_initialize;
}
}
else if(dec->ctxt->size_of_nal_length_field > 0 && dec->ctxt->size_of_nal_length_field <= 4)
{
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: NALU LENGTH[%d]\n",dec->ctxt->size_of_nal_length_field);
// test size of NAL length field decoder support
if(!pDec)
err = VDEC_ERR_NULL_STREAM_ID;
else
err = pDec->SetParameter( VDEC_PARM_SIZE_OF_NAL_LENGTH_FIELD, &sizeOfNalLengthField );
if (VDEC_ERR_EVERYTHING_FINE != err)
{
// TBD- printx("[vdec_core] set start code detection parameter failed: %d", (int)err);
goto fail_initialize;
}
}
else
{
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: Invalid size of nal length field: %d\n", dec->ctxt->size_of_nal_length_field);
goto fail_core;
}
}
else if ((!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.mpeg4"))
|| (!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.h263")))
{
dec->core = reinterpret_cast<VDEC_STREAM_ID>(pCreateFnMpeg4(&err));
pDec = (VideoDecoder*)(dec->core);
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Creating MP4 Decoder [%p]\n",dec->core);
}
else if (!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.vc1"))
{
dec->core = reinterpret_cast<VDEC_STREAM_ID>(pCreateFnWmv(&err));
pDec = (VideoDecoder*)(dec->core);
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Creating WMV Decoder [%p]\n",dec->core);
}
else
{
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"Incorrect codec kind\n");
goto fail_core;
}
//.........这里部分代码省略.........
开发者ID:armv6,项目名称:android_hardware_qcom_qdsp5,代码行数:101,代码来源:vdec.cpp
示例15: vdec_close
/* Shuts down and destroys a decoder instance created by vdec_open():
 * dumps profiling statistics, closes any debug capture files, suspends and
 * deletes the decoder core, then releases the wrapper's pmem arena and the
 * wrapper itself.
 * NOTE(review): dec is dereferenced without a NULL check — callers are
 * presumably expected to pass a valid handle; confirm against call sites.
 * Always returns VDEC_SUCCESS. */
Vdec_ReturnType vdec_close(struct VDecoder *dec)
{
VDEC_ERROR err;
VideoDecoder* pDec = (VideoDecoder*)(dec->core);
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_close()\n");
#ifdef PROFILE_DECODER
/* Report the session's ARM decode time and frame-done throughput gathered
 * by the QPERF counters. */
usecs_t time_taken_by_arm = QPERF_TERMINATE(arm_decode);
float avArmTime = (float)time_taken_by_arm/(qperf_total_frame_cnt*1000);
usecs_t frame_data_time = QPERF_TERMINATE(frame_data);
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"                  Arm Statistics                  \n");
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Total number of frames decoded = %ld\n",qperf_total_frame_cnt);
QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Average Arm time/frame(ms)     = %f\n",avArmTime);
QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Frames Arm Decoded/sec         = %f\n",1000/avArmTime);
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"              Frame Done Statistics               \n");
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Frame done cumulative time = %lld\n",frame_data_time);
QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Frames Done per second     = %f\n",(float)(qperf_total_frame_cnt-1)*1000000/frame_data_time);
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
#endif
/* Close the debug-dump files opened while logging was enabled. */
#ifdef LOG_YUV_FRAMES
if (pYUVFile)
{
fclose (pYUVFile);
pYUVFile = NULL;
}
#endif
#ifdef LOG_INPUT_BUFFERS
if (pInputFile)
{
fclose (pInputFile);
}
#endif
vdec_output_frame_index = 0;
#if NEED_VDEC_LP
if (vdec->fake)
{
//jlk - adsp_close() calls adsp_disable right now.  Calling adsp_disable() twice causes problems
//Renable this line when we fix the kernel driver
//adsp_disable((adsp_module*)vdec->fake);
adsp_close((adsp_module*)vdec->fake);
}
else
{
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: adsp modules is NULL\n");
}
#endif
/* Reset session counters for the next vdec_open(). */
nFrameDoneCnt = 0;
nGoodFrameCnt = 0;
if (dec->core)
{
/* Suspend the core before deleting it; a failure here is only logged
 * because teardown must proceed regardless. */
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: calling Suspend");
err = pDec->Suspend( );
if (VDEC_ERR_EVERYTHING_FINE != err)
{
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: Suspend returned error: %d\n", (int)err);
}
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: calling vdec_destroy");
QTV_Delete( (VideoDecoder*)(dec->core) );
}
else
{
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: core is NULL");
}
/* Release the wrapper's physical-memory arena, then the wrapper itself. */
pmem_free(&dec->arena);
free(dec);
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: closed\n");
return VDEC_SUCCESS;
}
开发者ID:armv6,项目名称:android_hardware_qcom_qdsp5,代码行数:73,代码来源:vdec.cpp
示例16: main
int main(int argc, char* argv[])
{
int i, j, k;
int width, height;
int numFleshRegions, numHands, xScale, yScale;
int left, right, top, bottom;
Image* image;
Image outlineImage;
FleshDetector* fleshDetector;
vector<ConnectedRegion*>* fleshRegionVector;
vector<Hand*> hands;
Hand* hand;
vector<HandCandidate*> handCandidates;
HandCandidate* candidate;
unsigned char angledBoxColor[] = {255, 255, 0};
unsigned char longColor[] = {0, 255, 0};
unsigned char shortColor[] = {0, 0, 255};
unsigned char offsetColor[] = {0, 255, 255};
unsigned char pointColor[] = {255, 0, 0};
unsigned char farPointColor[] = {255, 0, 255};
int numLargeRegions;
string basename;
DoublePoint centroid, center, nearEdge, farEdge;
LineSegment shortLine, longLine, offsetLine;
Rect angledBox;
double edgeAngle, offsetAngle;
CompositeClassifier postureDetector;
string features;
Matrix input;
int classIndex;
SubImage handImage;
vector<Point> farPoints;
int numFarPoints;
string inputFilename, outputFilename;
VideoDecoder decoder;
VideoEncoder encoder;
bool needInit = true;
if ( argc < 5 )
{
printf("Usage: %s <flesh classifier file> <hand classifier file> <input file> <output file>\n", argv[0]);
return 1;
}
// Either loads a real detector or gets a dummy detector if arg is "DUMMY"
fleshDetector = FleshDetector::Get(argv[1]);
if ( !fleshDetector )
{
fprintf(stderr, "Error loading flesh detector %s\n", argv[1]);
return 1;
}
if ( !postureDetector.Load(argv[2]) )
{
fprintf(stderr, "Error loading hand detector %s\n", argv[2]);
return 1;
}
features = postureDetector.GetFeatureString();
inputFilename = argv[3];
outputFilename = argv[4];
decoder.SetFilename(inputFilename);
if ( !decoder.Load() )
{
fprintf(stderr, "Error loading video %s\n", inputFilename.c_str());
return 1;
}
while ( decoder.UpdateFrame() )
{
image = decoder.GetFrame();
if ( needInit )
{
needInit = false;
width = image->GetWidth();
height = image->GetHeight();
if ( !encoder.Open(outputFilename.c_str(), width, height, 10) )
{
fprintf(stderr, "Failed opening %s\n", outputFilename.c_str());
return 1;
}
}
hands.clear();
outlineImage = *image;
fleshRegionVector = fleshDetector->GetFleshRegions(image, xScale, yScale);
if ( fleshRegionVector )
{
numFleshRegions = fleshRegionVector->size();
numLargeRegions = 0;
for (i = 0; i < numFleshRegions; i++)
{
if ( !(*fleshRegionVector)[i]->GetBounds(left, right, top, bottom) )
{
fprintf(stderr, "Error getting flesh block %d bounds\n", i);
return 1;
}
//.........这里部分代码省略.........
开发者ID:yodergj,项目名称:HandDetection,代码行数:101,代码来源:VidPostureDetect.cpp
示例17: SendVideo
int MediaBridgeSession::SendVideo()
{
VideoDecoder *decoder = VideoCodecFactory::CreateDecoder(VideoCodec::SORENSON);
VideoEncoder *encoder = VideoCodecFactory::CreateEncoder(rtpVideoCodec);
DWORD width = 0;
DWORD height = 0;
DWORD numpixels = 0;
QWORD lastVideoTs = 0;
Log(">SendVideo\n");
//Set video format
if (!rtpVideo.SetSendingCodec(rtpVideoCodec))
//Error
return Error("Peer do not support [%d,%s]\n",rtpVideoCodec,VideoCodec::GetNameFor(rtpVideoCodec));
//While sending video
while (sendingVideo)
{
//Wait for next video
if (!videoFrames.Wait(0))
//Check again
continue;
//Get audio grame
RTMPVideoFrame* video = videoFrames.Pop();
//check
if (!video)
//Again
continue;
//Get time difference
DWORD diff = 0;
//Get timestam
QWORD ts = video->GetTimestamp();
//If it is not the first frame
if (lastVideoTs)
//Calculate it
diff = ts - lastVideoTs;
//Set the last audio timestamp
lastVideoTs = ts;
//Check
if (video->GetVideoCodec()!=RTMPVideoFrame::FLV1)
//Error
continue;
//Decode frame
if (!decoder->Decode(video->GetMediaData(),video->GetMediaSize()))
{
Error("decode packet error");
//Next
continue;
}
//Check size
if (decoder->GetWidth()!=width || decoder->GetHeight()!=height)
{
//Get dimension
width = decoder->GetWidth();
height = decoder->GetHeight();
//Set size
numpixels = width*height*3/2;
//Set also frame rate and bps
encoder->SetFrameRate(25,300,500);
//Set them in the encoder
encoder->SetSize(width,height);
}
//Check size
if (!numpixels)
{
Error("numpixels equals 0");
//Next
continue;
}
//Check fpu
if (sendFPU)
{
//Send it
encoder->FastPictureUpdate();
//Reset
sendFPU = false;
}
//Encode it
VideoFrame *videoFrame = encoder->EncodeFrame(decoder->GetFrame(),numpixels);
//If was failed
if (!videoFrame)
{
Log("No video frame\n");
//Next
continue;
}
//Set frame time
//.........这里部分代码省略.........
开发者ID:crubia,项目名称:wt,代码行数:101,代码来源:mediabridgesession.cpp
示例18: RecVideo
/****************************************
* RecVideo
* Obtiene los packetes y los muestra
*****************************************/
int VideoStream::RecVideo()
{
VideoDecoder* videoDecoder = NULL;
VideoCodec::Type type;
timeval before;
timeval lastFPURequest;
DWORD lostCount=0;
DWORD frameTime = (DWORD)-1;
DWORD lastSeq = RTPPacket::MaxExtSeqNum;
bool waitIntra = false;
Log(">RecVideo\n");
//Get now
gettimeofday(&before,NULL);
//Not sent FPU yet
setZeroTime(&lastFPURequest);
//Mientras tengamos que capturar
while(receivingVideo)
{
//Get RTP packet
RTPPacket* packet = rtp.GetPacket();
//Check
if (!packet)
//Next
continue;
//Get extended sequence number and timestamp
DWORD seq = packet->GetExtSeqNum();
DWORD ts = packet->GetTimestamp();
//Get packet data
BYTE* buffer = packet->GetMediaData();
DWORD size = packet->GetMediaLength();
//Get type
type = (VideoCodec::Type)packet->GetCodec();
//Lost packets since last
DWORD lost = 0;
//If not first
if (lastSeq!=RTPPacket::MaxExtSeqNum)
//Calculate losts
lost = seq-lastSeq-1;
//Increase total lost count
lostCount += lost;
//Update last sequence number
lastSeq = seq;
//If lost some packets or still have not got an iframe
if(lostCount || waitIntra)
{
//Check if we got listener and more than 1/2 second have elapsed from last request
if (listener && getDifTime(&lastFPURequest)>minFPUPeriod)
{
//Debug
Debug("-Requesting FPU lost %d\n",lostCount);
//Reset count
lostCount = 0;
//Request it
listener->onRequestFPU();
//Request also over rtp
rtp.RequestFPU();
//Update time
getUpdDifTime(&lastFPURequest);
//Waiting for refresh
waitIntra = true;
}
}
//Check if it is a redundant packet
if (type==VideoCodec::RED)
{
//Get redundant packet
RTPRedundantPacket* red = (RTPRedundantPacket*)packet;
//Get primary codec
type = (VideoCodec::Type)red->GetPrimaryCodec();
//Check it is not ULPFEC redundant packet
if (type==VideoCodec::ULPFEC)
{
//Delete packet
delete(packet);
//Skip
continue;
}
//Update primary redundant payload
buffer = red->GetPrimaryPayloadData();
size = red->GetPrimaryPayloadSize();
}
//.........这里部分代码省略.........
开发者ID:tidehc,项目名称:media-server-1,代码行数:101,代码来源:videostream.cpp
示例19: _inputStream
StreamTranscoder::StreamTranscoder(
IInputStream& inputStream,
IOutputFile& outputFile,
const ProfileLoader::Profile& profile,
const int subStreamIndex,
const double offset
)
: _inputStream( &inputStream )
, _outputStream( NULL )
, _sourceBuffer( NULL )
, _frameBuffer( NULL )
, _inputDecoder( NULL )
, _generator( NULL )
, _currentDecoder( NULL )
, _outputEncoder( NULL )
, _transform( NULL )
, _subStreamIndex( subStreamIndex )
, _offset( offset )
, _canSwitchToGenerator( false )
{
// create a transcode case
switch( _inputStream->getStreamType() )
{
case AVMEDIA_TYPE_VIDEO :
{
// input decoder
VideoDecoder* inputVideo = new VideoDecoder( *static_cast<InputStream*>( _inputStream ) );
// set decoder options with empty profile to set some key options to specific values (example: threads to auto)
inputVideo->setProfile( ProfileLoader::Profile() );
inputVideo->setup();
_inputDecoder = inputVideo;
_currentDecoder = _inputDecoder;
// output encoder
VideoEncoder* outputVideo = new VideoEncoder( profile.at( constants::avProfileCodec ) );
_outputEncoder = outputVideo;
VideoFrameDesc outputFrameDesc = _inputStream->getVideoCodec().getVideoFrameDesc();
outputFrameDesc.setParameters( profile );
outputVideo->setProfile( profile, outputFrameDesc );
// output stream
_outputStream = &outputFile.addVideoStream( outputVideo->getVideoCodec() );
// buffers to process
_sourceBuffer = new VideoFrame( _inputStream->getVideoCodec().getVideoFrameDesc() );
_frameBuffer = new VideoFrame( outputVideo->getVideoCodec().getVideoFrameDesc() );
// transform
_transform = new VideoTransform();
// generator decoder
VideoGenerator* generatorVideo = new VideoGenerator();
generatorVideo->setVideoFrameDesc( outputVideo->getVideoCodec().getVideoFrameDesc() );
_generator = generatorVideo;
break;
}
case AVMEDIA_TYPE_AUDIO :
{
// input decoder
AudioDecoder* inputAudio = new AudioDecoder( *static_cast<InputStream*>( _inputStream ) );
// set decoder options with empty profile to set some key options to specific values (example: threads to auto)
inputAudio->setProfile( ProfileLoader::Profile() );
inputAudio->setup();
_inputDecoder = inputAudio;
_currentDecoder = _inputDecoder;
// output encoder
AudioEncoder* outputAudio = new AudioEncoder( profile.at( constants::avProfileCodec ) );
_outputEncoder = outputAudio;
AudioFrameDesc outputFrameDesc( _inputStream->getAudioCodec().getAudioFrameDesc() );
outputFrameDesc.setParameters( profile );
if( subStreamIndex > -1 )
{
// @todo manage downmix ?
outputFrameDesc.setChannels( 1 );
}
outputAudio->setProfile( profile, outputFrameDesc );
// output stream
_outputStream = &outputFile.addAudioStream( outputAudio->getAudioCodec() );
// buffers to process
AudioFrameDesc inputFrameDesc( _inputStream->getAudioCodec().getAudioFrameDesc() );
if( subStreamIndex > -1 )
inputFrameDesc.setChannels( 1 );
_sourceBuffer = new AudioFrame( inputFrameDesc );
_frameBuffer = new AudioFrame( outputAudio->getAudioCodec().getAudioFrameDesc() );
// transform
_transform = new AudioTransform();
// generator decoder
AudioGenerator* generatorAudio = new AudioGenerator();
generatorAudio->setAudioFrameDesc( outputAudio->getAudioCodec().getAudioFrameDesc() );
_generator = generatorAudio;
//.........这里部分代码省略.........
开发者ID:cchampet,项目名称:avTranscoder,代码行数:101,代码来源:StreamTranscoder.cpp
示例20: main
/* Runs the flesh detector over every frame of a video, writing the flesh
 * mask, outline and confidence images for each processed frame, then
 * prints the detector's timing statistics.
 * argv: [1] classifier file, [2] input video, [3] output directory.
 * Returns 0 on success, 1 on usage or load errors. */
int main(int argc, char* argv[])
{
  VideoDecoder decoder;
  FleshDetector fleshDetector;
  Image* inputImage;
  Image* fleshImage;
  Image* outlineImage;
  Image* confidenceImage;
  int frameNumber = 0;
  string vidFilename;
  char outputFilename[1024];
  if ( argc < 4 )
  {
    printf("Usage: %s <classifier file> <video file> <output directory>\n", argv[0]);
    return 1;
  }
  if ( !fleshDetector.Load(argv[1]) )
  {
    fprintf(stderr, "Error loading flesh detector %s\n", argv[1]);
    return 1;
  }
  vidFilename = argv[2];
  decoder.SetFilename(vidFilename);
  if ( !decoder.Load() )
  {
    fprintf(stderr, "Error loading video %s\n", argv[2]);
    return 1;
  }
  while ( decoder.UpdateFrame() )
  {
    inputImage = decoder.GetFrame();
    TimingAnalyzer_Start(0);
    if ( fleshDetector.Process(inputImage, &outlineImage, &fleshImage, &confidenceImage) )
    {
      TimingAnalyzer_Stop(0);
      /* snprintf instead of sprintf: argv[3] is caller-controlled, and a
       * long directory path would overflow the fixed 1024-byte buffer. */
      snprintf(outputFilename, sizeof(outputFilename), "%s/flesh%05d.ppm", argv[3], frameNumber);
      fleshImage->Save(outputFilename);
      snprintf(outputFilename, sizeof(outputFilename), "%s/frame%05d.ppm", argv[3], frameNumber);
      outlineImage->Save(outputFilename);
      snprintf(outputFilename, sizeof(outputFilename), "%s/confidence%05d.ppm", argv[3], frameNumber);
      confidenceImage->Save(outputFilename);
    }
    frameNumber++;
  }
  printf("FleshDetector Process Time Min: %d\tMax: %d\tMean: %d\n",
      TimingAnalyzer_Min(0), TimingAnalyzer_Max(0), TimingAnalyzer_Mean(0));
  printf("FleshDetector GetFleshImage Time Min: %d\tMax: %d\tMean: %d\n",
      TimingAnalyzer_Min(1), TimingAnalyzer_Max(1), TimingAnalyzer_Mean(1));
  printf("FleshDetector GetOutlineImage Time Min: %d\tMax: %d\tMean: %d\n",
      TimingAnalyzer_Min(2), TimingAnalyzer_Max(2), TimingAnalyzer_Mean(2));
  printf("FleshDetector GetFleshConfidenceImage Time Min: %d\tMax: %d\tMean: %d\n",
      TimingAnalyzer_Min(3), TimingAnalyzer_Max(3), TimingAnalyzer_Mean(3));
  printf("FleshDetector CalcConfidence Time Min: %d\tMax: %d\tMean: %d\n",
      TimingAnalyzer_Min(4), TimingAnalyzer_Max(4), TimingAnalyzer_Mean(4));
  return 0;
}
开发者ID:yodergj,项目名称:HandDetection,代码行数:64,代码来源:FleshBlockOutline.cpp
注:本文中的VideoDecoder类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论