This article collects typical usage examples of the C++ XIRef class. If you have been wondering what the XIRef class is for, how to use it, or where to find real usage examples, the curated class code examples here may help.
Twenty code examples of the XIRef class are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
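Before the examples, a minimal sketch of the recurring usage pattern may help. It assumes XIRef is XSDK's reference-counting smart pointer and that the headers live under the include paths shown; the function name and literal values are made up for illustration, while the member calls used (Append, GetDataSize, Map, Get, IsEmpty) are the ones that appear in the examples below.

#include <XSDK/XIRef.h>     // assumed header locations
#include <XSDK/XMemory.h>

using namespace XSDK;

void XIRefSketch()
{
    // Assigning a freshly allocated object hands ownership to the reference;
    // under the stated assumption, the object is released when the last
    // XIRef pointing at it goes away.
    XIRef<XMemory> buffer = new XMemory;

    // operator-> forwards to the managed object, just like a raw pointer.
    buffer->Append<uint8_t>( 42 );
    size_t size = buffer->GetDataSize();

    // Get() exposes the raw pointer (the examples use it for identity checks),
    // and IsEmpty() reports whether the reference currently holds an object.
    XMemory* raw = buffer.Get();
    if( !buffer.IsEmpty() && raw != NULL && size > 0 )
    {
        XIRef<XMemory> alias = buffer;   // copies share the same underlying object
    }
}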
Example 1: printf
void H264UserDataTest::TestZeroRawSEI()
{
    printf("H264UserDataTest::TestZeroRawSEI()\n");
    fflush(stdout);

    XIRef<XMemory> data = new XMemory;
    for(uint8_t i = 1; i < 77; ++i)
        data->Append<uint8_t>(0);

    XRef<SEIPayload> payload = new SEIPayload(data);
    CPPUNIT_ASSERT(payload->GetData().Get() == data.Get());
    CPPUNIT_ASSERT(payload->GetUUID() == XUuid("7e0858c4-38fe-48ea-852d-dace39badb30"));

    H264UserData before(payload);
    const std::vector<XRef<SEIPayload> >& beforePayloads = before.GetPayloads();
    CPPUNIT_ASSERT(beforePayloads.size() == 1);
    CPPUNIT_ASSERT(beforePayloads.front() == payload);

    XIRef<XMemory> sei = before.GenSEI();
    H264UserData after(sei->begin(), sei->GetDataSize());
    const std::vector<XRef<SEIPayload> >& afterPayloads = before.GetPayloads();
    CPPUNIT_ASSERT(afterPayloads.size() == 1);
    CPPUNIT_ASSERT(afterPayloads.front().Get() == payload.Get());
}
Developer ID: KennyDark, Project: opencvr, Lines: 25, Source: H264UserDataTest.cpp
Example 2: PacketFactoryDefault
AVDeMuxer::AVDeMuxer( XIRef<XSDK::XMemory> buffer, bool annexBFilter ) :
    _fileName(),
    _memoryIOContext( NULL ),
    _storage( new XMemory ),
    _pos( 0 ),
    _context( NULL ),
    _eof( false ),
    _deMuxPkt(),
    _filterPkt(),
    _streamTypes(),
    _videoStreamIndex( STREAM_TYPE_UNKNOWN ),
    _audioPrimaryStreamIndex( STREAM_TYPE_UNKNOWN ),
    _bsfc( (annexBFilter) ? av_bitstream_filter_init( "h264_mp4toannexb" ) : NULL ),
    _pf( new PacketFactoryDefault() )
{
    if( !Locky::IsRegistered() )
        X_THROW(("Please register AVKit::Locky before using this class."));

    _deMuxPkt.size = 0;
    _deMuxPkt.data = NULL;
    _filterPkt.size = 0;
    _filterPkt.data = NULL;

    size_t bufferSize = buffer->GetDataSize();
    _OpenCustomIOContext( buffer->Map(), bufferSize );
    _OpenStreams();
}
Developer ID: MultiSight, Project: avkit, Lines: 29, Source: AVDeMuxer.cpp
Example 3:
XIRef<XDomParserNode> XUTC::ToXML() const
{
    XIRef<XDomParserNode> node = new XDomParserNode;
    node->SetTagName("XUTC");
    return node;
}
Developer ID: MultiSight, Project: xsdk, Lines: 7, Source: XTimeZone.cpp
Example 4: XMemory
XIRef<XMemory> H264Encoder::GetExtraData() const
{
    XIRef<XMemory> ed = new XMemory( DEFAULT_EXTRADATA_BUFFER_SIZE );
    memcpy( &ed->Extend( _extraData.GetDataSize() ), _extraData.Map(), _extraData.GetDataSize() );
    return ed;
}
Developer ID: dulton, Project: avkit-1, Lines: 8, Source: H264Encoder.cpp
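A reading of the XMemory calls used here (and again in Example 13), offered as an assumption rather than documented behavior: Extend(n) appears to grow the buffer by n bytes and return a reference to the first newly added byte, so taking its address yields a writable destination for memcpy, while Map() and GetDataSize() expose the existing contents and their length.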
Example 5: dm
struct StreamStatistics AVDeMuxer::GetVideoStreamStatistics( const XSDK::XString& fileName )
{
    struct StreamStatistics result;

    XStatistics<Average,uint32_t> avgFrameSize;

    uint32_t indexFirstKey = 0;
    bool foundFirstKey = false;
    bool foundGOPSize = false;
    uint32_t currentIndex = 0;

    AVDeMuxer dm( fileName );

    int videoStreamIndex = dm.GetVideoStreamIndex();

    result.frameRate = (((double)1.0) / dm.GetSecondsBetweenFrames( videoStreamIndex ));

    pair<int,int> tb = dm.GetTimeBase( videoStreamIndex );
    result.timeBaseNum = tb.first;
    result.timeBaseDen = tb.second;

    int streamIndex = 0;
    while( dm.ReadFrame( streamIndex ) )
    {
        if( streamIndex != videoStreamIndex )
            continue;

        if( dm.IsKey() )
        {
            if( !foundFirstKey )
            {
                indexFirstKey = currentIndex;
                foundFirstKey = true;
            }
            else
            {
                if( !foundGOPSize )
                {
                    result.gopSize = currentIndex - indexFirstKey;
                    foundGOPSize = true;
                }
            }
        }

        XIRef<Packet> pkt = dm.Get();
        avgFrameSize.AddSample( pkt->GetDataSize() );

        currentIndex++;
    }

    uint32_t avgSize = 0;
    avgFrameSize.GetResult( avgSize );

    result.averageBitRate = (uint32_t)((avgSize * (1.0 / dm.GetSecondsBetweenFrames(videoStreamIndex))) * 8);
    result.numFrames = currentIndex;

    return result;
}
Developer ID: MultiSight, Project: avkit, Lines: 58, Source: AVDeMuxer.cpp
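The final bit-rate line is simply: average frame size in bytes × frames per second × 8 bits per byte. With hypothetical numbers, an average frame of 25,000 bytes at 15 fps (GetSecondsBetweenFrames ≈ 0.0667) gives 25,000 × 15 × 8 = 3,000,000, i.e. roughly 3 Mbit/s stored in result.averageBitRate.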
Example 6: _msg
ExportOverlay::ExportOverlay( const XSDK::XString& msg,
                              bool withTime,
                              OverlayHAlign hAlign,
                              OverlayVAlign vAlign,
                              uint16_t width,
                              uint16_t height,
                              int timeBaseNum,
                              int timeBaseDen ) :
    _msg( msg ),
    _decodedMsg(),
    _withTime( withTime ),
    _hAlign( hAlign ),
    _vAlign( vAlign ),
    _width( width ),
    _height( height ),
    _timeBaseNum( timeBaseNum ),
    _timeBaseDen( timeBaseDen ),
    _timePerFrame( ((double)timeBaseNum / timeBaseDen) ),
    _logoX( (uint16_t)((double)_width * 0.79) ),
    _logoY( (uint16_t)((double)_height * 0.92) ),
    _logoWidth( (uint16_t)((double)_width * 0.2) ),
    _logoHeight( (uint16_t)((double)_height * 0.07) ),
    _wmSurface( NULL )
{
    if( !_msg.empty() )
    {
        XIRef<XSDK::XMemory> decodedBuf = _msg.FromBase64();
        _decodedMsg = XString( (const char*)decodedBuf->Map(), decodedBuf->GetDataSize() );
    }

    X_LOG_NOTICE("watermark: x=%u, y=%u, w=%u, h=%u", _logoX, _logoY, _logoWidth, _logoHeight);

    _wmSurface = cairo_image_surface_create( CAIRO_FORMAT_ARGB32, _logoWidth, _logoHeight );
    if( !_wmSurface )
        X_THROW(("Unable to allocate cairo surface for watermark: _logoWidth = %u, _logoHeight = %u", _logoWidth, _logoHeight));

    cairo_t* wmCr = cairo_create( _wmSurface );
    if( !wmCr )
        X_THROW(("Unable to allocate cairo handle for watermark."));

    cairo_scale( wmCr, (double)_width / 1408, (double)_height / 792 );

    GError* err = NULL;
    RsvgHandle* rsvgHandle = rsvg_handle_new_from_file("multisight-logo-white-outline.svg", &err);
    if( !rsvgHandle )
        X_THROW(("Unable to open ms logo from svg for watermark."));

    if( rsvg_handle_render_cairo( rsvgHandle, wmCr ) != TRUE )
        X_THROW(("svg render failed for watermark."));

    g_object_unref(rsvgHandle);
    cairo_destroy( wmCr );
}
Developer ID: MultiSight, Project: exporty, Lines: 58, Source: TranscodeExport.cpp
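The placement constants above put the watermark in the lower-right corner. For a hypothetical 1920×1080 export they evaluate to _logoX = 1516, _logoY = 993, _logoWidth = 384 and _logoHeight = 75, so the 384×75 logo surface sits inside the frame (1516 + 384 = 1900 ≤ 1920 and 993 + 75 = 1068 ≤ 1080).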
Example 7: WriteJPEGFile
void JPEGEncoder::WriteJPEGFile( const XSDK::XString& fileName, XIRef<Packet> jpeg )
{
    FILE* outFile = fopen( fileName.c_str(), "wb" );
    if( !outFile )
        X_THROW(("Unable to open output file."));

    fwrite( jpeg->Map(), 1, jpeg->GetDataSize(), outFile );

    fclose( outFile );
}
Developer ID: MultiSight, Project: avkit, Lines: 10, Source: JPEGEncoder.cpp
Example 8: lock
XIRef<XTimeZone> XUTC::Instance()
{
    XGuard lock(_cInstanceLock);

    static XIRef<XTimeZone> instance;

    if( instance.IsEmpty() )
        instance = new XUTC;

    return instance;
}
Developer ID: MultiSight, Project: xsdk, Lines: 11, Source: XTimeZone.cpp
Example 9: WriteVideoPacket
void AVMuxer::WriteVideoPacket( XIRef<Packet> input, bool keyFrame )
{
    if( _context->pb == NULL )
        _OpenIO();

    if( _isTS )
    {
        if( _numVideoFramesWritten == 0 )
        {
            if( _fileNum == 0 )
            {
                if( avformat_write_header( _context, NULL ) < 0 )
                    X_THROW(("Unable to write header to container."));
            }

            av_opt_set( _context->priv_data, "mpegts_flags", "resend_headers", 0 );
        }
    }
    else
    {
        if( !_oweTrailer )
        {
            if( avformat_write_header( _context, NULL ) < 0 )
                X_THROW(("Unable to write header to container."));

            _oweTrailer = true;
        }
    }

    AVPacket pkt;
    av_init_packet( &pkt );

    pkt.stream_index = _stream->index;
    pkt.data = input->Map();
    pkt.size = input->GetDataSize();

    pkt.pts = _ts;
    pkt.dts = _ts;

    // convert a tick of 1 from the codec's time_base (e.g. 1/15) to the container's time_base
    _ts += av_rescale_q(1, _stream->codec->time_base, _stream->time_base);

    pkt.flags |= (keyFrame) ? AV_PKT_FLAG_KEY : 0;

    if( av_interleaved_write_frame( _context, &pkt ) < 0 )
        X_THROW(("Unable to write video frame."));

    _numVideoFramesWritten++;
}
Developer ID: MultiSight, Project: avkit, Lines: 50, Source: AVMuxer.cpp
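The timestamp update uses FFmpeg's av_rescale_q(a, bq, cq), which computes a × bq / cq with rounding. For a hypothetical codec time_base of 1/15 and a typical MPEG-TS stream time_base of 1/90000, one codec tick rescales to 1 × (1/15) ÷ (1/90000) = 6000, so _ts advances by 6000 container ticks per frame written.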
Example 10: SetExtraData
void AVMuxer::SetExtraData( XIRef<XSDK::XMemory> extraData )
{
    if( !(_context->oformat->flags & AVFMT_GLOBALHEADER) )
        X_LOG_INFO("Extradata not required for %s container.", _fileName.c_str());
    else
    {
        _stream->codec->extradata = (uint8_t*)av_mallocz( extraData->GetDataSize() );
        if( !_stream->codec->extradata )
            X_THROW(("Unable to allocate extradata storage."));

        _stream->codec->extradata_size = extraData->GetDataSize();
        memcpy( _stream->codec->extradata, extraData->Map(), extraData->GetDataSize() );
    }
}
Developer ID: KennyDark, Project: opencvr, Lines: 14, Source: AVMuxer.cpp
Example 11: FinalizeBuffer
void AVMuxer::FinalizeBuffer( XIRef<XSDK::XMemory> buffer )
{
    if( _location != OUTPUT_LOCATION_BUFFER )
        X_THROW(("Unable to finalize a non buffer IO object."));

    _FinalizeCommon();

    uint8_t* fileBytes = NULL;
    int fileSize = avio_close_dyn_buf( _context->pb, &fileBytes );
    _context->pb = NULL;

    if( fileBytes == NULL || fileSize == 0 )
        X_THROW(("Unable to finalize empty buffer."));

    buffer->ResizeData( fileSize );
    memcpy( buffer->Map(), fileBytes, fileSize );

    av_freep( &fileBytes );
}
Developer ID: KennyDark, Project: opencvr, Lines: 20, Source: AVMuxer.cpp
Example 12: XSimpleTimeZone
XIRef<XTimeZone> XSimpleTimeZone::FromXML(XIRef<XDomParserNode> node)
{
    const XString utcOffsetStr = node->GetMetaData("utcOffset");
    const XString dstOffsetStr = node->GetMetaData("dstOffset");

    if(node->GetTagName() != "XSimpleTimeZone" ||
       utcOffsetStr.empty() ||
       (!verifyDigit(utcOffsetStr[0]) && utcOffsetStr[0] != '-') ||
       count_if(utcOffsetStr.begin() + 1, utcOffsetStr.end(), verifyDigit) != (int)utcOffsetStr.size() - 1 ||
       dstOffsetStr.empty() ||
       (!verifyDigit(dstOffsetStr[0]) && dstOffsetStr[0] != '-') ||
       count_if(dstOffsetStr.begin() + 1, dstOffsetStr.end(), verifyDigit) != (int)dstOffsetStr.size() - 1)
    {
        return 0;
    }

    const int utcOffset = utcOffsetStr.ToInt();
    const int dstOffset = dstOffsetStr.ToInt();

    return new XSimpleTimeZone(utcOffset, dstOffset);
}
Developer ID: MultiSight, Project: xsdk, Lines: 21, Source: XTimeZone.cpp
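The checks above describe the node this parser accepts: a tag named XSimpleTimeZone whose utcOffset and dstOffset metadata are integers (an optional leading '-' followed only by digits). A hypothetical serialized form might look like the line below; the values and their units are made up for illustration.

<XSimpleTimeZone utcOffset="-21600" dstOffset="3600"/>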
Example 13: fopen
XIRef<XMemory> AVDeMuxer::LoadFile( const XSDK::XString& fileName )
{
    XIRef<XMemory> buffer = new XMemory;

    struct x_file_info fileInfo;
    if( x_stat( fileName, &fileInfo ) < 0 )
        X_THROW(("Unable to stat specified file."));

    FILE* inFile = fopen( fileName.c_str(), "rb" );
    if( !inFile )
        X_THROW(("Unable to open specified file."));

    uint8_t* d = &buffer->Extend( fileInfo._fileSize );

    int itemsRead = fread( d, 1, fileInfo._fileSize, inFile );

    fclose( inFile );

    if( itemsRead != fileInfo._fileSize )
        X_THROW(("Failed to read all of the data from the file."));

    return buffer;
}
Developer ID: MultiSight, Project: avkit, Lines: 23, Source: AVDeMuxer.cpp
Example 14: while
void JPEGEncoder::EncodeYUV420P( XIRef<Packet> input )
{
    AVFrame frame;
    avcodec_get_frame_defaults( &frame );

    _output = _pf->Get( DEFAULT_JPEG_ENCODE_BUFFER_SIZE + DEFAULT_PADDING );

    uint8_t* pic = input->Map();

    frame.data[0] = pic;
    pic += (_context->width * _context->height);
    frame.data[1] = pic;
    pic += ((_context->width/4) * _context->height);
    frame.data[2] = pic;

    frame.linesize[0] = _context->width;
    frame.linesize[1] = (_context->width/2);
    frame.linesize[2] = (_context->width/2);

    int attempt = 0;
    int gotPacket = 0;
    AVPacket pkt;

    do
    {
        av_init_packet( &pkt );
        pkt.data = _output->Map();
        pkt.size = _output->GetBufferSize();

        if( avcodec_encode_video2( _context,
                                   &pkt,
                                   &frame,
                                   &gotPacket ) < 0 )
            X_THROW(("Error while encoding."));

        attempt++;
    } while( gotPacket == 0 && (attempt < _encodeAttempts) );

    _output->SetDataSize( pkt.size );
}
Developer ID: MultiSight, Project: avkit, Lines: 40, Source: JPEGEncoder.cpp
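For a planar 4:2:0 frame the Y plane holds width × height bytes and each chroma plane holds (width/2) × (height/2) = width × height / 4 bytes, which is where the linesize values of width and width/2 come from. The example advances the U-plane pointer by (width/4) × height, which is the same byte count for even dimensions, merely written differently.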
Example 15:
void YUV420PToARGB24::Transform( XIRef<Packet> input, size_t width, size_t height )
{
    uint8_t* src = input->Map();

    AVFrame frame;
    frame.data[0] = src;
    src += width * height;
    frame.data[1] = src;
    src += ((width/2) * (height/2));
    frame.data[2] = src;

    frame.linesize[0] = width;
    frame.linesize[1] = (width/2);
    frame.linesize[2] = (width/2);

    size_t dataSize = height * (width*4);

    _rgb24 = _pf->Get( dataSize + DEFAULT_PADDING );
    _rgb24->SetDataSize( dataSize );

    AVPicture pict;
    pict.data[0] = _rgb24->Map();
    pict.linesize[0] = width * 4;

    if( (width != _currentWidth) || (height != _currentHeight) )
        _DestroyScaler();

    if( !_scaler )
        _InitScaler( width, height );

    int ret = sws_scale( _scaler,
                         frame.data,
                         frame.linesize,
                         0,
                         height,
                         pict.data,
                         pict.linesize );
}
Developer ID: MultiSight, Project: avkit, Lines: 37, Source: YUV420PToARGB24.cpp
Example 16: memcpy
XIRef<Packet> AVDeMuxer::Get()
{
    XIRef<Packet> pkt;

    if( _bsfc && (_deMuxPkt.stream_index == _videoStreamIndex) )
    {
        pkt = _pf->Get( (size_t)_filterPkt.size + DEFAULT_PADDING );
        pkt->SetDataSize( _filterPkt.size );
        memcpy( pkt->Map(), _filterPkt.data, _filterPkt.size );
    }
    else
    {
        pkt = _pf->Get( (size_t)_deMuxPkt.size + DEFAULT_PADDING );
        pkt->SetDataSize( _deMuxPkt.size );
        memcpy( pkt->Map(), _deMuxPkt.data, _deMuxPkt.size );
    }

    if( IsKey() )
        pkt->SetKey( true );

    return pkt;
}
Developer ID: MultiSight, Project: avkit, Lines: 22, Source: AVDeMuxer.cpp
Example 17: cairo_image_surface_create
XIRef<Packet> ExportOverlay::Process( XIRef<Packet> input, int64_t clockTime )
{
    cairo_surface_t* surface = NULL;
    cairo_t* cr = NULL;

    try
    {
        surface = cairo_image_surface_create( CAIRO_FORMAT_ARGB32, _width, _height );
        cr = cairo_create( surface );

        uint8_t* cairoSrc = cairo_image_surface_get_data( surface );
        int cairoSrcWidth = cairo_image_surface_get_width( surface );
        int cairoSrcHeight = cairo_image_surface_get_height( surface );

        if( cairo_image_surface_get_stride( surface ) != (cairoSrcWidth * 4) )
            X_THROW(("Unexpected cairo stride!"));

        cairo_set_source_rgba( cr, 0.0, 0.0, 0.0, 1.0 );
        cairo_rectangle( cr, 0.0, 0.0, cairoSrcWidth, cairoSrcHeight );
        cairo_fill( cr );

        memcpy( cairoSrc, input->Map(), input->GetDataSize() );

        PangoLayout* layout = pango_cairo_create_layout( cr );
        pango_layout_set_text( layout, _decodedMsg.c_str(), -1 );

        PangoFontDescription* desc = pango_font_description_from_string( "Helvetica 22" );
        pango_layout_set_font_description( layout, desc );
        pango_font_description_free( desc );

        PangoRectangle logicalRect;
        pango_layout_get_pixel_extents( layout, NULL, &logicalRect );

        uint16_t y = (_vAlign == V_ALIGN_TOP) ? 14 : _height - 52;
        uint16_t timeX = 0;
        uint16_t msgX = 0;
        uint16_t bgX = 0;
        uint16_t bgWidth = 0;
        _GetXPositions( timeX, msgX, logicalRect.width, bgX, bgWidth );

        cairo_set_source_rgba( cr, 0.5, 0.5, 0.5, 0.50 );
        cairo_rectangle( cr, bgX, y, bgWidth, 32 );
        cairo_fill( cr );

        cairo_set_source_rgba( cr, 1.0, 1.0, 1.0, 1.0 );

        if( !_decodedMsg.empty() )
            _DrawMessage( cr, layout, msgX, y );

        if( _withTime )
            _DrawTime( cr, timeX, y, clockTime );

        g_object_unref( layout );

        // copy from our watermark surface to our output surface...
        cairo_set_source_surface( cr, _wmSurface, _logoX, _logoY );
        cairo_rectangle( cr, _logoX, _logoY, _logoWidth, _logoHeight );
        cairo_clip( cr );
        cairo_paint_with_alpha( cr, 0.70 );

        // Copy data out of our cairo surface into our output packet...
        size_t outputSize = (cairoSrcWidth * 4) * cairoSrcHeight;
        XIRef<Packet> dest = new Packet( outputSize );
        memcpy( dest->Map(), cairoSrc, outputSize );
        dest->SetDataSize( outputSize );

        cairo_destroy( cr );
        cairo_surface_destroy( surface );

        return dest;
    }
    catch(...)
    {
        if( cr )
            cairo_destroy( cr );
        if( surface )
            cairo_surface_destroy( surface );
        throw;
    }
}
Developer ID: MultiSight, Project: exporty, Lines: 81, Source: TranscodeExport.cpp
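The output packet size follows directly from the ARGB32 layout checked earlier: 4 bytes per pixel times width times height. For a hypothetical 1280×720 overlay that is (1280 × 4) × 720 = 3,686,400 bytes per processed frame.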
Example 18: _recorderIP
Config::Config() :
    _recorderIP( "127.0.0.1" ),
    _recorderPort( 10013 ),
    _logFilePath( "" ),
    _hasDRIEncoding( false ),
    _hasDRIDecoding( false ),
    _transcodeSleep( 0 ),
    _enableDecodeSkipping( false ),
    _cacheLok(),
    _progressCache(10)
{
    if( XPath::Exists( "config.xml" ) )
    {
        XIRef<XDomParser> domParser = new XDomParser;
        domParser->OpenAndSetDocument( "config.xml" );

        XIRef<XDomParserNode> rootNode = domParser->Parse();

        {
            list<XIRef<XDomParserNode> > searchResults = domParser->SearchForAll( "recorder_ip", rootNode );
            if( !searchResults.empty() )
                _recorderIP = searchResults.front()->GetData();
        }

        {
            list<XIRef<XDomParserNode> > searchResults = domParser->SearchForAll( "recorder_port", rootNode );
            if( !searchResults.empty() )
                _recorderPort = searchResults.front()->GetData().ToInt();
        }

        {
            list<XIRef<XDomParserNode> > searchResults = domParser->SearchForAll( "log_file_path", rootNode );
            if( !searchResults.empty() )
                _logFilePath = searchResults.front()->GetData();
        }

        {
            list<XIRef<XDomParserNode> > searchResults = domParser->SearchForAll( "transcode_sleep", rootNode );
            if( !searchResults.empty() )
                _transcodeSleep = searchResults.front()->GetData().ToInt();
        }

        {
            list<XIRef<XDomParserNode> > searchResults = domParser->SearchForAll( "decode_skipping", rootNode );
            if( !searchResults.empty() )
                _enableDecodeSkipping = searchResults.front()->GetData().ToInt() != 0;
        }
    }

    try
    {
#ifndef WIN32
        _hasDRIEncoding = VAH264Encoder::HasHW( "/dev/dri/card0" );
#endif
    }
    catch(...)
    {
        X_LOG_NOTICE("/dev/dri/card0 device not supported for encoding.");
    }

    try
    {
#ifndef WIN32
        _hasDRIDecoding = VAH264Decoder::HasHW( "/dev/dri/card0" );
#endif
    }
    catch(...)
    {
        X_LOG_NOTICE("/dev/dri/card0 device not supported for decoding.");
    }
}
Developer ID: MultiSight, Project: exporty, Lines: 78, Source: Config.cpp
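Putting the searched element names together, a config.xml this constructor would pick up might look like the sketch below. The root element name and every value are hypothetical; only the child element names come from the code above.

<config>
    <recorder_ip>127.0.0.1</recorder_ip>
    <recorder_port>10013</recorder_port>
    <log_file_path>/var/log/exporty.log</log_file_path>
    <transcode_sleep>0</transcode_sleep>
    <decode_skipping>1</decode_skipping>
</config>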
Example 19: _GetTMPName
void TranscodeExport::Create( XIRef<XMemory> output )
{
    XString tempFileName = _GetTMPName( _fileName );

    // If there is only 1 export in progress (us), but the temp file exists, then it means we were interrupted
    // (either a power issue, or a segfault) and we should delete the temporary.
    if( _exportsInProgress == 1 )
    {
        if( XPath::Exists(tempFileName) )
            unlink(tempFileName.c_str());
    }

    if( XPath::Exists(tempFileName) )
        X_THROW(("Export in progress exception: %s", tempFileName.c_str()));

    bool outputToFile = (output.IsEmpty()) ? true : false;

    H264Decoder decoder( GetFastH264DecoderOptions() );
    XRef<YUV420PToARGB24> yuvToARGB = new YUV420PToARGB24;
    XRef<ARGB24ToYUV420P> argbToYUV = new ARGB24ToYUV420P;
    XRef<H264Transcoder> transcoder;
    XRef<H264Encoder> encoder;
    XRef<AVMuxer> muxer;
    XRef<ExportOverlay> ov;

    bool wroteToContainer = false;
    auto lastProgressTime = steady_clock::now();

    // We are going to count how many decoding or encoding exceptions we get... If it
    // ever exceeds some large threshold, we bail on this export.
    int64_t codingExceptions = 0;

    XString recorderURI;
    while( _recorderURLS.GetNextURL( recorderURI ) )
    {
        auto now = steady_clock::now();
        if( wroteToContainer && duration_cast<seconds>(now - lastProgressTime).count() > 2 )
        {
            _progress( _recorderURLS.PercentComplete() );
            lastProgressTime = now;
        }

        try
        {
            XIRef<XMemory> responseBuffer = FRAME_STORE_CLIENT::FetchMedia( _config->GetRecorderIP(),
                                                                            _config->GetRecorderPort(),
                                                                            recorderURI );

            ResultParser resultParser;
            resultParser.Parse( responseBuffer );

            FRAME_STORE_CLIENT::ResultStatistics stats = resultParser.GetStatistics();

            // If we are not provided with a bit rate or a frame rate, we use the source's values.
            if( _bitRate == 0 )
                _bitRate = stats.averageBitRate;
            if( _maxRate == 0 )
                _maxRate = 2 * stats.averageBitRate;
            if( _bufSize == 0 )
                _bufSize = 2 * stats.averageBitRate;
            if( _frameRate == 0.0 )
                _frameRate = stats.frameRate;

            // Fix for ffmpeg's inability to make files with fps < 6.0. Don't believe me? Try these 2 commands and play
            // the output in vlc:
            //
            // # generate a test movie of the game of life in life.mp4
            // ffmpeg -f lavfi -i life -frames:v 1000 life.mp4
            // # transcode and drop the framerate of life.mp4 to 1 fps. output.mp4 won't play in vlc and will have a weird
            // # pause at the beginning for other players.
            // ffmpeg -i life.mp4 -vf fps=fps=1/1 -vcodec h264 output.mp4
            //
            if( _frameRate < 6.0 )
                _frameRate = 6.0;

            int outputTimeBaseNum = 0;
            int outputTimeBaseDen = 0;
            int inputTimeBaseNum = 0;
            int inputTimeBaseDen = 0;

            AVKit::DToQ( (1/stats.frameRate), inputTimeBaseNum, inputTimeBaseDen );
            AVKit::DToQ( (1/_frameRate), outputTimeBaseNum, outputTimeBaseDen );

            if( transcoder.IsEmpty() )
            {
                transcoder = new H264Transcoder( inputTimeBaseNum, inputTimeBaseDen,
                                                 outputTimeBaseNum, outputTimeBaseDen,
                                                 _speed,
                                                 // if our input is key only, enable decode skipping...
                                                 _recorderURLS.KeyFrameOnly() );
            }

            double secondsPer = AVKit::QToD(inputTimeBaseNum, inputTimeBaseDen) / (AVKit::QToD(inputTimeBaseNum, inputTimeBaseDen) / (AVKit::QToD(outputTimeBaseNum, outputTimeBaseDen) * _speed));
            int traversalNum = 0;
            int traversalDen = 0;
            AVKit::DToQ( secondsPer, traversalNum, traversalDen );
//......... the rest of this function is omitted here .........
Developer ID: MultiSight, Project: exporty, Lines: 101, Source: TranscodeExport.cpp
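A note on the last visible line of arithmetic: writing A for the input seconds-per-frame and B for the output seconds-per-frame, the expression is A / (A / (B × _speed)), which simplifies algebraically to B × _speed. In other words, secondsPer ends up as the output frame duration scaled by the export speed, which DToQ then converts back into the traversal rational.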
Example 20: WriteVideoFrame
void AVMuxer::WriteVideoFrame( XIRef<XMemory> frame, bool keyFrame )
{
    WriteVideoFrame( frame->Map(), frame->GetDataSize(), keyFrame );
}
Developer ID: KennyDark, Project: opencvr, Lines: 4, Source: AVMuxer.cpp
Note: The XIRef class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The code snippets were selected from open-source projects contributed by various programmers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not repost without permission.