
C++ VideoFrame Class Code Examples


This article collects typical usage examples of the C++ VideoFrame class. If you have been wondering what VideoFrame is for, how to call it, or what real-world VideoFrame code looks like, the hand-picked examples below should help.



The 20 VideoFrame examples shown below are drawn from open-source projects and are ordered by popularity.

Example 1: clear_if

VideoFrame VideoDecoderContext::decodeVideo(OptionalErrorCode ec, const Packet &packet, size_t offset, size_t *decodedBytes, bool autoAllocateFrame)
{
    clear_if(ec);

    VideoFrame outFrame;
    if (!autoAllocateFrame)
    {
        outFrame = {pixelFormat(), width(), height(), 32};

        if (!outFrame.isValid())
        {
            throws_if(ec, Errors::FrameInvalid);
            return VideoFrame();
        }
    }

    int gotFrame = 0;
    auto st = decodeCommon(outFrame, packet, offset, gotFrame, avcodec_decode_video_legacy);

    if (get<1>(st)) {
        throws_if(ec, get<0>(st), *get<1>(st));
        return VideoFrame();
    }

    if (!gotFrame)
        return VideoFrame();

    outFrame.setPictureType(AV_PICTURE_TYPE_I);

    if (decodedBytes)
        *decodedBytes = get<0>(st);

    return outFrame;
}
Developer: wlanjie, Project: AndroidFFmpeg, Lines: 34, Source: codeccontext.cpp
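
As a usage sketch, a call site for this wrapper could look like the fragment below. Only the decodeVideo() signature is taken from the example above; the decoder context vdec, the packet pkt, and the surrounding setup are assumptions.

    std::error_code ec;
    size_t decodedBytes = 0;
    // offset = 0; autoAllocateFrame = true lets the decoder allocate the output
    av::VideoFrame frame = vdec.decodeVideo(ec, pkt, 0, &decodedBytes, true);
    if (ec) {
        std::cerr << "decode error: " << ec.message() << '\n';
    } else if (frame.isValid()) {
        std::cout << "consumed " << decodedBytes << " bytes, got "
                  << frame.width() << 'x' << frame.height() << " frame\n";
    }   // an invalid frame without an error just means no frame is ready yet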


Example 2: newVideoFrame

	VideoFrame VideoFrame::newVideoFrame(VideoFrame videoFrame){
		if(videoFrame.data->createdTexPixels){
			return newVideoFrame(videoFrame.getPixelsRef());
		}else{
			return newVideoFrame(videoFrame.getFboRef());
		}
	}
Developer: jurcello, Project: ofxPlaymodes, Lines: 7, Source: VideoFrame.cpp


Example 3: locker

void VideoBuffers::CheckDecodedFrames(void)
{
    QMutexLocker locker(m_lock);

    QList<VideoFrame*> recovered;
    QList<VideoFrame*>::iterator it = m_reference.begin();
    for ( ; it != m_reference.end(); ++it)
        if (!m_decoded.contains((*it)))
            recovered.append((*it));

    while (!recovered.isEmpty())
    {
        VideoFrame* frame = recovered.takeFirst();
        m_reference.removeOne(frame);
        if (frame->Discard())
        {
            delete frame;
            m_frameCount--;
        }
        else
        {
            m_unused.append(frame);
        }
    }
}
Lines: 25 (developer, project, and source file not recorded)


Example 4: doProcessFrame

bool VideoEncoderX264or5::doProcessFrame(Frame *org, Frame *dst)
{
    if (!(org && dst)) {
        utils::errorMsg("Error encoding video frame: org or dst are NULL");
        return false;
    }

    VideoFrame* rawFrame = dynamic_cast<VideoFrame*> (org);
    VideoFrame* codedFrame = dynamic_cast<VideoFrame*> (dst);

    if (!rawFrame || !codedFrame) {
        utils::errorMsg("Error encoding video frame: org and dst MUST be VideoFrame");
        return false;
    }

    if (!reconfigure(rawFrame, codedFrame)) {
        utils::errorMsg("Error encoding video frame: reconfigure failed");
        return false;
    }

    if (!fill_x264or5_picture(rawFrame)){
        utils::errorMsg("Could not fill x264_picture_t from frame");
        return false;
    }

    if (!encodeFrame(codedFrame)) {
        utils::errorMsg("Could not encode video frame");
        return false;
    }

    codedFrame->setSize(rawFrame->getWidth(), rawFrame->getHeight());

    return true;
}
Developer: donfanning, Project: liveMediaStreamer, Lines: 34, Source: VideoEncoderX264or5.cpp
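
The pair of dynamic_cast checks above is a standard checked-downcast guard: the stage refuses to run unless both base-class pointers really are VideoFrames. The same idiom can be factored into a small helper; the sketch below is a generic illustration, not code from liveMediaStreamer.

    #include <cstdio>

    // Returns the downcast pointer, or nullptr (after logging) when the
    // runtime type does not match.
    template <typename Derived, typename Base>
    Derived* checkedCast(Base* base, const char* name)
    {
        Derived* p = dynamic_cast<Derived*>(base);
        if (!p)
            std::fprintf(stderr, "Error: %s is not of the expected type\n", name);
        return p;
    }

    // e.g.  VideoFrame* rawFrame = checkedCast<VideoFrame>(org, "org");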


Example 5: addFramesToDeinterlace

void DiscardDeint::filter(QQueue< FrameBuffer > &framesQueue)
{
    int insertAt = addFramesToDeinterlace(framesQueue);
    while (!internalQueue.isEmpty())
    {
        FrameBuffer dequeued = internalQueue.dequeue();
        VideoFrame *videoFrame = VideoFrame::fromData(dequeued.data);
        const bool TFF = isTopFieldFirst(videoFrame);
        videoFrame->setNoInterlaced();
        for (int p = 0; p < 3; ++p)
        {
            const int linesize = videoFrame->linesize[p];
            quint8 *src = videoFrame->data[p];
            quint8 *dst = videoFrame->data[p];
            const int lines = (p ? h >> 2 : h >> 1) - 1;
            if (!TFF)
            {
                memcpy(dst, src + linesize, linesize);
                src += linesize;
                dst += linesize;
            }
            dst += linesize;
            src += linesize;
            for (int i = 0; i < lines; ++i)
            {
                VideoFilters::averageTwoLines(dst, src - linesize, src + linesize, linesize);
                src += linesize << 1;
                dst += linesize << 1;
            }
            if (TFF)
                memcpy(dst, src - linesize, linesize);
        }
        framesQueue.insert(insertAt++, dequeued);
    }
}
Developer: mitya57, Project: QMPlay2, Lines: 35, Source: DiscardDeint.cpp
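
The filter keeps one field and rebuilds the discarded field's lines by averaging the kept lines above and below, which is why the loop advances in steps of two linesizes. VideoFilters::averageTwoLines itself is not shown; assuming it is a bytewise average with rounding, a plain implementation would be:

    #include <cstdint>

    // dst gets the rounded per-byte average of rows a and b (len bytes each).
    static void averageTwoLines(uint8_t* dst, const uint8_t* a,
                                const uint8_t* b, int len)
    {
        for (int i = 0; i < len; ++i)
            dst[i] = static_cast<uint8_t>((a[i] + b[i] + 1) >> 1);
    }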


Example 6: drawNextFrame

void VideoRenderer::drawNextFrame(){
    VideoFrame * frame = source->getNextVideoFrame();
    if(frame!=NULL){
        frame->getTextureRef().draw(0,0);
        frame->release();
    }
}
Developer: NAML, Project: ofxPlaymodes, Lines: 7, Source: VideoRenderer.cpp


Example 7: render

void Window::render(const VideoFrame& frame)
{
    LogDebug("Rendering frame " << frame.getId());
    glClear(GL_COLOR_BUFFER_BIT);

    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);

    glEnableVertexAttribArray(1);
    glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);

    // TODO: consider linesize padding here
    // TODO: use glTexSubImage2D for more performance
    glTexImage2D(GL_TEXTURE_2D,
                 0,
                 GL_RED,
                 frame.getWidth(),
                 frame.getHeight(),
                 0,
                 GL_RED,
                 GL_UNSIGNED_BYTE,
                 frame.getLumaData());

    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, (void*) 0);

    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(0);
    glfwSwapBuffers(glfwWindow);
}
Developer: dcvetko, Project: mdetect, Lines: 31, Source: window.cpp
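
The second TODO is easy to act on: allocate the texture once with glTexImage2D, then update only the texel data each frame. A sketch of that variant, assuming the frame size stays constant:

    // one-time allocation (e.g. during window setup)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, width, height, 0,
                 GL_RED, GL_UNSIGNED_BYTE, nullptr);

    // per frame: overwrite the existing storage, no reallocation
    glTexSubImage2D(GL_TEXTURE_2D, 0,
                    0, 0,                            // x/y offset
                    frame.getWidth(), frame.getHeight(),
                    GL_RED, GL_UNSIGNED_BYTE,
                    frame.getLumaData());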


Example 8: upload

bool VaApiMixer::upload(const VideoFrame &frame, bool deint) {
	if (!m_glSurface)
		return false;
	static const int specs[MP_CSP_COUNT] = {
		0,					//MP_CSP_AUTO,
		VA_SRC_BT601,		//MP_CSP_BT_601,
		VA_SRC_BT709,		//MP_CSP_BT_709,
		VA_SRC_SMPTE_240,	//MP_CSP_SMPTE_240M,
		0,					//MP_CSP_RGB,
		0,					//MP_CSP_XYZ,
		0,					//MP_CSP_YCGCO,
	};
	static const int field[] = {
		// Picture = 0,   Top = 1,      Bottom = 2
		VA_FRAME_PICTURE, VA_TOP_FIELD, VA_BOTTOM_FIELD, VA_FRAME_PICTURE
	};
	const auto id = (VASurfaceID)(quintptr)frame.data(3);
	int flags = specs[frame.format().colorspace()];
	if (deint)
		flags |= field[frame.field() & VideoFrame::Interlaced];
	if (!check(vaCopySurfaceGLX(VaApi::glx(), m_glSurface, id,  flags), "Cannot copy OpenGL surface."))
		return false;
	if (!check(vaSyncSurface(VaApi::glx(), id), "Cannot sync video surface."))
		return false;
	return true;
}
Developer: akhilo, Project: cmplayer, Lines: 26, Source: hwacc_vaapi.cpp


Example 9: preparePixmap

bool QPainterRenderer::preparePixmap(const VideoFrame &frame)
{
    DPTR_D(QPainterRenderer);
    // already locked in a larger scope of receive()
    QImage::Format imgfmt = frame.imageFormat();
    if (frame.constBits(0)) {
        d.video_frame = frame;
    } else {
        if (imgfmt == QImage::Format_Invalid) {
            d.video_frame = frame.to(VideoFormat::Format_RGB32);
            imgfmt = d.video_frame.imageFormat();
        } else {
            d.video_frame = frame.to(frame.pixelFormat());
        }
    }
    const bool swapRGB = (int)imgfmt < 0;
    if (swapRGB) {
        imgfmt = (QImage::Format)(-imgfmt);
    }
    // DO NOT use frameData().data() because it's temp ptr while d.image does not deep copy the data
    QImage image = QImage((uchar*)d.video_frame.constBits(), d.video_frame.width(), d.video_frame.height(), d.video_frame.bytesPerLine(), imgfmt);
    if (swapRGB)
        image = image.rgbSwapped();
    d.pixmap = QPixmap::fromImage(image);
    //Format_RGB32 is fast. see document
    return true;
}
Developer: Czhian, Project: QtAV, Lines: 27, Source: QPainterRenderer.cpp


Example 10: float

	void avLooperRenderer::draw(int x,int y,int w,int h)
	{
		// audio -> video Sync !!
		//////////////////////////
		// 1
		//VideoFrame * frame = vHeader.getVideoFrame(int(float(aHeader2.getIndex())/float(aBuffer->sizeInSamples()))*vBuffer->getMaxSize());
		//printf("index %d of size %d = %d\n",aHeader2.getIndex(),aBuffer->sizeInSamples(),int(float(aHeader2.getIndex())/float(aBuffer->sizeInSamples()))*vBuffer->getMaxSize());
		
		// 2
		float delayToVideo = (float(aHeader2.getIndex()) / float(audioSampleRate)) * 1000.0; 
		vHeader.setDelayMs(float(maximumSizeInMs)-delayToVideo-float(videoOffsetInMs));
		//printf("avR ::DELAY is = %f || maxSize %d delayToVideo in ms = %f / index %d\n",float(maximumSizeInMs)-delayToVideo-float(videoOffsetInMs),maximumSizeInMs,delayToVideo,aHeader2.getIndex());
		//printf("AVLR:: videoDelayMs :: %f \n",float(maximumSizeInMs)-delayToVideo-float(videoOffsetInMs));
		
		VideoFrame frame = vHeader.getNextVideoFrame();
		if(frame!=NULL){
			// draw the frame texture to screen
			ofSetColor(vHeader.getOpacity(),vHeader.getOpacity(),vHeader.getOpacity());
			frame.getTextureRef().draw(x,y,w,h);
		}
		
		// draw av header interfaces
		aBuffer->draw();
		vBuffer->draw();

		vHeader.draw();
		aHeader2.draw();
		
		ofSetColor(255,255,255);
		
	}
Developer: eloimaduell, Project: ofxPlaymodes-Joshua, Lines: 31, Source: avLooperRenderer.cpp
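
The delay formula maps the audio playhead onto the video buffer: the further the audio head has advanced, the closer to "now" the video head must read. With illustrative numbers (not taken from the project):

    // delayToVideo = (audio index / sample rate) * 1000
    //   e.g. 22050 samples / 44100 Hz * 1000 = 500 ms already played
    // videoDelayMs = maximumSizeInMs - delayToVideo - videoOffsetInMs
    //   e.g. 3000 ms buffer - 500 ms - 0 ms offset = 2500 ms
    // i.e. the video header renders the frame 2500 ms behind the newest
    // buffered frame, which keeps it locked to the audio head.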


Example 11: VideoDecoder

void VideoReader::init()
{
	// analyse InputFile
	avtranscoder::NoDisplayProgress p;
	_inputFile->analyse( p );
	_streamProperties = &_inputFile->getProperties().getStreamPropertiesWithIndex(_streamIndex);
	_videoStreamProperties = static_cast<const VideoProperties*>(_streamProperties);
	_inputFile->activateStream( _streamIndex );

	// setup decoder
	_decoder = new VideoDecoder( _inputFile->getStream( _streamIndex ) );
	_decoder->setupDecoder();

	// create src frame
	_srcFrame = new VideoFrame( _inputFile->getStream( _streamIndex ).getVideoCodec().getVideoFrameDesc() );
	VideoFrame* srcFrame = static_cast<VideoFrame*>(_srcFrame);
	// create dst frame
	if( _width == 0 )
		_width = srcFrame->desc().getWidth();
	if( _height == 0 )
		_height = srcFrame->desc().getHeight();
	VideoFrameDesc videoFrameDescToDisplay( _width, _height, getPixelFormat() );
	_dstFrame = new VideoFrame( videoFrameDescToDisplay );

	// create transform
	_transform = new VideoTransform();
}
Developer: Kthulhu, Project: avTranscoder, Lines: 27, Source: VideoReader.cpp


Example 12: main

int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);

    FrameReader r;
    r.setMedia(a.arguments().last());
    QQueue<qint64> t;
    int count = 0;
    qint64 t0 = QDateTime::currentMSecsSinceEpoch();
    while (r.readMore()) {
        while (r.hasEnoughVideoFrames()) {
            const VideoFrame f = r.getVideoFrame(); //TODO: if eof
            if (!f)
                continue;
            count++;
            //r.readMore();
            const qint64 now = QDateTime::currentMSecsSinceEpoch();
            const qint64 dt = now - t0;
            t.enqueue(now);
            printf("decode @%.3f count: %d, elapsed: %lld, fps: %.1f/%.1f\r", f.timestamp(), count, dt, count*1000.0/dt, t.size()*1000.0/(now - t.first()));fflush(0);
            if (t.size() > 10)
                t.dequeue();
        }
    }
    while (r.hasVideoFrame()) {
        const VideoFrame f = r.getVideoFrame();
        qDebug("pts: %.3f", f.timestamp());
    }
    qDebug("read done");
    return 0;
}
Developer: Czhian, Project: QtAV, Lines: 31, Source: main.cpp
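
Since the reader takes its media from the last command-line argument (a.arguments().last()), the file path is simply passed as the final argument when launching this benchmark.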


Example 13: videoFrameSize

void
ShmHolder::renderFrame(VideoFrame& src) noexcept
{
    const auto width = src.width();
    const auto height = src.height();
    const auto format = VIDEO_PIXFMT_BGRA;
    const auto frameSize = videoFrameSize(format, width, height);

    if (!resizeArea(frameSize)) {
        RING_ERR("ShmHolder[%s]: could not resize area",
                 openedName_.c_str());
        return;
    }

    {
        VideoFrame dst;
        VideoScaler scaler;

        dst.setFromMemory(area_->data + area_->writeOffset, format, width, height);
        scaler.scale(src, dst);
    }

    {
        SemGuardLock lk {area_->mutex};

        ++area_->frameGen;
        std::swap(area_->readOffset, area_->writeOffset);
        ::sem_post(&area_->frameGenMutex);
    }
}
Developer: asadsalman, Project: ring-daemon, Lines: 30, Source: sinkclient.cpp
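
The two scoped blocks are deliberate: the first lets the scaler write the BGRA frame into the write half of the double-buffered area (destroying dst and scaler before any locking), while the second, under the area's semaphore, bumps frameGen and swaps the read/write offsets to publish the frame, with sem_post signalling the waiting reader.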


Example 14: pushNewVideoFrame

void VideoBuffer::pushNewVideoFrame(VideoFrame & frame){
    
    int64_t time = frame.getTimestamp().epochMicroseconds();
    if(microsOneSec==-1) microsOneSec=time;
    framesOneSec++;
    int64_t diff = time-microsOneSec;
    if(diff>=1000000){
        realFps = double(framesOneSec*1000000.)/double(diff);
        framesOneSec = 0;
        microsOneSec = time-(diff-1000000);
    }
    totalFrames++;
    if(size()==0)initTime=frame.getTimestamp();
    //timeMutex.lock();

    
    if (size() >= maxSize) {
        // THIS LINE IS GIVING ME CRASHES SOMETIMES ..... SERIOUS WTF : if i dont see this happen again its fixed
        frames[ofClamp(framePos, 0, size()-1)] = frame; // Here we use the framePos variable to specify where new frames
                                  // should be stored in the video buffer instead of using the vector push_back call.
    }
    else if (size() < maxSize) {
        frames.push_back(frame);
    }
    
    while(size() > maxSize){
        frames.erase(frames.begin()+framePos);
    }
}
Developer: JoshuaBatty, Project: ofxPlaymodes, Lines: 29, Source: VideoBuffer.cpp
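
The first half of the function is a rolling FPS meter: it counts frames until roughly one second of timestamps has elapsed, derives the real rate, and carries the overshoot into the next window. Lifted into a self-contained helper (a hypothetical class, same arithmetic as above):

    #include <cstdint>

    struct FpsMeter {
        int64_t windowStart = -1;  // window start, microseconds
        int     frames      = 0;   // frames counted in this window
        double  fps         = 0.0; // last measured rate

        void tick(int64_t nowMicros) {
            if (windowStart == -1)
                windowStart = nowMicros;
            ++frames;
            const int64_t elapsed = nowMicros - windowStart;
            if (elapsed >= 1000000) {               // one second elapsed
                fps = frames * 1000000.0 / elapsed; // frames per second
                frames = 0;
                windowStart = nowMicros - (elapsed - 1000000); // keep overshoot
            }
        }
    };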


Example 15: getNextVideoFrame

void FileGrabber::update(){
	ofVideoPlayer::update();
	if(isFrameNew()){
		VideoFrame * frame = getNextVideoFrame();
		newFrameEvent.notify(this,*frame);
		frame->release();
	}
}
Developer: playmodes, Project: playmodes, Lines: 8, Source: FileGrabber.cpp


Example 16: push

	void push(mp_image *mpi) {
		mpi->colorspace = in->format().colorspace();
		mpi->levels = in->format().range();
		mpi->display_w = in->format().displaySize().width();
		mpi->display_h = in->format().displaySize().height();
		mpi->pts = p->nextPTS();
		queue->push_back(VideoFrame(true, mpi, in->field()));
		++pushed;
	}
Developer: akhilo, Project: cmplayer, Lines: 9, Source: softwaredeinterlacer.cpp


Example 17: DPTR_D

VideoFrame VideoDecoderVDA::frame()
{
    DPTR_D(VideoDecoderVDA);
    CVPixelBufferRef cv_buffer = (CVPixelBufferRef)d.frame->data[3];
    if (!cv_buffer) {
        qDebug("Frame buffer is empty.");
        return VideoFrame();
    }
    if (CVPixelBufferGetDataSize(cv_buffer) <= 0) {
        qDebug("Empty frame buffer");
        return VideoFrame();
    }
    VideoFormat::PixelFormat pixfmt = format_from_cv(CVPixelBufferGetPixelFormatType(cv_buffer));
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vda pixel format: %#x", CVPixelBufferGetPixelFormatType(cv_buffer));
        return VideoFrame();
    }
    // we can map the cv buffer addresses to video frame in SurfaceInteropCVBuffer. (may need VideoSurfaceInterop::mapToTexture()
    class SurfaceInteropCVBuffer Q_DECL_FINAL: public VideoSurfaceInterop {
        bool glinterop;
        CVPixelBufferRef cvbuf; // keep ref until video frame is destroyed
    public:
        SurfaceInteropCVBuffer(CVPixelBufferRef cv, bool gl) : glinterop(gl), cvbuf(cv) {
            //CVPixelBufferRetain(cvbuf);
        }
        ~SurfaceInteropCVBuffer() {
            CVPixelBufferRelease(cvbuf);
        }
        void* mapToHost(const VideoFormat &format, void *handle, int plane) {
            Q_UNUSED(plane);
            CVPixelBufferLockBaseAddress(cvbuf, 0);
            const VideoFormat fmt(format_from_cv(CVPixelBufferGetPixelFormatType(cvbuf)));
            if (!fmt.isValid()) {
                CVPixelBufferUnlockBaseAddress(cvbuf, 0);
                return NULL;
            }
            const int w = CVPixelBufferGetWidth(cvbuf);
            const int h = CVPixelBufferGetHeight(cvbuf);
            uint8_t *src[3];
            int pitch[3];
            for (int i = 0; i <fmt.planeCount(); ++i) {
                // get address results in internal copy
                src[i] = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cvbuf, i);
                pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(cvbuf, i);
            }
            CVPixelBufferUnlockBaseAddress(cvbuf, 0);
            //CVPixelBufferRelease(cv_buffer); // release when video frame is destroyed
            VideoFrame frame(VideoFrame::fromGPU(fmt, w, h, h, src, pitch));
            if (fmt != format)
                frame = frame.to(format);
            VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
            frame.setTimestamp(f->timestamp());
            frame.setDisplayAspectRatio(f->displayAspectRatio());
            *f = frame;
            return f;
        }
Developer: nbuxrr, Project: QtAV, Lines: 56, Source: VideoDecoderVDA.cpp


Example 18: doDeepCopy

void VideoFrame::doDeepCopy(const VideoFrame &frame) {
	d.detach();
	Q_ASSERT(d->format == frame.format());
	auto p = d->buffer.data();
	for (int i=0; i<d->format.planes(); ++i) {
		const int len = d->format.bytesPerPlain(i);
		memcpy(p, frame.data(i),  len);
		p += len;
	}
}
Developer: akhilo, Project: cmplayer, Lines: 10, Source: videoframe.cpp
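
Two details carry the weight here: d.detach() gives this frame its own buffer before it is overwritten (the copy-on-write step), and the Q_ASSERT records the precondition that deep copies are only defined between frames of identical format, which is what lets the per-plane memcpy sizes line up.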


Example 19: printHash

  void printHash(const VideoFrame& frame,
                 const std::string& imgname, const std::string& imgtype)
  {
    const uint32 h =
      jenkinshash(reinterpret_cast<const byte*>(frame.getBuffer()),
                  frame.getBufSize(),
                  0);

    this->doPrintHash(h, imgname, imgtype);
  }
Developer: ulyssesrr, Project: carmen_lcad, Lines: 10, Source: HashOutputSeries.C


Example 20: switch

// ######################################################################
bool operator==(const GenericFrame& f1, const GenericFrame& f2)
{
  if (f1.nativeType() == f2.nativeType())
    {
      switch (f1.nativeType())
        {
        case GenericFrame::NONE: return true;
        case GenericFrame::RGB_U8: return f1.asRgbU8() == f2.asRgbU8();
        case GenericFrame::RGBD: return ((f1.asRgbU8() == f2.asRgbU8()) && (f1.asGrayU16() == f2.asGrayU16()));
        case GenericFrame::RGB_F32: return f1.asRgbF32() == f2.asRgbF32();
        case GenericFrame::GRAY_U8: return f1.asGrayU8() == f2.asGrayU8();
        case GenericFrame::GRAY_F32: return f1.asGrayF32() == f2.asGrayF32();
        case GenericFrame::VIDEO:
          {
            const VideoFrame v1 = f1.asVideo();
            const VideoFrame v2 = f2.asVideo();

            if (v1.getMode() == v2.getMode())
              return std::equal(v1.getBuffer(),
                                v1.getBuffer() + v1.getBufSize(),
                                v2.getBuffer());
            else
              return v1.toRgb() == v2.toRgb();
          }
        case GenericFrame::RGB_U16:        return f1.asRgbU16() == f2.asRgbU16();
        case GenericFrame::GRAY_U16:       return f1.asGrayU16() == f2.asGrayU16();
        }
    }

  return f1.asRgbF32() == f2.asRgbF32();
}
Developer: ulyssesrr, Project: carmen_lcad, Lines: 32, Source: GenericFrame.C



Note: The VideoFrame class examples above were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not repost without permission.

