C++ UMat Class Code Examples


This article collects typical usage examples of the C++ UMat class. If you are wondering how UMat is used in practice, how to call it, or simply want to see it in real code, the curated class examples below should help.



A total of 20 code examples of the UMat class are shown below, sorted by popularity by default.
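Before the examples, here is a minimal, self-contained sketch of the typical UMat workflow (not taken from any of the samples below; "input.jpg" is a placeholder path): data is uploaded into a UMat, ordinary OpenCV calls dispatch to OpenCL through the transparent API when a device is available, and getMat() maps the result back to host memory.

#include <opencv2/core.hpp>
#include <opencv2/core/ocl.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    cv::ocl::setUseOpenCL(true);                            // allow T-API dispatch to OpenCL if present

    cv::UMat src, gray, edges;
    cv::imread("input.jpg", cv::IMREAD_COLOR).copyTo(src);  // upload image data into the UMat
    if (src.empty())
        return 1;

    cv::cvtColor(src, gray, cv::COLOR_BGR2GRAY);            // these calls may run on the OpenCL device
    cv::GaussianBlur(gray, gray, cv::Size(5, 5), 1.5);
    cv::Canny(gray, edges, 50, 150);

    cv::Mat result = edges.getMat(cv::ACCESS_READ);         // map back to host memory for CPU-side access
    return result.empty() ? 1 : 0;
}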

Example 1: convertToVASurface

void convertToVASurface(InputArray src, VASurfaceID surface, Size size)
{
    (void)src; (void)surface; (void)size;
#if !defined(HAVE_VAAPI)
    NO_VAAPI_SUPPORT_ERROR;
#elif !defined(HAVE_OPENCL)
    NO_OPENCL_SUPPORT_ERROR;
#else
    if (!contextInitialized)
        CV_Error(cv::Error::OpenCLInitError, "OpenCL: Context for VA-API interop hasn't been created");

    const int stype = CV_8UC4;

    int srcType = src.type();
    CV_Assert(srcType == stype);

    Size srcSize = src.size();
    CV_Assert(srcSize.width == size.width && srcSize.height == size.height);

    UMat u = src.getUMat();

    // TODO Add support for roi
    CV_Assert(u.offset == 0);
    CV_Assert(u.isContinuous());

    cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);

    using namespace cv::ocl;
    Context& ctx = Context::getDefault();
    cl_context context = (cl_context)ctx.ptr();

    cl_int status = 0;

    cl_mem clImageY = clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_WRITE_ONLY, &surface, 0, &status);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromVA_APIMediaSurfaceINTEL failed (Y plane)");
    cl_mem clImageUV = clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_WRITE_ONLY, &surface, 1, &status);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromVA_APIMediaSurfaceINTEL failed (UV plane)");

    cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();

    cl_mem images[2] = { clImageY, clImageUV };
    status = clEnqueueAcquireVA_APIMediaSurfacesINTEL(q, 2, images, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireVA_APIMediaSurfacesINTEL failed");
    if (!ocl::ocl_convert_bgr_to_nv12(clBuffer, (int)u.step[0], u.cols, u.rows, clImageY, clImageUV))
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_bgr_to_nv12 failed");
    status = clEnqueueReleaseVA_APIMediaSurfacesINTEL(q, 2, images, 0, NULL, NULL);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseVA_APIMediaSurfacesINTEL failed");

    status = clFinish(q); // TODO Use events
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed");

    status = clReleaseMemObject(clImageY); // TODO RAII
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMem failed (Y plane)");
    status = clReleaseMemObject(clImageUV);
    if (status != CL_SUCCESS)
        CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMem failed (UV plane)");
#endif
}
Developer: Microsoft, Project: opencv, Lines: 64, Source: vaapi.cpp


Example 2: oclk_max_pool_forward

bool OCL4DNNPool<Dtype>::Forward(const UMat& bottom,
                                 UMat& top,
                                 UMat& top_mask)
{
    bool ret = true;
    size_t global[] = { 128 * 128 };
    size_t local[] = { 128 };

    // support 2D case
    switch (pool_method_)
    {
    case LIBDNN_POOLING_METHOD_MAX:
        {
            bool haveMask = !top_mask.empty();
            ocl::Kernel oclk_max_pool_forward(
                haveMask ? CL_KERNEL_SELECT("max_pool_forward_mask") : CL_KERNEL_SELECT("max_pool_forward"),
                ocl::dnn::ocl4dnn_pooling_oclsrc,
                format("-D KERNEL_MAX_POOL=1 -D KERNEL_W=%d -D KERNEL_H=%d"
                       " -D STRIDE_W=%d -D STRIDE_H=%d"
                       " -D PAD_W=%d -D PAD_H=%d%s",
                       kernel_w_, kernel_h_,
                       stride_w_, stride_h_,
                       pad_w_, pad_h_,
                       haveMask ? " -D HAVE_MASK=1" : ""
                ));

            if (oclk_max_pool_forward.empty())
                return false;

            oclk_max_pool_forward.args(
                count_,
                ocl::KernelArg::PtrReadOnly(bottom),
                channels_,
                height_,
                width_,
                pooled_height_,
                pooled_width_,
                ocl::KernelArg::PtrWriteOnly(top),
                ocl::KernelArg::PtrWriteOnly(top_mask)
            );

            ret = oclk_max_pool_forward.run(1, global, local, false);
        }
        break;
    case LIBDNN_POOLING_METHOD_AVE:
        {
            CV_Assert(top_mask.empty());

            ocl::Kernel oclk_ave_pool_forward(CL_KERNEL_SELECT("ave_pool_forward"),
                ocl::dnn::ocl4dnn_pooling_oclsrc,
                format("-D KERNEL_AVE_POOL=1 -D KERNEL_W=%d -D KERNEL_H=%d"
                       " -D STRIDE_W=%d -D STRIDE_H=%d"
                       " -D PAD_W=%d -D PAD_H=%d%s",
                       kernel_w_, kernel_h_,
                       stride_w_, stride_h_,
                       pad_w_, pad_h_,
                       avePoolPaddedArea ? " -D AVE_POOL_PADDING_AREA" : ""
                ));

            if (oclk_ave_pool_forward.empty())
                return false;

            oclk_ave_pool_forward.args(
                count_,
                ocl::KernelArg::PtrReadOnly(bottom),
                channels_,
                height_,
                width_,
                pooled_height_,
                pooled_width_,
                ocl::KernelArg::PtrWriteOnly(top)
            );

            ret = oclk_ave_pool_forward.run(1, global, local, false);
        }
        break;
    case LIBDNN_POOLING_METHOD_STO:
        {
            CV_Assert(top_mask.empty());

            ocl::Kernel oclk_sto_pool_forward(CL_KERNEL_SELECT("sto_pool_forward_test"),
                ocl::dnn::ocl4dnn_pooling_oclsrc,
                format("-D KERNEL_STO_POOL=1 -D KERNEL_W=%d -D KERNEL_H=%d"
                       " -D STRIDE_W=%d -D STRIDE_H=%d",
                       kernel_w_, kernel_h_,
                       stride_w_, stride_h_
                ));


            if (oclk_sto_pool_forward.empty())
                return false;

            oclk_sto_pool_forward.args(
                count_,
                ocl::KernelArg::PtrReadOnly(bottom),
                channels_,
                height_,
                width_,
                pooled_height_,
                pooled_width_,
//......... part of the code omitted .........
Developer: cyberCBM, Project: DetectO, Lines: 101, Source: ocl4dnn_pool.cpp
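This pooling kernel belongs to OpenCV's dnn OpenCL backend and is not called directly from user code. As a hedged sketch of how that path is usually reached, a network is simply asked to run on the OpenCL target ("model.onnx" is a placeholder file name):

#include <opencv2/dnn.hpp>

// Hedged sketch: selecting the OpenCL target makes the dnn module use its
// OCL4DNN kernels (such as the pooling Forward() above) internally.
cv::dnn::Net net = cv::dnn::readNet("model.onnx");        // placeholder model file
net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
net.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);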


Example 3: UMatToVector

 void UMatToVector(const UMat & um, std::vector<Point2f> & v) const
 {
     v.resize(um.size().area());
     um.copyTo(Mat(um.size(), CV_32FC2, &v[0]));
 }
Developer: cyberCBM, Project: DetectO, Lines: 5, Source: test_gftt.cpp


Example 4: main

int main(int argc, char** argv)
{
    const char* keys =
        "{ i input    | | specify input image }"
        "{ h help     | | print help message }";

    cv::CommandLineParser args(argc, argv, keys);
    if (args.has("help"))
    {
        cout << "Usage : " << argv[0] << " [options]" << endl;
        cout << "Available options:" << endl;
        args.printMessage();
        return EXIT_SUCCESS;
    }

    cv::ocl::Context ctx = cv::ocl::Context::getDefault();
    if (!ctx.ptr())
    {
        cerr << "OpenCL is not available" << endl;
        return 1;
    }
    cv::ocl::Device device = cv::ocl::Device::getDefault();
    if (!device.compilerAvailable())
    {
        cerr << "OpenCL compiler is not available" << endl;
        return 1;
    }


    UMat src;
    {
        string image_file = args.get<string>("i");
        if (!image_file.empty())
        {
            Mat image = imread(image_file);
            if (image.empty())
            {
                cout << "error read image: " << image_file << endl;
                return 1;
            }
            cvtColor(image, src, COLOR_BGR2GRAY);
        }
        else
        {
            Mat frame(cv::Size(640, 480), CV_8U, Scalar::all(128));
            Point p(frame.cols / 2, frame.rows / 2);
            line(frame, Point(0, frame.rows / 2), Point(frame.cols, frame.rows / 2), 1);
            circle(frame, p, 200, Scalar(32, 32, 32), 8, LINE_AA);
            string str = "OpenCL";
            int baseLine = 0;
            Size box = getTextSize(str, FONT_HERSHEY_COMPLEX, 2, 5, &baseLine);
            putText(frame, str, Point((frame.cols - box.width) / 2, (frame.rows - box.height) / 2 + baseLine),
                    FONT_HERSHEY_COMPLEX, 2, Scalar(255, 255, 255), 5, LINE_AA);
            frame.copyTo(src);
        }
    }


    cv::String module_name; // empty to disable OpenCL cache

    {
        cout << "OpenCL program source: " << endl;
        cout << "======================================================================================================" << endl;
        cout << opencl_kernel_src << endl;
        cout << "======================================================================================================" << endl;
        //! [Define OpenCL program source]
        cv::ocl::ProgramSource source(module_name, "simple", opencl_kernel_src, "");
        //! [Define OpenCL program source]

        //! [Compile/build OpenCL for current OpenCL device]
        cv::String errmsg;
        cv::ocl::Program program(source, "", errmsg);
        if (program.ptr() == NULL)
        {
            cerr << "Can't compile OpenCL program:" << endl << errmsg << endl;
            return 1;
        }
        //! [Compile/build OpenCL for current OpenCL device]

        if (!errmsg.empty())
        {
            cout << "OpenCL program build log:" << endl << errmsg << endl;
        }

        //! [Get OpenCL kernel by name]
        cv::ocl::Kernel k("magnutude_filter_8u", program);
        if (k.empty())
        {
            cerr << "Can't get OpenCL kernel" << endl;
            return 1;
        }
        //! [Get OpenCL kernel by name]

        UMat result(src.size(), CV_8UC1);

        //! [Define kernel parameters and run]
        size_t globalSize[2] = {(size_t)src.cols, (size_t)src.rows};
        size_t localSize[2] = {8, 8};
        bool executionResult = k
            .args(
//......... part of the code omitted .........
Developer: ArkaJU, Project: opencv, Lines: 101, Source: opencl_custom_kernel.cpp
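The sample above is cut off right where the kernel arguments are being set. As a hedged sketch of how such a custom kernel is usually finished (the exact argument list is assumed, not taken from the original file), the call chain passes the UMats and launches a 2D range:

// Continues the variables declared in the sample above (k, src, result,
// globalSize, localSize); the kernel's argument list is assumed here.
bool executionResult = k
    .args(
        cv::ocl::KernelArg::ReadOnlyNoSize(src),   // input image
        cv::ocl::KernelArg::ReadWrite(result))     // output image
    .run(2, globalSize, localSize, true);          // 2D global range, blocking run
if (!executionResult)
{
    cerr << "OpenCL kernel launch failed" << endl;
    return 1;
}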


Example 5: highResSize

    bool BTVL1_Base::ocl_process(InputArrayOfArrays _src, OutputArray _dst, InputArrayOfArrays _forwardMotions,
                                 InputArrayOfArrays _backwardMotions, int baseIdx)
    {
        std::vector<UMat> & src = *(std::vector<UMat> *)_src.getObj(),
                & forwardMotions = *(std::vector<UMat> *)_forwardMotions.getObj(),
                & backwardMotions = *(std::vector<UMat> *)_backwardMotions.getObj();

        // update blur filter and btv weights
        if (!filter_ || blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
        {
            filter_ = createGaussianFilter(src[0].type(), Size(blurKernelSize_, blurKernelSize_), blurSigma_);
            curBlurKernelSize_ = blurKernelSize_;
            curBlurSigma_ = blurSigma_;
            curSrcType_ = src[0].type();
        }

        if (btvWeights_.empty() || btvKernelSize_ != curBtvKernelSize_ || alpha_ != curAlpha_)
        {
            calcBtvWeights(btvKernelSize_, alpha_, btvWeights_);
            Mat(btvWeights_, true).copyTo(ubtvWeights_);

            curBtvKernelSize_ = btvKernelSize_;
            curAlpha_ = alpha_;
        }

        // calc high res motions
        calcRelativeMotions(forwardMotions, backwardMotions, ulowResForwardMotions_, ulowResBackwardMotions_, baseIdx, src[0].size());

        upscaleMotions(ulowResForwardMotions_, uhighResForwardMotions_, scale_);
        upscaleMotions(ulowResBackwardMotions_, uhighResBackwardMotions_, scale_);

        uforwardMaps_.resize(uhighResForwardMotions_.size());
        ubackwardMaps_.resize(uhighResForwardMotions_.size());
        for (size_t i = 0; i < uhighResForwardMotions_.size(); ++i)
            buildMotionMaps(uhighResForwardMotions_[i], uhighResBackwardMotions_[i], uforwardMaps_[i], ubackwardMaps_[i]);

        // initial estimation
        const Size lowResSize = src[0].size();
        const Size highResSize(lowResSize.width * scale_, lowResSize.height * scale_);

        resize(src[baseIdx], uhighRes_, highResSize, 0, 0, INTER_LINEAR); // TODO

        // iterations
        udiffTerm_.create(highResSize, uhighRes_.type());
        ua_.create(highResSize, uhighRes_.type());
        ub_.create(highResSize, uhighRes_.type());
        uc_.create(lowResSize, uhighRes_.type());

        for (int i = 0; i < iterations_; ++i)
        {
            udiffTerm_.setTo(Scalar::all(0));

            for (size_t k = 0; k < src.size(); ++k)
            {
                // a = M * Ih
                remap(uhighRes_, ua_, ubackwardMaps_[k], noArray(), INTER_NEAREST);
                // b = HM * Ih
                GaussianBlur(ua_, ub_, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
                // c = DHM * Ih
                resize(ub_, uc_, lowResSize, 0, 0, INTER_NEAREST);

                diffSign(src[k], uc_, uc_);

                // a = Dt * diff
                upscale(uc_, ua_, scale_);

                // b = HtDt * diff
                GaussianBlur(ua_, ub_, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
                // a = MtHtDt * diff
                remap(ub_, ua_, uforwardMaps_[k], noArray(), INTER_NEAREST);

                add(udiffTerm_, ua_, udiffTerm_);
            }

            if (lambda_ > 0)
            {
                calcBtvRegularization(uhighRes_, uregTerm_, btvKernelSize_, btvWeights_, ubtvWeights_);
                addWeighted(udiffTerm_, 1.0, uregTerm_, -lambda_, 0.0, udiffTerm_);
            }

            addWeighted(uhighRes_, 1.0, udiffTerm_, tau_, 0.0, uhighRes_);
        }

        Rect inner(btvKernelSize_, btvKernelSize_, uhighRes_.cols - 2 * btvKernelSize_, uhighRes_.rows - 2 * btvKernelSize_);
        uhighRes_(inner).copyTo(_dst);

        return true;
    }
Developer: Asafadari, Project: opencv, Lines: 88, Source: btv_l1.cpp


Example 6: get

 static const Mat get(const UMat& m) { return m.getMat(ACCESS_READ); }
Developer: AngryNetBeans, Project: opencv, Lines: 1, Source: ocl_test.hpp
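A hedged note on this one-line test helper: getMat(ACCESS_READ) maps the UMat's buffer into host memory, and the UMat should not be used on the device while the returned Mat is alive. An illustrative sketch (names are not from the test):

cv::UMat um(480, 640, CV_8UC1, cv::Scalar(0));    // illustrative buffer
{
    cv::Mat host = um.getMat(cv::ACCESS_READ);    // maps the device buffer for host reads
    // inspect 'host' here; avoid running device operations on 'um' in this scope
}
// the mapping is released when 'host' is destroyed; 'um' can be used on the device again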


Example 7: CV_INSTRUMENT_REGION

int cv::meanShift( InputArray _probImage, Rect& window, TermCriteria criteria )
{
    CV_INSTRUMENT_REGION()

    Size size;
    int cn;
    Mat mat;
    UMat umat;
    bool isUMat = _probImage.isUMat();

    if (isUMat)
        umat = _probImage.getUMat(), cn = umat.channels(), size = umat.size();
    else
        mat = _probImage.getMat(), cn = mat.channels(), size = mat.size();

    Rect cur_rect = window;

    CV_Assert( cn == 1 );

    if( window.height <= 0 || window.width <= 0 )
        CV_Error( Error::StsBadArg, "Input window has non-positive sizes" );

    window = window & Rect(0, 0, size.width, size.height);

    double eps = (criteria.type & TermCriteria::EPS) ? std::max(criteria.epsilon, 0.) : 1.;
    eps = cvRound(eps*eps);
    int i, niters = (criteria.type & TermCriteria::MAX_ITER) ? std::max(criteria.maxCount, 1) : 100;

    for( i = 0; i < niters; i++ )
    {
        cur_rect = cur_rect & Rect(0, 0, size.width, size.height);
        if( cur_rect == Rect() )
        {
            cur_rect.x = size.width/2;
            cur_rect.y = size.height/2;
        }
        cur_rect.width = std::max(cur_rect.width, 1);
        cur_rect.height = std::max(cur_rect.height, 1);

        Moments m = isUMat ? moments(umat(cur_rect)) : moments(mat(cur_rect));

        // Calculating center of mass
        if( fabs(m.m00) < DBL_EPSILON )
            break;

        int dx = cvRound( m.m10/m.m00 - window.width*0.5 );
        int dy = cvRound( m.m01/m.m00 - window.height*0.5 );

        int nx = std::min(std::max(cur_rect.x + dx, 0), size.width - cur_rect.width);
        int ny = std::min(std::max(cur_rect.y + dy, 0), size.height - cur_rect.height);

        dx = nx - cur_rect.x;
        dy = ny - cur_rect.y;
        cur_rect.x = nx;
        cur_rect.y = ny;

        // Check for coverage centers mass & window
        if( dx*dx + dy*dy < eps )
            break;
    }

    window = cur_rect;
    return i;
}
Developer: AnnaPetrovicheva, Project: opencv, Lines: 64, Source: camshift.cpp
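A hedged usage sketch of cv::meanShift itself: in real tracking code the probability image comes from calcBackProject, but a synthetic single-channel image is enough to show the calling convention.

#include <opencv2/imgproc.hpp>
#include <opencv2/video/tracking.hpp>

int runMeanShiftDemo()
{
    // Synthetic "probability" image with a bright blob; a real tracker would
    // use a histogram back-projection here instead.
    cv::Mat prob(480, 640, CV_8UC1, cv::Scalar(0));
    cv::circle(prob, cv::Point(400, 300), 40, cv::Scalar(255), cv::FILLED);

    cv::Rect window(100, 100, 80, 80);   // initial search window, away from the blob
    cv::TermCriteria criteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1.0);

    int iters = cv::meanShift(prob, window, criteria);   // 'window' is updated in place
    return iters;                                        // number of shift iterations performed
}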


Example 8: main

int main(int argc, char** argv)
{
    const char* keys =
        "{ i input    | ../data/pic1.png   | specify input image }"
        "{ o output   | squares_output.jpg | specify output save path}"
        "{ h help     |                    | print help message }"
        "{ m cpu_mode |                    | run without OpenCL }";

    CommandLineParser cmd(argc, argv, keys);

    if(cmd.has("help"))
    {
        cout << "Usage : squares [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }
    if (cmd.has("cpu_mode"))
    {
        ocl::setUseOpenCL(false);
        std::cout << "OpenCL was disabled" << std::endl;
    }

    string inputName = cmd.get<string>("i");
    string outfile = cmd.get<string>("o");

    int iterations = 10;
    namedWindow( wndname, WINDOW_AUTOSIZE );
    vector<vector<Point> > squares;

    UMat image;
    imread(inputName, 1).copyTo(image);
    if( image.empty() )
    {
        cout << "Couldn't load " << inputName << endl;
        cmd.printMessage();
        return EXIT_FAILURE;
    }

    int j = iterations;
    int64 t_cpp = 0;
    //warm-ups
    cout << "warming up ..." << endl;
    findSquares(image, squares);

    do
    {
        int64 t_start = cv::getTickCount();
        findSquares(image, squares);
        t_cpp += cv::getTickCount() - t_start;

        t_start  = cv::getTickCount();

        cout << "run loop: " << j << endl;
    }
    while(--j);
    cout << "average time: " << 1000.0f * (double)t_cpp / getTickFrequency() / iterations << "ms" << endl;

    UMat result = drawSquaresBoth(image, squares);
    imshow(wndname, result);
    imwrite(outfile, result);
    waitKey(0);

    return EXIT_SUCCESS;
}
Developer: 984273266, Project: opencv, Lines: 65, Source: squares.cpp


Example 9: ocl_Canny

template <bool useCustomDeriv>
static bool ocl_Canny(InputArray _src, const UMat& dx_, const UMat& dy_, OutputArray _dst, float low_thresh, float high_thresh,
                      int aperture_size, bool L2gradient, int cn, const Size & size)
{
    CV_INSTRUMENT_REGION_OPENCL()

    UMat map;

    const ocl::Device &dev = ocl::Device::getDefault();
    int max_wg_size = (int)dev.maxWorkGroupSize();

    int lSizeX = 32;
    int lSizeY = max_wg_size / 32;

    if (lSizeY == 0)
    {
        lSizeX = 16;
        lSizeY = max_wg_size / 16;
    }
    if (lSizeY == 0)
    {
        lSizeY = 1;
    }

    if (aperture_size == 7)
    {
        low_thresh = low_thresh / 16.0f;
        high_thresh = high_thresh / 16.0f;
    }

    if (L2gradient)
    {
        low_thresh = std::min(32767.0f, low_thresh);
        high_thresh = std::min(32767.0f, high_thresh);

        if (low_thresh > 0)
            low_thresh *= low_thresh;
        if (high_thresh > 0)
            high_thresh *= high_thresh;
    }
    int low = cvFloor(low_thresh), high = cvFloor(high_thresh);

    if (!useCustomDeriv &&
        aperture_size == 3 && !_src.isSubmatrix())
    {
        /*
            stage1_with_sobel:
                Sobel operator
                Calc magnitudes
                Non maxima suppression
                Double thresholding
        */
        char cvt[40];
        ocl::Kernel with_sobel("stage1_with_sobel", ocl::imgproc::canny_oclsrc,
                               format("-D WITH_SOBEL -D cn=%d -D TYPE=%s -D convert_floatN=%s -D floatN=%s -D GRP_SIZEX=%d -D GRP_SIZEY=%d%s",
                                      cn, ocl::memopTypeToStr(_src.depth()),
                                      ocl::convertTypeStr(_src.depth(), CV_32F, cn, cvt),
                                      ocl::typeToStr(CV_MAKE_TYPE(CV_32F, cn)),
                                      lSizeX, lSizeY,
                                      L2gradient ? " -D L2GRAD" : ""));
        if (with_sobel.empty())
            return false;

        UMat src = _src.getUMat();
        map.create(size, CV_32S);
        with_sobel.args(ocl::KernelArg::ReadOnly(src),
                        ocl::KernelArg::WriteOnlyNoSize(map),
                        (float) low, (float) high);

        size_t globalsize[2] = { (size_t)size.width, (size_t)size.height },
                localsize[2] = { (size_t)lSizeX, (size_t)lSizeY };

        if (!with_sobel.run(2, globalsize, localsize, false))
            return false;
    }
    else
    {
        /*
            stage1_without_sobel:
                Calc magnitudes
                Non maxima suppression
                Double thresholding
        */
        double scale = 1.0;
        if (aperture_size == 7)
        {
            scale = 1 / 16.0;
        }

        UMat dx, dy;
        if (!useCustomDeriv)
        {
            Sobel(_src, dx, CV_16S, 1, 0, aperture_size, scale, 0, BORDER_REPLICATE);
            Sobel(_src, dy, CV_16S, 0, 1, aperture_size, scale, 0, BORDER_REPLICATE);
        }
        else
        {
            dx = dx_;
            dy = dy_;
        }

//......... part of the code omitted .........
Developer: cyberCBM, Project: DetectO, Lines: 101, Source: canny.cpp
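ocl_Canny is the internal OpenCL branch; from user code the same path is reached simply by passing UMat arguments to cv::Canny. A minimal hedged sketch:

#include <opencv2/imgproc.hpp>

// Sketch: with UMat arguments the T-API can dispatch to the OpenCL Canny
// implementation shown above when a suitable device is available.
void cannyOnUMat(const cv::UMat& gray, cv::UMat& edges)
{
    cv::Canny(gray, edges, 50.0, 150.0, /*apertureSize=*/3, /*L2gradient=*/false);
}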


Example 10: CV_TRACE_FUNCTION

void ICPImpl::getAb<UMat>(const UMat& oldPts, const UMat& oldNrm, const UMat& newPts, const UMat& newNrm,
                          Affine3f pose, int level, Matx66f &A, Vec6f &b) const
{
    CV_TRACE_FUNCTION();

    Size oldSize = oldPts.size(), newSize = newPts.size();
    CV_Assert(oldSize == oldNrm.size());
    CV_Assert(newSize == newNrm.size());

    // calculate 1x7 vector ab to produce b and upper triangle of A:
    // [A|b] = ab*(ab^t)
    // and then reduce it across work groups

    cv::String errorStr;
    ocl::ProgramSource source = ocl::rgbd::icp_oclsrc;
    cv::String options = "-cl-fast-relaxed-math -cl-mad-enable";
    ocl::Kernel k;
    k.create("getAb", source, options, &errorStr);

    if(k.empty())
        throw std::runtime_error("Failed to create kernel: " + errorStr);

    size_t globalSize[2];
    globalSize[0] = (size_t)newPts.cols;
    globalSize[1] = (size_t)newPts.rows;

    const ocl::Device& device = ocl::Device::getDefault();
    // workaround for Intel's integrated GPU
    size_t wgsLimit = device.isIntel() ? 64 : device.maxWorkGroupSize();
    size_t memSize = device.localMemSize();
    // local memory should keep upperTriangles for all threads in group for reduce
    const size_t ltsz = UTSIZE*sizeof(float);
    const size_t lcols = 32;
    size_t lrows = min(memSize/ltsz, wgsLimit)/lcols;
    // round lrows down to 2^n
    lrows = roundDownPow2(lrows);
    size_t localSize[2] = {lcols, lrows};
    Size ngroups((int)divUp(globalSize[0], (unsigned int)localSize[0]),
                 (int)divUp(globalSize[1], (unsigned int)localSize[1]));

    // size of local buffer for group-wide reduce
    size_t lsz = localSize[0]*localSize[1]*ltsz;

    Intr::Projector proj = intrinsics.scale(level).makeProjector();
    Vec2f fxy(proj.fx, proj.fy), cxy(proj.cx, proj.cy);

    UMat& groupedSumGpu = groupedSumBuffers[level];
    groupedSumGpu.create(Size(ngroups.width*UTSIZE, ngroups.height),
                         CV_32F);
    groupedSumGpu.setTo(0);

    // TODO: optimization possible:
    // samplers instead of oldPts/oldNrm (mask needed)
    k.args(ocl::KernelArg::ReadOnlyNoSize(oldPts),
           ocl::KernelArg::ReadOnlyNoSize(oldNrm),
           oldSize,
           ocl::KernelArg::ReadOnlyNoSize(newPts),
           ocl::KernelArg::ReadOnlyNoSize(newNrm),
           newSize,
           ocl::KernelArg::Constant(pose.matrix.val,
                                    sizeof(pose.matrix.val)),
           fxy.val, cxy.val,
           distanceThreshold*distanceThreshold,
           cos(angleThreshold),
           //TODO: replace by KernelArg::Local(lsz)
           ocl::KernelArg(ocl::KernelArg::LOCAL, 0, 1, 1, 0, lsz),
           ocl::KernelArg::WriteOnlyNoSize(groupedSumGpu)
           );

    if(!k.run(2, globalSize, localSize, true))
        throw std::runtime_error("Failed to run kernel");

    float upperTriangle[UTSIZE];
    for(int i = 0; i < UTSIZE; i++)
        upperTriangle[i] = 0;

    Mat groupedSumCpu = groupedSumGpu.getMat(ACCESS_READ);

    for(int y = 0; y < ngroups.height; y++)
    {
        const float* rowr = groupedSumCpu.ptr<float>(y);
        for(int x = 0; x < ngroups.width; x++)
        {
            const float* p = rowr + x*UTSIZE;
            for(int j = 0; j < UTSIZE; j++)
            {
                upperTriangle[j] += p[j];
            }
        }
    }
    groupedSumCpu.release();

    ABtype sumAB = ABtype::zeros();
    int pos = 0;
    for(int i = 0; i < 6; i++)
    {
        for(int j = i; j < 7; j++)
        {
            sumAB(i, j) = upperTriangle[pos++];
        }
//......... part of the code omitted .........
Developer: Bleach665, Project: opencv_contrib, Lines: 101, Source: fast_icp.cpp


Example 11: LOGLN

Mosaic::Status Mosaic::composePanorama(InputArrayOfArrays images, OutputArray pano) {
    LOGLN("Warping images (auxiliary)... ");

    std::vector<UMat> imgs;
    images.getUMatVector(imgs);
    if (!imgs.empty()) {
        CV_Assert(imgs.size() == imgs_.size());

        UMat img;
        seam_est_imgs_.resize(imgs.size());

        for (size_t i = 0; i < imgs.size(); ++i) {
            imgs_[i] = imgs[i];
            resize(imgs[i], img, Size(), seam_scale_, seam_scale_);
            seam_est_imgs_[i] = img.clone();
        }

        std::vector<UMat> seam_est_imgs_subset;
        std::vector<UMat> imgs_subset;

        for (size_t i = 0; i < indices_.size(); ++i) {
            imgs_subset.push_back(imgs_[indices_[i]]);
            seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
        }

        seam_est_imgs_ = seam_est_imgs_subset;
        imgs_ = imgs_subset;
    }

    UMat pano_;

#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    std::vector<Point> corners(imgs_.size());
    std::vector<UMat> masks_warped(imgs_.size());
    std::vector<UMat> images_warped(imgs_.size());
    std::vector<Size> sizes(imgs_.size());
    std::vector<UMat> masks(imgs_.size());

    // Prepare image masks
    for (size_t i = 0; i < imgs_.size(); ++i) {
        masks[i].create(seam_est_imgs_[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks
    Ptr<detail::TranslationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
    for (size_t i = 0; i < imgs_.size(); ++i) {
        Mat_<float> K;
        cameras_[i].K().convertTo(K, CV_32F);
        K(0, 0) *= (float) seam_work_aspect_;
        K(0, 2) *= (float) seam_work_aspect_;
        K(1, 1) *= (float) seam_work_aspect_;
        K(1, 2) *= (float) seam_work_aspect_;

        corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, cameras_[i].t, INTER_LANCZOS4, BORDER_REFLECT,
                             images_warped[i]);
        sizes[i] = images_warped[i].size();

        w->warp(masks[i], K, cameras_[i].R, cameras_[i].t, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    std::vector<UMat> images_warped_f(imgs_.size());
    for (size_t i = 0; i < imgs_.size(); ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Find seams
    exposure_comp_->feed(corners, images_warped, masks_warped);
    seam_finder_->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    seam_est_imgs_.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");
#if ENABLE_LOG
    t = getTickCount();
#endif

    UMat img_warped, img_warped_s;
    UMat dilated_mask, seam_mask, mask, mask_warped;

    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;
    bool is_blender_prepared = false;

    double compose_scale = 1;
    bool is_compose_scale_set = false;

    UMat full_img, img;
    for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx) {
        LOGLN("Compositing image #" << indices_[img_idx] + 1);
#if ENABLE_LOG
        int64 compositing_t = getTickCount();
//......... part of the code omitted .........
Developer: skruzic, Project: stitch, Lines: 101, Source: Mosaic.cpp


Example 12: ocl_Laplacian5

static bool ocl_Laplacian5(InputArray _src, OutputArray _dst,
                           const Mat & kd, const Mat & ks, double scale, double delta,
                           int borderType, int depth, int ddepth)
{
    const size_t tileSizeX = 16;
    const size_t tileSizeYmin = 8;

    const ocl::Device dev = ocl::Device::getDefault();

    int stype = _src.type();
    int sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype), esz = CV_ELEM_SIZE(stype);

    bool doubleSupport = dev.doubleFPConfig() > 0;
    if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
        return false;

    Mat kernelX = kd.reshape(1, 1);
    if (kernelX.cols % 2 != 1)
        return false;
    Mat kernelY = ks.reshape(1, 1);
    if (kernelY.cols % 2 != 1)
        return false;
    CV_Assert(kernelX.cols == kernelY.cols);

    size_t wgs = dev.maxWorkGroupSize();
    size_t lmsz = dev.localMemSize();
    size_t src_step = _src.step(), src_offset = _src.offset();
    const size_t tileSizeYmax = wgs / tileSizeX;

    // workaround for Nvidia: 3 channel vector type takes 4*elem_size in local memory
    int loc_mem_cn = dev.vendorID() == ocl::Device::VENDOR_NVIDIA && cn == 3 ? 4 : cn;

    if (((src_offset % src_step) % esz == 0) &&
        (
         (borderType == BORDER_CONSTANT || borderType == BORDER_REPLICATE) ||
         ((borderType == BORDER_REFLECT || borderType == BORDER_WRAP || borderType == BORDER_REFLECT_101) &&
          (_src.cols() >= (int) (kernelX.cols + tileSizeX) && _src.rows() >= (int) (kernelY.cols + tileSizeYmax)))
        ) &&
        (tileSizeX * tileSizeYmin <= wgs) &&
        (LAPLACIAN_LOCAL_MEM(tileSizeX, tileSizeYmin, kernelX.cols, loc_mem_cn * 4) <= lmsz)
       )
    {
        Size size = _src.size(), wholeSize;
        Point origin;
        int dtype = CV_MAKE_TYPE(ddepth, cn);
        int wdepth = CV_32F;

        size_t tileSizeY = tileSizeYmax;
        while ((tileSizeX * tileSizeY > wgs) || (LAPLACIAN_LOCAL_MEM(tileSizeX, tileSizeY, kernelX.cols, loc_mem_cn * 4) > lmsz))
        {
            tileSizeY /= 2;
        }
        size_t lt2[2] = { tileSizeX, tileSizeY};
        size_t gt2[2] = { lt2[0] * (1 + (size.width - 1) / lt2[0]), lt2[1] };

        char cvt[2][40];
        const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP",
                                           "BORDER_REFLECT_101" };

        String opts = cv::format("-D BLK_X=%d -D BLK_Y=%d -D RADIUS=%d%s%s"
                                 " -D convertToWT=%s -D convertToDT=%s"
                                 " -D %s -D srcT1=%s -D dstT1=%s -D WT1=%s"
                                 " -D srcT=%s -D dstT=%s -D WT=%s"
                                 " -D CN=%d ",
                                 (int)lt2[0], (int)lt2[1], kernelX.cols / 2,
                                 ocl::kernelToStr(kernelX, wdepth, "KERNEL_MATRIX_X").c_str(),
                                 ocl::kernelToStr(kernelY, wdepth, "KERNEL_MATRIX_Y").c_str(),
                                 ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]),
                                 ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
                                 borderMap[borderType],
                                 ocl::typeToStr(sdepth), ocl::typeToStr(ddepth), ocl::typeToStr(wdepth),
                                 ocl::typeToStr(CV_MAKETYPE(sdepth, cn)),
                                 ocl::typeToStr(CV_MAKETYPE(ddepth, cn)),
                                 ocl::typeToStr(CV_MAKETYPE(wdepth, cn)),
                                 cn);

        ocl::Kernel k("laplacian", ocl::imgproc::laplacian5_oclsrc, opts);
        if (k.empty())
            return false;
        UMat src = _src.getUMat();
        _dst.create(size, dtype);
        UMat dst = _dst.getUMat();

        int src_offset_x = static_cast<int>((src_offset % src_step) / esz);
        int src_offset_y = static_cast<int>(src_offset / src_step);

        src.locateROI(wholeSize, origin);

        k.args(ocl::KernelArg::PtrReadOnly(src), (int)src_step, src_offset_x, src_offset_y,
               wholeSize.height, wholeSize.width, ocl::KernelArg::WriteOnly(dst),
               static_cast<float>(scale), static_cast<float>(delta));

        return k.run(2, gt2, lt2, false);
    }
    int iscale = cvRound(scale), idelta = cvRound(delta);
    bool floatCoeff = std::fabs(delta - idelta) > DBL_EPSILON || std::fabs(scale - iscale) > DBL_EPSILON;
    int wdepth = std::max(depth, floatCoeff ? CV_32F : CV_32S), kercn = 1;

    if (!doubleSupport && wdepth == CV_64F)
        return false;
//......... part of the code omitted .........
Developer: 165-goethals, Project: opencv, Lines: 101, Source: deriv.cpp
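ocl_Laplacian5 is likewise internal; as a hedged sketch, a path like it can be taken when cv::Laplacian is called on UMat data with a larger aperture (e.g. 5) and OpenCL is available:

#include <opencv2/imgproc.hpp>

// Sketch: a Laplacian on UMat input; the separable OpenCL path shown above
// may be used internally depending on the device and aperture size.
void laplacianOnUMat(const cv::UMat& src, cv::UMat& dst)
{
    cv::Laplacian(src, dst, CV_16S, /*ksize=*/5, /*scale=*/1.0, /*delta=*/0.0, cv::BORDER_DEFAULT);
}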


Example 13: main

int main(int argc, char** argv)
{
    CV_TRACE_FUNCTION();

    cv::CommandLineParser parser(argc, argv,
        "{help h ? |     | help message}"
        "{n        | 100 | number of frames to process }"
        "{@video   | 0   | video filename or cameraID }"
    );
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    VideoCapture capture;
    std::string video = parser.get<string>("@video");
    if (video.size() == 1 && isdigit(video[0]))
        capture.open(parser.get<int>("@video"));
    else
        capture.open(video);
    int nframes = 0;
    if (capture.isOpened())
    {
        nframes = (int)capture.get(CAP_PROP_FRAME_COUNT);
        cout << "Video " << video <<
            ": width=" << capture.get(CAP_PROP_FRAME_WIDTH) <<
            ", height=" << capture.get(CAP_PROP_FRAME_HEIGHT) <<
            ", nframes=" << nframes << endl;
    }
    else
    {
        cout << "Could not initialize video capturing...\n";
        return -1;
    }

    int N = parser.get<int>("n");
    if (nframes > 0 && N > nframes)
        N = nframes;

    cout << "Start processing..." << endl
        << "Press ESC key to terminate" << endl;

    UMat frame;
    for (int i = 0; N > 0 ? (i < N) : true; i++)
    {
        CV_TRACE_REGION("FRAME"); // OpenCV Trace macro for named "scope" region
        {
            CV_TRACE_REGION("read");
            capture.read(frame);

            if (frame.empty())
            {
                cerr << "Can't capture frame: " << i << std::endl;
                break;
            }

            // OpenCV Trace macro for NEXT named region in the same C++ scope
            // Previous "read" region will be marked complete on this line.
            // Use this to eliminate unnecessary curly braces.
            CV_TRACE_REGION_NEXT("process");
            process_frame(frame);

            CV_TRACE_REGION_NEXT("delay");
            if (waitKey(1) == 27/*ESC*/)
                break;
        }
    }

    return 0;
}
Developer: ElenaGvozdeva, Project: opencv, Lines: 71, Source: application_trace.cpp


Example 14: locateROI

UMat Mat::getUMat(int accessFlags, UMatUsageFlags usageFlags) const
{
    UMat hdr;
    if(!data)
        return hdr;
    if (data != datastart)
    {
        Size wholeSize;
        Point ofs;
        locateROI(wholeSize, ofs);
        Size sz(cols, rows);
        if (ofs.x != 0 || ofs.y != 0)
        {
            Mat src = *this;
            int dtop = ofs.y;
            int dbottom = wholeSize.height - src.rows - ofs.y;
            int dleft = ofs.x;
            int dright = wholeSize.width - src.cols - ofs.x;
            src.adjustROI(dtop, dbottom, dleft, dright);
            return src.getUMat(accessFlags, usageFlags)(cv::Rect(ofs.x, ofs.y, sz.width, sz.height));
        }
    }
    CV_Assert(data == datastart);

    accessFlags |= ACCESS_RW;
    UMatData* new_u = NULL;
    {
        MatAllocator *a = allocator, *a0 = getDefaultAllocator();
        if(!a)
            a = a0;
        new_u = a->allocate(dims, size.p, type(), data, step.p, accessFlags, usageFlags);
    }
    bool allocated = false;
    try
    {
        allocated = UMat::getStdAllocator()->allocate(new_u, accessFlags, usageFlags);
    }
    catch (const cv::Exception& e)
    {
        fprintf(stderr, "Exception: %s\n", e.what());
    }
    if (!allocated)
    {
        allocated = getDefaultAllocator()->allocate(new_u, accessFlags, usageFlags);
        CV_Assert(allocated);
    }
    if (u != NULL)
    {
#ifdef HAVE_OPENCL
        if (ocl::useOpenCL() && new_u->currAllocator == ocl::getOpenCLAllocator())
        {
            CV_Assert(new_u->tempUMat());
        }
#endif
        new_u->originalUMatData = u;
        CV_XADD(&(u->refcount), 1);
        CV_XADD(&(u->urefcount), 1);
    }
    hdr.flags = flags;
    setSize(hdr, dims, size.p, step.p);
    finalizeHdr(hdr);
    hdr.u = new_u;
    hdr.offset = 0; //data - datastart;
    hdr.addref();
    return hdr;
}
Developer: MCobias, Project: opencv, Lines: 66, Source: umatrix.cpp
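A hedged usage sketch of Mat::getUMat from the caller's side: it wraps existing Mat data as a UMat so later calls can run on the device, and the original Mat must stay alive while the UMat is in use.

cv::Mat frame(480, 640, CV_8UC3, cv::Scalar::all(0));   // illustrative host image
{
    cv::UMat uframe = frame.getUMat(cv::ACCESS_READ);   // wraps 'frame' for device-side use
    cv::UMat ugray;
    cv::cvtColor(uframe, ugray, cv::COLOR_BGR2GRAY);    // may run on the OpenCL device
}
// keep 'frame' alive and untouched while any UMat obtained from getUMat() is still used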


Example 15: CV_Assert

/* dst = src */
void Mat::copyTo( OutputArray _dst ) const
{
    int dtype = _dst.type();
    if( _dst.fixedType() && dtype != type() )
    {
        CV_Assert( channels() == CV_MAT_CN(dtype) );
        convertTo( _dst, dtype );
        return;
    }

    if( empty() )
    {
        _dst.release();
        return;
    }

    if( _dst.isUMat() )
    {
        _dst.create( dims, size.p, type() );
        UMat dst = _dst.getUMat();

        size_t i, sz[CV_MAX_DIM], dstofs[CV_MAX_DIM], esz = elemSize();
        for( i = 0; i < (size_t)dims; i++ )
            sz[i] = size.p[i];
        sz[dims-1] *= esz;
        dst.ndoffset(dstofs);
        dstofs[dims-1] *= esz;
        dst.u->currAllocator->upload(dst.u, data, dims, sz, dstofs, dst.step.p, step.p);
        return;
    }

    if( dims <= 2 )
    {
        _dst.create( rows, cols, type() );
        Mat dst = _dst.getMat();
        if( data == dst.data )
            return;

        if( rows > 0 && cols > 0 )
        {
            const uchar* sptr = data;
            uchar* dptr = dst.data;

            Size sz = getContinuousSize(*this, dst);
            size_t len = sz.width*elemSize();

            for( ; sz.height--; sptr += step, dptr += dst.step )
                memcpy( dptr, sptr, len );
        }
        return;
    }

    _dst.create( dims, size, type() );
    Mat dst = _dst.getMat();
    if( data == dst.data )
        return;

    if( total() != 0 )
    {
        const Mat* arrays[] = { this, &dst };
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs, 2);
        size_t sz = it.size*elemSize();

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            memcpy(ptrs[1], ptrs[0], sz);
    }
}
Developer: AntonBoytsov, Project: opencv, Lines: 69, Source: copy.cpp
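A hedged sketch of the UMat branch above from the caller's side: copying a Mat into a UMat uploads the pixels through the device allocator in one call.

cv::Mat host(480, 640, CV_8UC1, cv::Scalar(128));   // illustrative host buffer
cv::UMat device;
host.copyTo(device);                                // takes the _dst.isUMat() upload path above
CV_Assert(device.size() == host.size() && device.type() == host.type());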


示例16: findSquares

// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares( const UMat& image, vector<vector<Point> >& squares )
{
    squares.clear();
    UMat pyr, timg, gray0(image.size(), CV_8U), gray;

    // down-scale and upscale the image to filter out the noise
    pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
    pyrUp(pyr, timg, image.size());
    vector<vector<Point> > contours;

    // find squares in every color plane of the image
    for( int c = 0; c < 3; c++ )
    {
        int ch[] = {c, 0};
        mixChannels(timg, gray0, ch, 1);

        // try several threshold levels
        for( int l = 0; l < N; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                Canny(gray0, gray, 0, thresh, 5);
                // dilate canny output to remove potential
                // holes between edge segments
                dilate(gray, gray, UMat(), Point(-1,-1));
            }
            else
            {
                // apply threshold if l!=0:
                //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cv::threshold(gray0, gray, (l+1)*255/N, 255, THRESH_BINARY);
            }

            // find contours and store them all as a list
            findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);

            vector<Point> approx;

            // test each contour
            for( size_t i = 0; i < contours.size(); i++ )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter

                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

                // square contours should have 4 vertices after approximation
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if( approx.size() == 4 &&
                        fabs(contourArea(Mat(approx))) >
//......... rest of the code omitted .........
