This article collects typical usage examples of the C++ class cv::OutputArray. If you are wondering what cv::OutputArray is for and how it is used in practice, the hand-picked class code examples below should help. 20 cv::OutputArray examples are shown, ordered by popularity by default.
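Before the examples, note the pattern that almost every snippet below follows: the function allocates its output with OutputArray::create() and then obtains a concrete cv::Mat (or cv::UMat) header via getMat()/getUMat()/getMatRef() to write into. The minimal sketch below is illustrative only; the function name invert_colors is hypothetical and does not come from any of the projects cited in this article.

#include <opencv2/core.hpp>

// Minimal illustration of the typical cv::OutputArray pattern (hypothetical helper).
static void invert_colors(cv::InputArray _src, cv::OutputArray _dst)
{
    cv::Mat src = _src.getMat();
    CV_Assert(src.type() == CV_8UC3);
    _dst.create(src.size(), src.type()); // allocates the destination only if needed
    cv::Mat dst = _dst.getMat();         // header over the caller-provided output
    cv::bitwise_not(src, dst);           // write the result through the header
}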
Example 1: calcLut
static bool calcLut(cv::InputArray _src, cv::OutputArray _dst,
                    const int tilesX, const int tilesY, const cv::Size tileSize,
                    const int clipLimit, const float lutScale)
{
    cv::ocl::Kernel k("calcLut", cv::ocl::imgproc::clahe_oclsrc);
    if(k.empty())
        return false;

    cv::UMat src = _src.getUMat();
    _dst.create(tilesX * tilesY, 256, CV_8UC1);
    cv::UMat dst = _dst.getUMat();

    int tile_size[2];
    tile_size[0] = tileSize.width;
    tile_size[1] = tileSize.height;

    size_t localThreads[3]  = { 32, 8, 1 };
    size_t globalThreads[3] = { tilesX * localThreads[0], tilesY * localThreads[1], 1 };

    int idx = 0;
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(src));
    idx = k.set(idx, cv::ocl::KernelArg::WriteOnlyNoSize(dst));
    idx = k.set(idx, tile_size);
    idx = k.set(idx, tilesX);
    idx = k.set(idx, clipLimit);
    k.set(idx, lutScale);

    return k.run(2, globalThreads, localThreads, false);
}
Developer: ArkaJU, Project: opencv, Lines: 29, Source: clahe.cpp
Example 2: colorChange
void colorChange(cv::InputArray _src,
                 cv::InputArray _mask,
                 cv::OutputArray _dst,
                 float r,
                 float g,
                 float b)
{
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();

    float red = r;
    float green = g;
    float blue = b;

    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if(mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;

    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);

    Cloning obj;
    obj.local_color_change(src, cs_mask, gray, blend, red, green, blue);
}
Developer: pablospe, Project: seamless_cloning, Lines: 30, Source: seamless_cloning.cpp
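A possible call site for colorChange() (a sketch, not taken from the project): the file names and the multiplier values are assumptions. In OpenCV's seamless-cloning module, which this code mirrors, the r/g/b arguments act as per-channel multiplication factors applied inside the masked region.

// Hypothetical usage of colorChange(); file names and factors are illustrative.
cv::Mat src  = cv::imread("source.png");   // 8-bit BGR image
cv::Mat mask = cv::imread("mask.png");     // non-zero pixels mark the region to recolor
cv::Mat result;
colorChange(src, mask, result, 1.5f, 0.7f, 0.7f); // boost red, damp green and blue
cv::imwrite("result.png", result);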
Example 3: globalMatting
void globalMatting(cv::InputArray _image, cv::InputArray _trimap, cv::OutputArray _foreground, cv::OutputArray _alpha, cv::OutputArray _conf)
{
    cv::Mat image = _image.getMat();
    cv::Mat trimap = _trimap.getMat();

    if (image.empty())
        CV_Error(CV_StsBadArg, "image is empty");
    if (image.type() != CV_8UC3)
        CV_Error(CV_StsBadArg, "image must have CV_8UC3 type");
    if (trimap.empty())
        CV_Error(CV_StsBadArg, "trimap is empty");
    if (trimap.type() != CV_8UC1)
        CV_Error(CV_StsBadArg, "trimap must have CV_8UC1 type");
    if (image.size() != trimap.size())
        CV_Error(CV_StsBadArg, "image and trimap must have the same size");

    cv::Mat &foreground = _foreground.getMatRef();
    cv::Mat &alpha = _alpha.getMatRef();
    cv::Mat tempConf;

    globalMattingHelper(image, trimap, foreground, alpha, tempConf);

    if(_conf.needed())
        tempConf.copyTo(_conf);
}
Developer: pfShawn, Project: global-matting, Lines: 27, Source: globalmatting.cpp
Example 4: getBackgroundDescriptorsImage
void BackgroundSubtractorLOBSTER_<ParallelUtils::eGLSL>::getBackgroundDescriptorsImage(cv::OutputArray oBGDescImg) const {
    lvDbgExceptionWatch;
    CV_Assert(m_bInitialized);
    glAssert(m_bGLInitialized && !m_vnBGModelData.empty());
    CV_Assert(LBSP::DESC_SIZE==2);
    oBGDescImg.create(m_oFrameSize,CV_16UC(int(m_nImgChannels)));
    cv::Mat oOutputImg = oBGDescImg.getMatRef();
    glBindBuffer(GL_SHADER_STORAGE_BUFFER,getSSBOId(BackgroundSubtractorLOBSTER_::eLOBSTERStorageBuffer_BGModelBinding));
    glGetBufferSubData(GL_SHADER_STORAGE_BUFFER,0,m_nBGModelSize*sizeof(uint),(void*)m_vnBGModelData.data());
    glErrorCheck;
    for(size_t nRowIdx=0; nRowIdx<(size_t)m_oFrameSize.height; ++nRowIdx) {
        const size_t nModelRowOffset = nRowIdx*m_nRowStepSize;
        const size_t nImgRowOffset = nRowIdx*oOutputImg.step.p[0];
        for(size_t nColIdx=0; nColIdx<(size_t)m_oFrameSize.width; ++nColIdx) {
            const size_t nModelColOffset = nColIdx*m_nColStepSize+nModelRowOffset;
            const size_t nImgColOffset = nColIdx*oOutputImg.step.p[1]+nImgRowOffset;
            std::array<float,4> afCurrPxSum = {0.0f,0.0f,0.0f,0.0f};
            for(size_t nSampleIdx=0; nSampleIdx<m_nBGSamples; ++nSampleIdx) {
                const size_t nModelPxOffset_color = nSampleIdx*m_nSampleStepSize+nModelColOffset;
                const size_t nModelPxOffset_desc = nModelPxOffset_color+(m_nBGSamples*m_nSampleStepSize);
                for(size_t nChannelIdx=0; nChannelIdx<m_nImgChannels; ++nChannelIdx) {
                    const size_t nModelTotOffset = nChannelIdx+nModelPxOffset_desc;
                    afCurrPxSum[nChannelIdx] += m_vnBGModelData[nModelTotOffset];
                }
            }
            for(size_t nChannelIdx=0; nChannelIdx<m_nImgChannels; ++nChannelIdx) {
                const size_t nSampleChannelIdx = ((nChannelIdx==3||m_nImgChannels==1)?nChannelIdx:2-nChannelIdx);
                const size_t nImgTotOffset = nSampleChannelIdx*2+nImgColOffset;
                *(ushort*)(oOutputImg.data+nImgTotOffset) = (ushort)(afCurrPxSum[nChannelIdx]/m_nBGSamples);
            }
        }
    }
}
Developer: caomw, Project: litiv, Lines: 33, Source: BackgroundSubtractorLOBSTER.cpp
Example 5: transform
static bool transform(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _lut,
                      const int tilesX, const int tilesY, const cv::Size & tileSize)
{
    cv::ocl::Kernel k("transform", cv::ocl::imgproc::clahe_oclsrc);
    if(k.empty())
        return false;

    int tile_size[2];
    tile_size[0] = tileSize.width;
    tile_size[1] = tileSize.height;

    cv::UMat src = _src.getUMat();
    _dst.create(src.size(), src.type());
    cv::UMat dst = _dst.getUMat();
    cv::UMat lut = _lut.getUMat();

    size_t localThreads[3]  = { 32, 8, 1 };
    size_t globalThreads[3] = { (size_t)src.cols, (size_t)src.rows, 1 };

    int idx = 0;
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(src));
    idx = k.set(idx, cv::ocl::KernelArg::WriteOnlyNoSize(dst));
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(lut));
    idx = k.set(idx, src.cols);
    idx = k.set(idx, src.rows);
    idx = k.set(idx, tile_size);
    idx = k.set(idx, tilesX);
    k.set(idx, tilesY);

    return k.run(2, globalThreads, localThreads, false);
}
Developer: 15751064254, Project: opencv, Lines: 32, Source: clahe.cpp
Example 6: stereo_disparity_normal
void stereo_disparity_normal(cv::InputArray left_image, cv::InputArray right_image, cv::OutputArray disp_,
                             int max_dis_level, int scale, float sigma) {
    cv::Mat imL = left_image.getMat();
    cv::Mat imR = right_image.getMat();

    CV_Assert(imL.size() == imR.size());
    CV_Assert(imL.type() == CV_8UC3 && imR.type() == CV_8UC3);

    cv::Size imageSize = imL.size();
    disp_.create(imageSize, CV_8U);
    cv::Mat disp = disp_.getMat();

    CDisparityHelper dispHelper;

    //step 1: cost initialization
    cv::Mat costVol = dispHelper.GetMatchingCost(imL, imR, max_dis_level);

    //step 2: cost aggregation
    CSegmentTree stree;
    CColorWeight cWeight(imL);
    stree.BuildSegmentTree(imL.size(), sigma, TAU, cWeight);
    stree.Filter(costVol, max_dis_level);

    //step 3: disparity computation
    cv::Mat disparity = dispHelper.GetDisparity_WTA((float*)costVol.data,
        imageSize.width, imageSize.height, max_dis_level);

    MeanFilter(disparity, disparity, 3);

    disparity *= scale;
    disparity.copyTo(disp);
}
Developer: YuanhaoGong, Project: STCostAggregation, Lines: 33, Source: StereoDisparity.cpp
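A possible call site for stereo_disparity_normal() (a sketch; the file names, disparity range, scale, and sigma values are assumptions, not values recommended by the project):

// Hypothetical usage; both inputs must be rectified 8-bit BGR images of equal size.
cv::Mat left  = cv::imread("left.png");
cv::Mat right = cv::imread("right.png");
cv::Mat disparity;
stereo_disparity_normal(left, right, disparity, 64 /*max_dis_level*/, 4 /*scale*/, 0.1f /*sigma*/);
cv::imwrite("disparity.png", disparity);   // CV_8U map, already scaled for display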
Example 7: compute_derivative_kernels
/**
 * @brief Compute derivative kernels for sizes different than 3
 * @param _kx Horizontal kernel values
 * @param _ky Vertical kernel values
 * @param dx Derivative order in X-direction (horizontal)
 * @param dy Derivative order in Y-direction (vertical)
 * @param scale Scale factor or derivative size
 */
void compute_derivative_kernels(cv::OutputArray _kx, cv::OutputArray _ky, int dx, int dy, int scale) {
    int ksize = 3 + 2 * (scale - 1);

    // The standard Scharr kernel
    if (scale == 1) {
        getDerivKernels(_kx, _ky, dx, dy, 0, true, CV_32F);
        return;
    }

    _kx.create(ksize, 1, CV_32F, -1, true);
    _ky.create(ksize, 1, CV_32F, -1, true);
    Mat kx = _kx.getMat();
    Mat ky = _ky.getMat();

    float w = 10.0f / 3.0f;
    float norm = 1.0f / (2.0f*scale*(w + 2.0f));

    for (int k = 0; k < 2; k++) {
        Mat* kernel = k == 0 ? &kx : &ky;
        int order = k == 0 ? dx : dy;
        std::vector<float> kerI(ksize, 0.0f);

        if (order == 0) {
            kerI[0] = norm, kerI[ksize / 2] = w*norm, kerI[ksize - 1] = norm;
        }
        else if (order == 1) {
            kerI[0] = -1, kerI[ksize / 2] = 0, kerI[ksize - 1] = 1;
        }

        Mat temp(kernel->rows, kernel->cols, CV_32F, &kerI[0]);
        temp.copyTo(*kernel);
    }
}
Developer: 007Indian, Project: opencv, Lines: 42, Source: nldiffusion_functions.cpp
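The kernels produced above are separable, so they would typically be applied with cv::sepFilter2D. The snippet below is a sketch of that pairing; the image file name and the scale value are assumptions.

// Hypothetical usage: build scale-2 first-order x-derivative kernels and apply them.
cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
cv::Mat kx, ky, dx_response;
compute_derivative_kernels(kx, ky, 1 /*dx*/, 0 /*dy*/, 2 /*scale*/);
cv::sepFilter2D(img, dx_response, CV_32F, kx, ky);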
Example 8: illuminationChange
void illuminationChange(cv::InputArray _src,
                        cv::InputArray _mask,
                        cv::OutputArray _dst,
                        float a,
                        float b)
{
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();

    float alpha = a;
    float beta = b;

    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if(mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;

    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);

    Cloning obj;
    obj.illum_change(src, cs_mask, gray, blend, alpha, beta);
}
Developer: pablospe, Project: seamless_cloning, Lines: 29, Source: seamless_cloning.cpp
Example 9: getLatestEdgeMask
void EdgeDetector_<ParallelUtils::eGLSL>::getLatestEdgeMask(cv::OutputArray _oLastEdgeMask) {
    _oLastEdgeMask.create(m_oFrameSize,CV_8UC1);
    cv::Mat oLastEdgeMask = _oLastEdgeMask.getMat();
    if(!GLImageProcAlgo::m_bFetchingOutput)
        glAssert(GLImageProcAlgo::setOutputFetching(true))
    GLImageProcAlgo::fetchLastOutput(oLastEdgeMask);
}
Developer: caomw, Project: litiv, Lines: 7, Source: EdgeDetectionUtils.cpp
Example 10: textureFlattening
void textureFlattening(cv::InputArray _src,
                       cv::InputArray _mask,
                       cv::OutputArray _dst,
                       double low_threshold,
                       double high_threshold,
                       int kernel_size)
{
    Mat src = _src.getMat();
    Mat mask = _mask.getMat();
    _dst.create(src.size(), src.type());
    Mat blend = _dst.getMat();

    Mat gray = Mat::zeros(mask.size(), CV_8UC1);
    if(mask.channels() == 3)
        cvtColor(mask, gray, COLOR_BGR2GRAY);
    else
        gray = mask;

    Mat cs_mask = Mat::zeros(src.size(), CV_8UC3);
    src.copyTo(cs_mask, gray);

    Cloning obj;
    obj.texture_flatten(src, cs_mask, gray, low_threshold, high_threshold, kernel_size, blend);
}
Developer: pablospe, Project: seamless_cloning, Lines: 27, Source: seamless_cloning.cpp
Example 11: operator()
void BackgroundSubtractorMedian::operator()(cv::InputArray _image, cv::OutputArray _fgmask, double learningRate)
{
    framecount++;

    cv::Mat image = _image.getMat();
    if (image.channels() > 1) {
        cvtColor(image,image,CV_BGR2GRAY);
    }
    if (image.cols == 0 || image.rows == 0) {
        return;
    }

    _fgmask.create(image.size(), CV_8U);
    cv::Mat fgmask = _fgmask.getMat();

    if (!init)
    {
        init = true;
        bgmodel = cv::Mat(image.size(), CV_8U);
    }

    //printf("(%d,%d)(%d) ",image.cols,image.rows,image.type());
    //printf("(%d,%d)(%d)\n",bgmodel.cols,bgmodel.rows,bgmodel.type());

    // move the background model one gray level toward the current frame
    cv::Mat cmpArr = cv::Mat(image.size(),CV_8U);
    cv::compare(image, bgmodel, cmpArr, CV_CMP_GT);
    cv::bitwise_and(cmpArr, 1, cmpArr);
    cv::add(bgmodel, cmpArr, bgmodel);

    cmpArr = cv::Mat(image.size(),CV_8U);
    cv::compare(image, bgmodel, cmpArr, CV_CMP_LT);
    cv::bitwise_and(cmpArr, 1, cmpArr);
    cv::subtract(bgmodel, cmpArr, bgmodel);

    // foreground mask = thresholded, median-filtered difference from the model
    cv::absdiff(image, bgmodel,fgmask);
    cv::threshold(fgmask,fgmask,fg_threshold,255,CV_THRESH_TOZERO);
    cv::medianBlur(fgmask,fgmask,median_filter_level);
}
Developer: jlo1, Project: CognitiveVideo, Lines: 34, Source: BackgroundSubtractorMedian.cpp
Example 12: getLatestForegroundMask
void IBackgroundSubtractor_GLSL::getLatestForegroundMask(cv::OutputArray _oLastFGMask) {
    _oLastFGMask.create(m_oImgSize,CV_8UC1);
    cv::Mat oLastFGMask = _oLastFGMask.getMat();
    glAssert(GLImageProcAlgo::m_bFetchingOutput || GLImageProcAlgo::setOutputFetching(true))
    if(GLImageProcAlgo::m_nInternalFrameIdx>0)
        GLImageProcAlgo::fetchLastOutput(oLastFGMask);
    else
        oLastFGMask = cv::Scalar_<uchar>(0);
}
Developer: imagedl, Project: litiv, Lines: 9, Source: BackgroundSubtractionUtils.cpp
Example 13: grabFrame
bool OpenNI2Grabber::grabFrame(cv::OutputArray _color) {
    if (_color.kind() != cv::_InputArray::MAT)
        BOOST_THROW_EXCEPTION(GrabberException("Grabbing only into cv::Mat"));
    _color.create(p->color_image_resolution.height, p->color_image_resolution.width, CV_8UC3);
    cv::Mat color = _color.getMat();
    return p->grabFrame(color);
}
Developer: taketwo, Project: radical, Lines: 9, Source: openni2_grabber.cpp
Example 14: getLatestForegroundMask
void BackgroundSubtractor_<ParallelUtils::eGLSL>::getLatestForegroundMask(cv::OutputArray _oLastFGMask) {
    _oLastFGMask.create(m_oImgSize,CV_8UC1);
    cv::Mat oLastFGMask = _oLastFGMask.getMat();
    if(!GLImageProcAlgo::m_bFetchingOutput)
        glAssert(GLImageProcAlgo::setOutputFetching(true))
    else if(m_nFrameIdx>0)
        GLImageProcAlgo::fetchLastOutput(oLastFGMask);
    else
        oLastFGMask = cv::Scalar_<uchar>(0);
}
Developer: caomw, Project: litiv, Lines: 10, Source: BackgroundSubtractionUtils.cpp
Example 15: warmify
void warmify(cv::InputArray src, cv::OutputArray dst, uchar delta)
{
    CV_Assert(src.type() == CV_8UC3);
    Mat imgSrc = src.getMat();
    CV_Assert(imgSrc.data);

    dst.create(src.size(), CV_8UC3);
    Mat imgDst = dst.getMat();

    imgDst = imgSrc + Scalar(0, delta, delta);
}
Developer: ArtemSkrebkov, Project: opencv_contrib, Lines: 10, Source: warmify.cpp
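Since warmify() only adds delta to the green and red channels of a BGR image, a call site is trivial; the file name and delta below are illustrative assumptions.

// Hypothetical usage of warmify().
cv::Mat photo = cv::imread("photo.jpg");   // must be CV_8UC3
cv::Mat warm;
warmify(photo, warm, 30);                  // shift the G and R channels up by 30
cv::imwrite("photo_warm.jpg", warm);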
Example 16: stereoMatching
void stereo::stereoMatching(cv::InputArray _recImage1, cv::InputArray _recIamge2, cv::OutputArray _disparityMap, int minDisparity, int numDisparities, int SADWindowSize, int P1, int P2)
{
    Mat img1 = _recImage1.getMat();
    Mat img2 = _recIamge2.getMat();
    _disparityMap.create(img1.size(), CV_16S);
    Mat dis = _disparityMap.getMat();

    StereoSGBM matcher(minDisparity, numDisparities, SADWindowSize, P1, P2);
    matcher(img1, img2, dis);
    dis = dis / 16.0;
}
Developer: caomw, Project: stereo_matching, Lines: 10, Source: stereo.cpp
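This wrapper uses the pre-OpenCV-3.0 cv::StereoSGBM functor interface. Below is a hedged call-site sketch; the file names and parameter values are assumptions, following the common convention of tying P1/P2 to the channel count and the SAD window size.

// Hypothetical usage of stereo::stereoMatching() with OpenCV 2.4-style parameters.
int sadWindow = 9;
cv::Mat rectLeft  = cv::imread("rect_left.png");
cv::Mat rectRight = cv::imread("rect_right.png");
cv::Mat disparity;
stereo::stereoMatching(rectLeft, rectRight, disparity,
                       0 /*minDisparity*/, 64 /*numDisparities*/, sadWindow,
                       8 * 3 * sadWindow * sadWindow,    // P1
                       32 * 3 * sadWindow * sadWindow);  // P2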
Example 17: getLatestEdgeMask
void IEdgeDetector_GLSL::getLatestEdgeMask(cv::OutputArray _oLastEdgeMask) {
    lvAssert_(GLImageProcAlgo::m_bGLInitialized,"algo must be initialized first");
    _oLastEdgeMask.create(GLImageProcAlgo::m_oFrameSize,CV_8UC1);
    cv::Mat oLastEdgeMask = _oLastEdgeMask.getMat();
    lvAssert_(GLImageProcAlgo::m_bFetchingOutput || GLImageProcAlgo::setOutputFetching(true),"algo not initialized with mat output support")
    if(GLImageProcAlgo::m_nInternalFrameIdx>0)
        GLImageProcAlgo::fetchLastOutput(oLastEdgeMask);
    else
        oLastEdgeMask = cv::Scalar_<uchar>(0);
}
Developer: plstcharles, Project: litiv, Lines: 10, Source: EdgeDetectionUtils.cpp
Example 18: seqToMat
static void seqToMat(const CvSeq* seq, cv::OutputArray _arr)
{
    if( seq && seq->total > 0 )
    {
        _arr.create(1, seq->total, seq->flags, -1, true);
        cv::Mat arr = _arr.getMat();
        cvCvtSeqToArray(seq, arr.data);
    }
    else
        _arr.release();
}
Developer: gyoerkaa, Project: GraphRecog, Lines: 11, Source: HoughTrans.cpp
Example 19: StereoMatching
void StereoMatch::StereoMatching(cv::InputArray rec_image1, cv::InputArray rec_image2,
    cv::OutputArray disparity_map, int min_disparity, int num_disparities, int SAD_window_size,
    int P1, int P2)
{
    cv::Mat img1 = rec_image1.getMat();
    cv::Mat img2 = rec_image2.getMat();
    disparity_map.create(img1.size(), CV_16S);
    cv::Mat dis = disparity_map.getMat();

    cv::StereoSGBM matcher(min_disparity, num_disparities, SAD_window_size, P1, P2);
    matcher(img1, img2, dis);
    dis = dis / 16.0;
}
Developer: rpankka, Project: Stereo, Lines: 13, Source: stereo_match.cpp
Example 20: apply
void EdgeDetectorCanny::apply(cv::InputArray _oInputImage, cv::OutputArray _oEdgeMask) {
    cv::Mat oInputImg = _oInputImage.getMat();
    CV_Assert(!oInputImg.empty());
    CV_Assert(oInputImg.channels()==1 || oInputImg.channels()==3 || oInputImg.channels()==4);
    _oEdgeMask.create(oInputImg.size(),CV_8UC1);
    cv::Mat oEdgeMask = _oEdgeMask.getMat();
    oEdgeMask = cv::Scalar_<uchar>(0);
    cv::Mat oTempEdgeMask = oEdgeMask.clone();
    for(size_t nCurrThreshold=0; nCurrThreshold<UCHAR_MAX; ++nCurrThreshold) {
        apply_threshold(oInputImg,oTempEdgeMask,double(nCurrThreshold));
        oEdgeMask += oTempEdgeMask/UCHAR_MAX;
    }
    cv::normalize(oEdgeMask,oEdgeMask,0,UCHAR_MAX,cv::NORM_MINMAX);
}
Developer: medyakovvit, Project: litiv, Lines: 14, Source: EdgeDetectorCanny.cpp
Note: The cv::OutputArray class examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code, and do not reproduce this article without permission.