This article collects typical usage examples of the C++ CV_MAT_DEPTH macro. If you have been wondering what CV_MAT_DEPTH does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
The following shows 20 code examples of CV_MAT_DEPTH, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
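For orientation before the project excerpts: CV_MAT_DEPTH is a macro defined in the OpenCV core headers that extracts the depth (the per-element data type, such as CV_8U or CV_32F) from a packed type value, discarding the channel count. A minimal standalone sketch of that relationship (not taken from any of the projects below):

#include <opencv2/core/core.hpp>
#include <cassert>

int main()
{
    int type = CV_16SC3;                    // 16-bit signed elements, 3 channels
    assert(CV_MAT_DEPTH(type) == CV_16S);   // depth only, channel count discarded
    assert(CV_MAT_CN(type) == 3);           // companion macro: number of channels
    assert(CV_MAKETYPE(CV_16S, 3) == type); // rebuilds the packed type value
    return 0;
}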
Example 1: CV_MAT_DEPTH
void CmShow::SaveShow(CMat& img, CStr& title)
{
    if (title.size() == 0)
        return;
    int mDepth = CV_MAT_DEPTH(img.type());
    double scale = (mDepth == CV_32F || mDepth == CV_64F ? 255 : 1);
    if (title.size() > 4 && title[title.size() - 4] == '.')
        imwrite(title, img*scale);
    else if (title.size())
        imshow(title, img);
}
Developer: zjucsxxd, Project: Bing-Objectness-linux, Lines: 12, Source file: CmShow.cpp
Example 2: toHppType
//! convert OpenCV data type to hppDataType
inline int toHppType(const int cvType)
{
    int depth = CV_MAT_DEPTH(cvType);
    int hppType = depth == CV_8U ? HPP_DATA_TYPE_8U :
                  depth == CV_16U ? HPP_DATA_TYPE_16U :
                  depth == CV_16S ? HPP_DATA_TYPE_16S :
                  depth == CV_32S ? HPP_DATA_TYPE_32S :
                  depth == CV_32F ? HPP_DATA_TYPE_32F :
                  depth == CV_64F ? HPP_DATA_TYPE_64F : -1;
    CV_Assert( hppType >= 0 );
    return hppType;
}
Developer: EmergentVR, Project: opencv_win312, Lines: 13, Source file: ippasync.hpp
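A possible call site for the helper above; the frame dimensions are arbitrary and the sketch assumes the same IPP Asynchronous headers as the original:

cv::Mat frame(480, 640, CV_8UC3);
int hppType = toHppType(frame.type()); // CV_MAT_DEPTH(CV_8UC3) == CV_8U, so this yields HPP_DATA_TYPE_8U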
示例3: input
/*! Find Canny edges
\param input input (mat) image (must be color or gray 8-bit image)
\param output edges (mat) image (single channel)
\param threshold1 minimum threshold
\param threshold2 maximum threshold
\param apertureSize aperture size for Sobel operator
\return <a>true</a> if edges were computed successfully
The maximum threshold is set to three times the value of
<a>threshold1</a>, unless <a>threshold2</a> is greater than 0.
\note See OpenCV reference for an explanation on the thresholds
and aperture size.
*/
bool findCannyEdges(const Mat& input, Mat& output,
double threshold1, double threshold2, int apertureSize)
{
// check input type
if (input.type() != CV_8UC1 &&
input.type() != CV_8UC3)
{
ofLog(OF_LOG_WARNING, "in findCannyEdges, input image format is invalid");
return false;
}
// set threshold2 if necessary
if (threshold2 <= 0.0) threshold2 = threshold1*3;
// create output image
if (output.empty() ||
!sameProperties(input, output))
{
output.create(input.rows,
input.cols,
CV_MAKETYPE(CV_MAT_DEPTH(input.depth()),1));
}
// convert input image to single channel if necessary
Mat gray;
if (input.channels() == 3)
{
gray.create(input.rows,
input.cols,
CV_MAKETYPE(CV_MAT_DEPTH(input.depth()),1));
cvtColor(input, gray, CV_RGB2GRAY);
}
else {
gray = input;
}
// find edges
Canny(gray, output, threshold1, threshold2, apertureSize);
return true;
}
Developer: marynel, Project: ofxOpenCv2, Lines: 54, Source file: ofxCv2ImageUtil.cpp
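A short usage sketch for the wrapper above; the file names are hypothetical:

cv::Mat frame = cv::imread("frame.png"); // loaded as 8-bit BGR
cv::Mat edges;
// passing threshold2 <= 0 lets the wrapper derive it as 3 * threshold1
if (findCannyEdges(frame, edges, 80.0, 0.0, 3))
    cv::imwrite("edges.png", edges);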
Example 4: toCvCopyImpl
// Internal, used by toCvCopy and cvtColor
CvImagePtr toCvCopyImpl(const cv::Mat& source,
const std_msgs::Header& src_header,
const std::string& src_encoding,
const std::string& dst_encoding)
{
// Copy metadata
CvImagePtr ptr = boost::make_shared<CvImage>();
ptr->header = src_header;
// Copy to new buffer if same encoding requested
if (dst_encoding.empty() || dst_encoding == src_encoding)
{
ptr->encoding = src_encoding;
source.copyTo(ptr->image);
}
else
{
// Convert the source data to the desired encoding
const std::vector<int> conversion_codes = getConversionCode(src_encoding, dst_encoding);
cv::Mat image1 = source;
cv::Mat image2;
for(size_t i=0; i<conversion_codes.size(); ++i) {
int conversion_code = conversion_codes[i];
if (conversion_code == SAME_FORMAT)
{
// Same number of channels, but different bit depth
int src_depth = enc::bitDepth(src_encoding);
int dst_depth = enc::bitDepth(dst_encoding);
// Keep the number of channels for now but changed to the final depth
int image2_type = CV_MAKETYPE(CV_MAT_DEPTH(getCvType(dst_encoding)), image1.channels());
// Do scaling between CV_8U [0,255] and CV_16U [0,65535] images.
if (src_depth == 8 && dst_depth == 16)
image1.convertTo(image2, image2_type, 65535. / 255.);
else if (src_depth == 16 && dst_depth == 8)
image1.convertTo(image2, image2_type, 255. / 65535.);
else
image1.convertTo(image2, image2_type);
}
else
{
// Perform color conversion
cv::cvtColor(image1, image2, conversion_code);
}
image1 = image2;
}
ptr->image = image2;
ptr->encoding = dst_encoding;
}
return ptr;
}
Developer: LouLinear, Project: APC_vision, Lines: 53, Source file: cv_bridge.cpp
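toCvCopyImpl is internal to cv_bridge; user code normally reaches it through cv_bridge::toCvCopy inside a ROS image callback. A sketch of that typical path, assuming the standard cv_bridge and sensor_msgs headers (not part of the excerpt above):

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>

void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    // Copies the message data and converts it to BGR8, going through toCvCopyImpl internally.
    cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    cv::Mat bgr = cv_ptr->image;
    // ... process bgr ...
}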
Example 5: CV_UNUSED
double CxCore_MulSpectrumsTest::get_success_error_level( int test_case_idx, int i, int j )
{
    CV_UNUSED(test_case_idx);
    CV_Assert(i == OUTPUT);
    CV_Assert(j == 0);
    int elem_depth = CV_MAT_DEPTH(cvGetElemType(test_array[i][j]));
    CV_Assert(elem_depth == CV_32F || elem_depth == CV_64F);
    element_wise_relative_error = false;
    double maxInputValue = 1000; // ArrayTest::get_minmax_bounds
    double err = 8 * maxInputValue; // result = A*B + C*D
    return (elem_depth == CV_32F ? FLT_EPSILON : DBL_EPSILON) * err;
}
Developer: AliMiraftab, Project: opencv, Lines: 13, Source file: test_dxt.cpp
Example 6: CV_MAT_DEPTH
int cv::connectedComponents(InputArray _img, OutputArray _labels, int connectivity, int ltype){
    const cv::Mat img = _img.getMat();
    _labels.create(img.size(), CV_MAT_DEPTH(ltype));
    cv::Mat labels = _labels.getMat();
    connectedcomponents::NoOp sop;
    if(ltype == CV_16U){
        return connectedComponents_sub1(img, labels, connectivity, sop);
    }else if(ltype == CV_32S){
        return connectedComponents_sub1(img, labels, connectivity, sop);
    }else{
        CV_Error(CV_StsUnsupportedFormat, "the type of labels must be 16u or 32s");
        return 0;
    }
}
Developer: sclee0095, Project: vco, Lines: 14, Source file: connectedcomponents.cpp
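A typical call into the function above with a binarized 8-bit image; the input file is hypothetical:

cv::Mat gray = cv::imread("blobs.png", cv::IMREAD_GRAYSCALE);
cv::Mat binary, labels;
cv::threshold(gray, binary, 128, 255, cv::THRESH_BINARY);
int nLabels = cv::connectedComponents(binary, labels, 8, CV_32S);
// labels comes back as CV_32S; label 0 is the background, 1..nLabels-1 are the components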
Example 7: ocl_accumulate
static bool ocl_accumulate( InputArray _src, InputArray _src2, InputOutputArray _dst, double alpha,
InputArray _mask, int op_type )
{
CV_Assert(op_type == ACCUMULATE || op_type == ACCUMULATE_SQUARE ||
op_type == ACCUMULATE_PRODUCT || op_type == ACCUMULATE_WEIGHTED);
int stype = _src.type(), cn = CV_MAT_CN(stype);
int sdepth = CV_MAT_DEPTH(stype), ddepth = _dst.depth();
bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0,
haveMask = !_mask.empty();
if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
return false;
const char * const opMap[4] = { "ACCUMULATE", "ACCUMULATE_SQUARE", "ACCUMULATE_PRODUCT",
"ACCUMULATE_WEIGHTED" };
ocl::Kernel k("accumulate", ocl::imgproc::accumulate_oclsrc,
format("-D %s%s -D srcT=%s -D cn=%d -D dstT=%s%s",
opMap[op_type], haveMask ? " -D HAVE_MASK" : "",
ocl::typeToStr(sdepth), cn, ocl::typeToStr(ddepth),
doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
if (k.empty())
return false;
UMat src = _src.getUMat(), src2 = _src2.getUMat(), dst = _dst.getUMat(), mask = _mask.getUMat();
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
src2arg = ocl::KernelArg::ReadOnlyNoSize(src2),
dstarg = ocl::KernelArg::ReadWrite(dst),
maskarg = ocl::KernelArg::ReadOnlyNoSize(mask);
int argidx = k.set(0, srcarg);
if (op_type == ACCUMULATE_PRODUCT)
argidx = k.set(argidx, src2arg);
argidx = k.set(argidx, dstarg);
if (op_type == ACCUMULATE_WEIGHTED)
{
if (ddepth == CV_32F)
argidx = k.set(argidx, (float)alpha);
else
argidx = k.set(argidx, alpha);
}
if (haveMask)
k.set(argidx, maskarg);
size_t globalsize[2] = { src.cols, src.rows };
return k.run(2, globalsize, NULL, false);
}
Developer: stalinizer, Project: opencv, Lines: 50, Source file: accum.cpp
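This OpenCL branch sits behind the public accumulation functions; a caller typically looks like the running-average sketch below (frame source hypothetical):

cv::Mat frame = cv::imread("frame.png");                  // 8-bit BGR frame
cv::Mat avg(frame.size(), CV_32FC3, cv::Scalar::all(0));  // accumulator must be CV_32F or CV_64F
cv::accumulateWeighted(frame, avg, 0.05);                 // maps to op_type == ACCUMULATE_WEIGHTED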
Example 8: matchTemplate_CCOEFF
static bool matchTemplate_CCOEFF(InputArray _image, InputArray _templ, OutputArray _result)
{
matchTemplate(_image, _templ, _result, CV_TM_CCORR);
UMat image_sums, temp;
integral(_image, temp);
if (temp.depth() == CV_64F)
temp.convertTo(image_sums, CV_32F);
else
image_sums = temp;
int type = image_sums.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
ocl::Kernel k("matchTemplate_Prepared_CCOEFF", ocl::imgproc::match_template_oclsrc,
format("-D CCOEFF -D T=%s -D elem_type=%s -D cn=%d", ocl::typeToStr(type), ocl::typeToStr(depth), cn));
if (k.empty())
return false;
UMat templ = _templ.getUMat();
Size size = _image.size(), tsize = templ.size();
_result.create(size.height - templ.rows + 1, size.width - templ.cols + 1, CV_32F);
UMat result = _result.getUMat();
if (cn == 1)
{
float templ_sum = static_cast<float>(sum(_templ)[0]) / tsize.area();
k.args(ocl::KernelArg::ReadOnlyNoSize(image_sums), ocl::KernelArg::ReadWrite(result),
templ.rows, templ.cols, templ_sum);
}
else
{
Vec4f templ_sum = Vec4f::all(0);
templ_sum = sum(templ) / tsize.area();
if (cn == 2)
k.args(ocl::KernelArg::ReadOnlyNoSize(image_sums), ocl::KernelArg::ReadWrite(result), templ.rows, templ.cols,
templ_sum[0], templ_sum[1]);
else if (cn==3)
k.args(ocl::KernelArg::ReadOnlyNoSize(image_sums), ocl::KernelArg::ReadWrite(result), templ.rows, templ.cols,
templ_sum[0], templ_sum[1], templ_sum[2]);
else
k.args(ocl::KernelArg::ReadOnlyNoSize(image_sums), ocl::KernelArg::ReadWrite(result), templ.rows, templ.cols,
templ_sum[0], templ_sum[1], templ_sum[2], templ_sum[3]);
}
size_t globalsize[2] = { result.cols, result.rows };
return k.run(2, globalsize, NULL, false);
}
Developer: BYTERHIT, Project: opencv, Lines: 50, Source file: templmatch.cpp
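matchTemplate_CCOEFF is the OpenCL helper behind cv::matchTemplate with the CCOEFF method; a typical call from user code (file names hypothetical):

cv::Mat image = cv::imread("scene.png");
cv::Mat templ = cv::imread("patch.png");
cv::Mat result;
cv::matchTemplate(image, templ, result, CV_TM_CCOEFF);    // result is CV_32F
double maxVal;
cv::Point maxLoc;
cv::minMaxLoc(result, 0, &maxVal, 0, &maxLoc);            // best match position at maxLoc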
Example 9: get_minmax_bounds
void CV_MHIBaseTest::get_minmax_bounds( int i, int j, int type, CvScalar* low, CvScalar* high )
{
    CvArrTest::get_minmax_bounds( i, j, type, low, high );
    if( i == INPUT && CV_MAT_DEPTH(type) == CV_8U )
    {
        *low = cvScalarAll(cvRound(-1./silh_ratio)+2.);
        *high = cvScalarAll(2);
    }
    else if( i == mhi_i || i == mhi_ref_i )
    {
        *low = cvScalarAll(-exp(max_log_duration));
        *high = cvScalarAll(0.);
    }
}
Developer: ChristophGuillermet, Project: WHITECAT_opensource, Lines: 14, Source file: amotiontemplates.cpp
Example 10: ocl_dot
static bool ocl_dot( InputArray _src1, InputArray _src2, double & res )
{
UMat src1 = _src1.getUMat().reshape(1), src2 = _src2.getUMat().reshape(1);
int type = src1.type(), depth = CV_MAT_DEPTH(type),
kercn = ocl::predictOptimalVectorWidth(src1, src2);
bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
if ( !doubleSupport && depth == CV_64F )
return false;
int dbsize = ocl::Device::getDefault().maxComputeUnits();
size_t wgs = ocl::Device::getDefault().maxWorkGroupSize();
int ddepth = std::max(CV_32F, depth);
int wgs2_aligned = 1;
while (wgs2_aligned < (int)wgs)
wgs2_aligned <<= 1;
wgs2_aligned >>= 1;
char cvt[40];
ocl::Kernel k("reduce", ocl::core::reduce_oclsrc,
format("-D srcT=%s -D srcT1=%s -D dstT=%s -D dstTK=%s -D ddepth=%d -D convertToDT=%s -D OP_DOT "
"-D WGS=%d -D WGS2_ALIGNED=%d%s%s%s -D kercn=%d",
ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)), ocl::typeToStr(depth),
ocl::typeToStr(ddepth), ocl::typeToStr(CV_MAKE_TYPE(ddepth, kercn)),
ddepth, ocl::convertTypeStr(depth, ddepth, kercn, cvt),
(int)wgs, wgs2_aligned, doubleSupport ? " -D DOUBLE_SUPPORT" : "",
_src1.isContinuous() ? " -D HAVE_SRC_CONT" : "",
_src2.isContinuous() ? " -D HAVE_SRC2_CONT" : "", kercn));
if (k.empty())
return false;
UMat db(1, dbsize, ddepth);
ocl::KernelArg src1arg = ocl::KernelArg::ReadOnlyNoSize(src1),
src2arg = ocl::KernelArg::ReadOnlyNoSize(src2),
dbarg = ocl::KernelArg::PtrWriteOnly(db);
k.args(src1arg, src1.cols, (int)src1.total(), dbsize, dbarg, src2arg);
size_t globalsize = dbsize * wgs;
if (k.run(1, &globalsize, &wgs, false))
{
res = sum(db.getMat(ACCESS_READ))[0];
return true;
}
return false;
}
Developer: 165-goethals, Project: opencv, Lines: 49, Source file: umatrix.cpp
Example 11: ocl_pyrUp
static bool ocl_pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType)
{
int type = _src.type(), depth = CV_MAT_DEPTH(type), channels = CV_MAT_CN(type);
if (channels > 4 || borderType != BORDER_DEFAULT)
return false;
bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
if (depth == CV_64F && !doubleSupport)
return false;
Size ssize = _src.size();
if ((_dsz.area() != 0) && (_dsz != Size(ssize.width * 2, ssize.height * 2)))
return false;
UMat src = _src.getUMat();
Size dsize = Size(ssize.width * 2, ssize.height * 2);
_dst.create( dsize, src.type() );
UMat dst = _dst.getUMat();
int float_depth = depth == CV_64F ? CV_64F : CV_32F;
const int local_size = 16;
char cvt[2][50];
String buildOptions = format(
"-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s "
"-D T1=%s -D cn=%d -D LOCAL_SIZE=%d",
ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, channels)),
ocl::convertTypeStr(float_depth, depth, channels, cvt[0]),
ocl::convertTypeStr(depth, float_depth, channels, cvt[1]),
doubleSupport ? " -D DOUBLE_SUPPORT" : "",
ocl::typeToStr(depth), channels, local_size
);
size_t globalThreads[2] = { dst.cols, dst.rows };
size_t localThreads[2] = { local_size, local_size };
ocl::Kernel k;
if (ocl::Device::getDefault().isIntel() && channels == 1)
{
k.create("pyrUp_unrolled", ocl::imgproc::pyr_up_oclsrc, buildOptions);
globalThreads[0] = dst.cols/2; globalThreads[1] = dst.rows/2;
}
else
k.create("pyrUp", ocl::imgproc::pyr_up_oclsrc, buildOptions);
if (k.empty())
return false;
k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));
return k.run(2, globalThreads, localThreads, false);
}
Developer: Asafadari, Project: opencv, Lines: 49, Source file: pyramids.cpp
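The public entry point for this kernel is cv::pyrUp; a minimal sketch of the usual call (input file hypothetical):

cv::Mat lowres = cv::imread("lowres.png");
cv::Mat upsampled;
cv::pyrUp(lowres, upsampled);   // default destination size is twice the width and height
// the matching downsampling call, cv::pyrDown, is shown by the next example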
Example 12: ocl_pyrDown
static bool ocl_pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType)
{
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0;
if (cn > 4 || (depth == CV_64F && !doubleSupport))
return false;
Size ssize = _src.size();
Size dsize = _dsz.area() == 0 ? Size((ssize.width + 1) / 2, (ssize.height + 1) / 2) : _dsz;
if (dsize.height < 2 || dsize.width < 2)
return false;
CV_Assert( ssize.width > 0 && ssize.height > 0 &&
std::abs(dsize.width*2 - ssize.width) <= 2 &&
std::abs(dsize.height*2 - ssize.height) <= 2 );
UMat src = _src.getUMat();
_dst.create( dsize, src.type() );
UMat dst = _dst.getUMat();
int float_depth = depth == CV_64F ? CV_64F : CV_32F;
const int local_size = 256;
int kercn = 1;
if (depth == CV_8U && float_depth == CV_32F && cn == 1 && ocl::Device::getDefault().isIntel())
kercn = 4;
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", "BORDER_WRAP",
"BORDER_REFLECT_101" };
char cvt[2][50];
String buildOptions = format(
"-D T=%s -D FT=%s -D convertToT=%s -D convertToFT=%s%s "
"-D T1=%s -D cn=%d -D kercn=%d -D fdepth=%d -D %s -D LOCAL_SIZE=%d",
ocl::typeToStr(type), ocl::typeToStr(CV_MAKETYPE(float_depth, cn)),
ocl::convertTypeStr(float_depth, depth, cn, cvt[0]),
ocl::convertTypeStr(depth, float_depth, cn, cvt[1]),
doubleSupport ? " -D DOUBLE_SUPPORT" : "", ocl::typeToStr(depth),
cn, kercn, float_depth, borderMap[borderType], local_size
);
ocl::Kernel k("pyrDown", ocl::imgproc::pyr_down_oclsrc, buildOptions);
if (k.empty())
return false;
k.args(ocl::KernelArg::ReadOnly(src), ocl::KernelArg::WriteOnly(dst));
size_t localThreads[2] = { local_size/kercn, 1 };
size_t globalThreads[2] = { (src.cols + (kercn-1))/kercn, (dst.rows + 1) / 2 };
return k.run(2, globalThreads, localThreads, false);
}
Developer: Asafadari, Project: opencv, Lines: 48, Source file: pyramids.cpp
示例13: libopencv_
static void libopencv_(Main_opencvMat2torch)(CvMat *source, THTensor *dest) {
int mat_step;
CvSize mat_size;
THTensor *tensor;
// type dependent variables
float * data_32F;
float * data_32Fp;
double * data_64F;
double * data_64Fp;
uchar * data_8U;
uchar * data_8Up;
char * data_8S;
char * data_8Sp;
unsigned int * data_16U;
unsigned int * data_16Up;
short * data_16S;
short * data_16Sp;
switch (CV_MAT_DEPTH(source->type))
{
case CV_32F:
cvGetRawData(source, (uchar**)&data_32F, &mat_step, &mat_size);
// Resize target
THTensor_(resize3d)(dest, 1, source->rows, source->cols);
tensor = THTensor_(newContiguous)(dest);
data_32Fp = data_32F;
// copy
TH_TENSOR_APPLY(real, tensor,
*tensor_data = ((real)(*data_32Fp));
// step through channels of ipl
data_32Fp++;
);
THTensor_(free)(tensor);
break;
case CV_64F:
cvGetRawData(source, (uchar**)&data_64F, &mat_step, &mat_size);
// Resize target
THTensor_(resize3d)(dest, 1, source->rows, source->cols);
tensor = THTensor_(newContiguous)(dest);
data_64Fp = data_64F;
// copy
TH_TENSOR_APPLY(real, tensor,
*tensor_data = ((real)(*data_64Fp));
// step through channels of ipl
data_64Fp++;
);
Developer: marcoscoffier, Project: lua---opencv, Lines: 47, Source file: opencv.c
Example 14: CV_MAT_DEPTH
void FilterBase::apply(cv::InputArray _src, cv::OutputArray _dst, const int &ddepth){
int stype = _src.type();
int dcn = _src.channels();
int depth = CV_MAT_DEPTH(stype);
if (0 <= ddepth)
depth = ddepth;
Mat src, dst;
src = _src.getMat();
Size sz = src.size();
_dst.create(sz, CV_MAKETYPE(depth, dcn));
dst = _dst.getMat();
int imageWidth = src.rows;
int imageHeight = src.cols;
Mat srcChannels[3];
split(src, srcChannels);
int margineWidth = kernel.cols / 2;
int margineHeight = kernel.rows / 2;
double kernelElemCount = (double)(kernel.cols * kernel.rows);
for(int ch = 0; ch < dcn; ++ch){
for(int y = 0; y < imageHeight; ++y){
Vec3d *ptr = dst.ptr<Vec3d>(y);
for(int x = 0; x < imageWidth; ++x){
if (isEdge(x, y, imageWidth, imageHeight, margineWidth, margineWidth)){
ptr[x][ch]
= calcKernelOutputAtEdge(srcChannels[ch],
kernel, x, y,
imageWidth, imageHeight,
margineWidth, margineHeight);
}else{
ptr[x][ch]
= calcKernelOutput(srcChannels[ch],
kernel, x, y,
margineWidth, margineHeight,
kernelElemCount);
}
}
}
}
}
Developer: YoshiProton, Project: lets_enjoy_cv_programing, Lines: 47, Source file: filter.cpp
Example 15: CV_MAT_DEPTH
void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
int ksize, double scale, double delta, int borderType )
{
int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
if (ddepth < 0)
ddepth = sdepth;
int dtype = CV_MAKE_TYPE(ddepth, cn);
_dst.create( _src.size(), dtype );
#ifdef HAVE_TEGRA_OPTIMIZATION
if (scale == 1.0 && delta == 0)
{
Mat src = _src.getMat(), dst = _dst.getMat();
if (ksize == 3 && tegra::sobel3x3(src, dst, dx, dy, borderType))
return;
if (ksize == -1 && tegra::scharr(src, dst, dx, dy, borderType))
return;
}
#endif
#ifdef HAVE_IPP
if (ksize < 0)
{
if (IPPDerivScharr(_src, _dst, ddepth, dx, dy, scale, delta, borderType))
return;
}
else if (0 < ksize)
{
if (IPPDerivSobel(_src, _dst, ddepth, dx, dy, ksize, scale, delta, borderType))
return;
}
#endif
int ktype = std::max(CV_32F, std::max(ddepth, sdepth));
Mat kx, ky;
getDerivKernels( kx, ky, dx, dy, ksize, false, ktype );
if( scale != 1 )
{
// usually the smoothing part is the slowest to compute,
// so try to scale it instead of the faster differenciating part
if( dx == 0 )
kx *= scale;
else
ky *= scale;
}
sepFilter2D( _src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
}
Developer: Asafadari, Project: opencv, Lines: 47, Source file: deriv.cpp
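For reference, a typical call into cv::Sobel as implemented above, computing the first x-derivative into a 16-bit destination to avoid clipping (a sketch, not part of the excerpt):

cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
cv::Mat gradX, absGradX;
cv::Sobel(gray, gradX, CV_16S, 1, 0, 3);   // ddepth = CV_16S, dx = 1, dy = 0, ksize = 3
cv::convertScaleAbs(gradX, absGradX);      // back to 8-bit for display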
Example 16: copyTo
void UMat::copyTo(OutputArray _dst, InputArray _mask) const
{
if( _mask.empty() )
{
copyTo(_dst);
return;
}
#ifdef HAVE_OPENCL
int cn = channels(), mtype = _mask.type(), mdepth = CV_MAT_DEPTH(mtype), mcn = CV_MAT_CN(mtype);
CV_Assert( mdepth == CV_8U && (mcn == 1 || mcn == cn) );
if (ocl::useOpenCL() && _dst.isUMat() && dims <= 2)
{
UMatData * prevu = _dst.getUMat().u;
_dst.create( dims, size, type() );
UMat dst = _dst.getUMat();
bool haveDstUninit = false;
if( prevu != dst.u ) // do not leave dst uninitialized
haveDstUninit = true;
String opts = format("-D COPY_TO_MASK -D T1=%s -D scn=%d -D mcn=%d%s",
ocl::memopTypeToStr(depth()), cn, mcn,
haveDstUninit ? " -D HAVE_DST_UNINIT" : "");
ocl::Kernel k("copyToMask", ocl::core::copyset_oclsrc, opts);
if (!k.empty())
{
k.args(ocl::KernelArg::ReadOnlyNoSize(*this),
ocl::KernelArg::ReadOnlyNoSize(_mask.getUMat()),
haveDstUninit ? ocl::KernelArg::WriteOnly(dst) :
ocl::KernelArg::ReadWrite(dst));
size_t globalsize[2] = { cols, rows };
if (k.run(2, globalsize, NULL, false))
{
CV_IMPL_ADD(CV_IMPL_OCL);
return;
}
}
}
#endif
Mat src = getMat(ACCESS_READ);
src.copyTo(_dst, _mask);
}
Developer: 165-goethals, Project: opencv, Lines: 46, Source file: umatrix.cpp
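A small sketch of calling the masked copy above from user code; sizes and values are arbitrary:

cv::UMat src(480, 640, CV_8UC3, cv::Scalar(0, 255, 0));
cv::UMat mask(480, 640, CV_8UC1, cv::Scalar(0));
cv::UMat dst(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));
mask(cv::Rect(100, 100, 200, 200)).setTo(cv::Scalar(255)); // only this region will be copied
src.copyTo(dst, mask);                                      // takes the OpenCL path when available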
Example 17: ocl_norm
static bool ocl_norm( InputArray _src, int normType, InputArray _mask, double & result )
{
const ocl::Device & d = ocl::Device::getDefault();
#ifdef __ANDROID__
if (d.isNVidia())
return false;
#endif
const int cn = _src.channels();
if (cn > 4)
return false;
int type = _src.type(), depth = CV_MAT_DEPTH(type);
bool doubleSupport = d.doubleFPConfig() > 0,
haveMask = _mask.kind() != _InputArray::NONE;
if ( !(normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR) ||
(!doubleSupport && depth == CV_64F))
return false;
UMat src = _src.getUMat();
if (normType == NORM_INF)
{
if (!ocl_minMaxIdx(_src, NULL, &result, NULL, NULL, _mask,
std::max(depth, CV_32S), depth != CV_8U && depth != CV_16U))
return false;
}
else if (normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR)
{
Scalar sc;
bool unstype = depth == CV_8U || depth == CV_16U;
if ( !ocl_sum(haveMask ? src : src.reshape(1), sc, normType == NORM_L2 || normType == NORM_L2SQR ?
OCL_OP_SUM_SQR : (unstype ? OCL_OP_SUM : OCL_OP_SUM_ABS), _mask) )
return false;
double s = 0.0;
for (int i = 0; i < (haveMask ? cn : 1); ++i)
s += sc[i];
result = normType == NORM_L1 || normType == NORM_L2SQR ? s : std::sqrt(s);
}
return true;
}
Developer: adamrankin, Project: opencv, Lines: 45, Source file: norm.cpp
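As with the other OpenCL helpers here, this one is reached through the public cv::norm; a short sketch with a matrix of ones:

cv::Mat a(100, 100, CV_32FC1, cv::Scalar(1.0f));
double l2 = cv::norm(a, cv::NORM_L2);   // sqrt of the sum of squares, 100 for this input
double l1 = cv::norm(a, cv::NORM_L1);   // sum of absolute values, 10000 for this input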
Example 18: cvUpdateMotionHistory
/* motion templates */
CV_IMPL void
cvUpdateMotionHistory( const void* silhouette, void* mhimg,
double timestamp, double mhi_duration )
{
CvSize size;
CvMat silhstub, *silh = (CvMat*)silhouette;
CvMat mhistub, *mhi = (CvMat*)mhimg;
int mhi_step, silh_step;
CV_FUNCNAME( "cvUpdateMHIByTime" );
__BEGIN__;
CV_CALL( silh = cvGetMat( silh, &silhstub ));
CV_CALL( mhi = cvGetMat( mhi, &mhistub ));
if( !CV_IS_MASK_ARR( silh ))
CV_ERROR( CV_StsBadMask, "" );
if( CV_MAT_CN( mhi->type ) > 1 )
CV_ERROR( CV_BadNumChannels, "" );
if( CV_MAT_DEPTH( mhi->type ) != CV_32F )
CV_ERROR( CV_BadDepth, "" );
if( !CV_ARE_SIZES_EQ( mhi, silh ))
CV_ERROR( CV_StsUnmatchedSizes, "" );
size = cvGetMatSize( mhi );
mhi_step = mhi->step;
silh_step = silh->step;
if( CV_IS_MAT_CONT( mhi->type & silh->type ))
{
size.width *= size.height;
mhi_step = silh_step = CV_STUB_STEP;
size.height = 1;
}
IPPI_CALL( icvUpdateMotionHistory_8u32f_C1IR( (const uchar*)(silh->data.ptr), silh_step,
mhi->data.fl, mhi_step, size,
(float)timestamp, (float)mhi_duration ));
__END__;
}
Developer: allanca, Project: otterdive, Lines: 46, Source file: cvmotempl.cpp
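Usage of the legacy C interface shown above typically looks like the per-frame sketch below, assuming the buffers are allocated once with cvCreateMat and timestamps are given in seconds:

CvMat* silh = cvCreateMat(480, 640, CV_8UC1);    // binary silhouette of the moving pixels
CvMat* mhi  = cvCreateMat(480, 640, CV_32FC1);   // motion history image, single-channel float
cvZero(mhi);
double timestamp = 1.25;   // current time in seconds
double duration  = 0.5;    // how long a motion trace is kept
// ... fill silh from frame differencing ...
cvUpdateMotionHistory(silh, mhi, timestamp, duration);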
Example 19: OCL_PERF_TEST_P
OCL_PERF_TEST_P(ConvertToFixture, ConvertTo,
                ::testing::Combine(OCL_TEST_SIZES, OCL_TEST_TYPES))
{
    const Size_MatType_t params = GetParam();
    const Size srcSize = get<0>(params);
    const int type = get<1>(params), ddepth = CV_MAT_DEPTH(type) == CV_8U ? CV_32F : CV_8U,
              cn = CV_MAT_CN(type), dtype = CV_MAKE_TYPE(ddepth, cn);
    checkDeviceMaxMemoryAllocSize(srcSize, type);
    checkDeviceMaxMemoryAllocSize(srcSize, dtype);
    UMat src(srcSize, type), dst(srcSize, dtype);
    declare.in(src, WARMUP_RNG).out(dst);
    OCL_TEST_CYCLE() src.convertTo(dst, dtype);
    SANITY_CHECK(dst);
}
Developer: AGAT172, Project: opencv, Lines: 18, Source file: perf_matop.cpp
Example 20: static_assert
void LBSP::calcDescImgDiff(const cv::Mat& oDesc1, const cv::Mat& oDesc2, cv::Mat& oOutput, bool bForceMergeChannels) {
static_assert(LBSP::DESC_SIZE_BITS<=UCHAR_MAX,"bad assumptions in impl below");
static_assert(LBSP::DESC_SIZE==2,"bad assumptions in impl below");
CV_DbgAssert(oDesc1.size()==oDesc2.size() && oDesc1.type()==oDesc2.type());
CV_DbgAssert(oDesc1.type()==CV_16UC1 || oDesc1.type()==CV_16UC3);
CV_DbgAssert(CV_MAT_DEPTH(oDesc1.type())==CV_16U);
CV_DbgAssert(oDesc1.step.p[0]==oDesc2.step.p[0] && oDesc1.step.p[1]==oDesc2.step.p[1]);
const float fScaleFactor = (float)UCHAR_MAX/(LBSP::DESC_SIZE_BITS);
const size_t nChannels = CV_MAT_CN(oDesc1.type());
const size_t _step_row = oDesc1.step.p[0];
if(nChannels==1) {
oOutput.create(oDesc1.size(),CV_8UC1);
oOutput = cv::Scalar(0);
for(int i=0; i<oDesc1.rows; ++i) {
const size_t idx = _step_row*i;
const ushort* const desc1_ptr = (ushort*)(oDesc1.data+idx);
const ushort* const desc2_ptr = (ushort*)(oDesc2.data+idx);
for(int j=0; j<oDesc1.cols; ++j)
oOutput.at<uchar>(i,j) = (uchar)(fScaleFactor*DistanceUtils::hdist(desc1_ptr[j],desc2_ptr[j]));
}
}
else { //nChannels==3
if(bForceMergeChannels)
oOutput.create(oDesc1.size(),CV_8UC1);
else
oOutput.create(oDesc1.size(),CV_8UC3);
oOutput = cv::Scalar::all(0);
for(int i=0; i<oDesc1.rows; ++i) {
const size_t idx = _step_row*i;
const ushort* const desc1_ptr = (ushort*)(oDesc1.data+idx);
const ushort* const desc2_ptr = (ushort*)(oDesc2.data+idx);
uchar* output_ptr = oOutput.data + oOutput.step.p[0]*i;
for(int j=0; j<oDesc1.cols; ++j) {
for(size_t n=0;n<3; ++n) {
const size_t idx2 = 3*j+n;
if(bForceMergeChannels)
output_ptr[j] += (uchar)((fScaleFactor*DistanceUtils::hdist(desc1_ptr[idx2],desc2_ptr[idx2]))/3);
else
output_ptr[idx2] = (uchar)(fScaleFactor*DistanceUtils::hdist(desc1_ptr[idx2],desc2_ptr[idx2]));
}
}
}
}
}
Developer: caomw, Project: litiv, Lines: 44, Source file: LBSP.cpp
Note: The CV_MAT_DEPTH examples in this article were collected from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult each project's license before redistributing or reusing the code. Do not republish without permission.