This article collects typical usage examples of the C++ cv::InputArray class. If you are wondering what cv::InputArray is for and how it is used in practice, the curated class examples below may help.
The following presents 20 code examples of the InputArray class, sorted by popularity by default.
Example 1: transform
static bool transform(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _lut,
                      const int tilesX, const int tilesY, const cv::Size & tileSize)
{
    cv::ocl::Kernel k("transform", cv::ocl::imgproc::clahe_oclsrc);
    if (k.empty())
        return false;

    int tile_size[2];
    tile_size[0] = tileSize.width;
    tile_size[1] = tileSize.height;

    cv::UMat src = _src.getUMat();
    _dst.create(src.size(), src.type());
    cv::UMat dst = _dst.getUMat();
    cv::UMat lut = _lut.getUMat();

    size_t localThreads[3]  = { 32, 8, 1 };
    size_t globalThreads[3] = { (size_t)src.cols, (size_t)src.rows, 1 };

    int idx = 0;
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(src));
    idx = k.set(idx, cv::ocl::KernelArg::WriteOnlyNoSize(dst));
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(lut));
    idx = k.set(idx, src.cols);
    idx = k.set(idx, src.rows);
    idx = k.set(idx, tile_size);
    idx = k.set(idx, tilesX);
    k.set(idx, tilesY);

    return k.run(2, globalThreads, localThreads, false);
}
Author: 112000, Project: opencv, Lines: 32, Source: clahe.cpp
Example 2: d
inline void bs::pattern::shiftPattern(cv::InputArray src, cv::OutputArray dst, const int direction, const int shift)
{
    cv::Mat d;
    cv::copyMakeBorder(src, d, shift, shift, shift, shift, cv::BORDER_WRAP);

    const auto direc = static_cast<Direction>(direction);
    switch (direc)
    {
    default:
        break;
    case Top:
        d = d(cv::Rect(cv::Point(shift, 2 * shift), src.size()));
        break;
    case Bottom:
        d = d(cv::Rect(cv::Point(shift, 0), src.size()));
        break;
    case Left:
        d = d(cv::Rect(cv::Point(2 * shift, shift), src.size()));
        break;
    case Right:
        d = d(cv::Rect(cv::Point(0, shift), src.size()));
        break;
    }
    d.copyTo(dst);
    return;
}
Author: sippu0722, Project: my_libraries, Lines: 30, Source: imgproc.hpp
Example 3:
FilterCall::FilterCall(cv::InputArray in, cv::InputArray out,
                       impl::CallMetaData data, QString type,
                       QString description, QString requestedView)
    : Call{ data, std::move(type),
            std::move(description), std::move(requestedView) },
      input_{ in.getMat().clone() }, output_{ out.getMat().clone() }
{
}
Author: AWin9, Project: opencv_contrib, Lines: 8, Source: filter_call.cpp
Example 4: sadTemplate
void sadTemplate(cv::InputArray tar, cv::InputArray tmp, cv::OutputArray res, int *minx, int *miny){
    // Receive the input arguments as Mat
    cv::Mat tarM = tar.getMat();
    cv::Mat tmpM = tmp.getMat();
    cv::Mat resM = res.getMat();   // note: res must already be allocated by the caller

    // The position with the smallest SAD is the match we are looking for
    int minsad = std::numeric_limits<int>::max();
    int sad = 0;            // SAD of the current window
    int diff;               // working variable before accumulating into sad
    int tarx = 0, tary = 0; // coordinates of the best match (initialised so they are never read unset)

    for(int y = 0; y < tarM.rows - tmpM.rows; y++){
        for(int x = 0; x < tarM.cols - tmpM.cols; x++){
            sad = 0; // reset before evaluating the next window
            // scan the template
            for(int yt = 0; yt < tmpM.rows; yt++){
                for(int xt = 0; xt < tmpM.cols; xt++){
                    diff = (int)(tarM.at<uchar>(y+yt, x+xt) - tmpM.at<uchar>(yt, xt));
                    if(diff < 0){ // take the absolute value
                        diff = -diff;
                    }
                    sad += diff;
                    // sequential similarity detection: abort early once sad exceeds the current minimum
                    if(sad > minsad){
                        yt = tmpM.rows;
                        break;
                    }
                }
            }
            // if this window has the smallest SAD so far
            if(sad < minsad){
                minsad = sad; // update the minimum
                // store the matching coordinates
                tarx = x;
                tary = y;
            }
        }
    }

    // write the result image: a black pixel at the match, white everywhere else
    for(int y = 0; y < resM.rows; y++){
        for(int x = 0; x < resM.cols; x++){
            if(x == tarx && y == tary){
                resM.at<uchar>(y, x) = (uchar)0;
            }else{
                resM.at<uchar>(y, x) = (uchar)255;
            }
        }
    }
    std::cout << "minimum SAD = " << minsad << std::endl;
    std::cout << "match point = [" << tarx << ", " << tary << "]" << std::endl;
    *minx = tarx;
    *miny = tary;
}
Author: HisatakaSuzuki, Project: ImageRecognition, Lines: 57, Source: main02.cpp
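A minimal usage sketch (the file names are placeholders). Since the function writes into the result matrix through res.getMat() without calling create, the caller must pre-allocate it with the same size as the target:
cv::Mat target = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
cv::Mat templ  = cv::imread("patch.png", cv::IMREAD_GRAYSCALE);
cv::Mat result(target.size(), CV_8UC1);   // pre-allocated output, written in place
int mx = 0, my = 0;
sadTemplate(target, templ, result, &mx, &my);
std::cout << "best match at (" << mx << ", " << my << ")" << std::endl;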
Example 5: warmify
void warmify(cv::InputArray src, cv::OutputArray dst, uchar delta)
{
    CV_Assert(src.type() == CV_8UC3);
    Mat imgSrc = src.getMat();
    CV_Assert(imgSrc.data);

    dst.create(src.size(), CV_8UC3);
    Mat imgDst = dst.getMat();

    imgDst = imgSrc + Scalar(0, delta, delta);
}
Author: ArtemSkrebkov, Project: opencv_contrib, Lines: 10, Source: warmify.cpp
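A minimal usage sketch, assuming an 8-bit BGR input image (the file names are placeholders). Adding Scalar(0, delta, delta) in BGR order raises the green and red channels, which gives the image a warmer tint:
cv::Mat photo = cv::imread("photo.jpg");   // loaded as BGR, CV_8UC3
cv::Mat warm;
warmify(photo, warm, 30);                  // shift G and R up by 30 (saturating add)
cv::imwrite("photo_warm.jpg", warm);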
Example 6: stereoMatching
void stereo::stereoMatching(cv::InputArray _recImage1, cv::InputArray _recIamge2, cv::OutputArray _disparityMap,
                            int minDisparity, int numDisparities, int SADWindowSize, int P1, int P2)
{
    Mat img1 = _recImage1.getMat();
    Mat img2 = _recIamge2.getMat();
    _disparityMap.create(img1.size(), CV_16S);
    Mat dis = _disparityMap.getMat();

    StereoSGBM matcher(minDisparity, numDisparities, SADWindowSize, P1, P2);
    matcher(img1, img2, dis);
    dis = dis / 16.0;   // SGBM returns fixed-point disparities scaled by 16
}
Author: caomw, Project: stereo_matching, Lines: 10, Source: stereo.cpp
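This example uses the OpenCV 2.x interface, where StereoSGBM is constructed directly and invoked as a function object. In OpenCV 3 and later the matcher is created through a factory method instead; a rough equivalent sketch, reusing the parameter and variable names from the function above:
cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(minDisparity, numDisparities, SADWindowSize, P1, P2);
sgbm->compute(img1, img2, dis);   // disparities still come back scaled by 16
dis = dis / 16.0;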
Example 7: testFunction
void testFunction(cv::InputArray ip, cv::InputArray op) {
    cv::Mat img = ip.getMat();
    cv::Mat obj = op.getMat();
    printMatrix(img);
    printMatrix(obj);
    // std::cerr << img.checkVector() << std::endl;
}
Author: NBXApp, Project: vision-of-movement, Lines: 10, Source: visualOdometry.cpp
Example 8: stereoRectify
void stereo::stereoRectify(cv::InputArray _K1, cv::InputArray _K2, cv::InputArray _R, cv::InputArray _T,
                           cv::OutputArray _R1, cv::OutputArray _R2, cv::OutputArray _P1, cv::OutputArray _P2)
{
    Mat K1 = _K1.getMat(), K2 = _K2.getMat(), R = _R.getMat(), T = _T.getMat();
    _R1.create(3, 3, CV_32F);
    _R2.create(3, 3, CV_32F);
    Mat R1 = _R1.getMat();
    Mat R2 = _R2.getMat();
    _P1.create(3, 4, CV_32F);
    _P2.create(3, 4, CV_32F);
    Mat P1 = _P1.getMat();
    Mat P2 = _P2.getMat();

    if (K1.type() != CV_32F)
        K1.convertTo(K1, CV_32F);
    if (K2.type() != CV_32F)
        K2.convertTo(K2, CV_32F);
    if (R.type() != CV_32F)
        R.convertTo(R, CV_32F);
    if (T.type() != CV_32F)
        T.convertTo(T, CV_32F);
    if (T.rows != 3)
        T = T.t();

    // R and T describe the transformation from the first to the second camera.
    // Invert it to get the transformation from the second to the first camera.
    Mat R_inv = R.t();
    Mat T_inv = -R.t() * T;

    Mat e1, e2, e3;
    e1 = T_inv.t() / norm(T_inv);
    /* Mat z = (Mat_<float>(1, 3) << 0.0, 0.0, -1.0);
    e2 = e1.cross(z);
    e2 = e2 / norm(e2); */
    e2 = (Mat_<float>(1, 3) << T_inv.at<float>(1) * -1, T_inv.at<float>(0), 0.0);
    e2 = e2 / (sqrt(e2.at<float>(0) * e2.at<float>(0) + e2.at<float>(1) * e2.at<float>(1)));
    e3 = e1.cross(e2);
    e3 = e3 / norm(e3);

    e1.copyTo(R1.row(0));
    e2.copyTo(R1.row(1));
    e3.copyTo(R1.row(2));
    R2 = R_inv * R1;

    P1.setTo(Scalar(0));
    R1.copyTo(P1.colRange(0, 3));
    P1 = K1 * P1;
    P2.setTo(Scalar(0));
    R2.copyTo(P2.colRange(0, 3));
    P2 = K2 * P2;
}
Author: caomw, Project: stereo_matching, Lines: 52, Source: stereo.cpp
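For comparison, OpenCV ships its own cv::stereoRectify, which also accounts for lens distortion and produces the disparity-to-depth matrix Q. A rough sketch of the built-in call, where D1, D2 (distortion coefficients) and imageSize are assumed to be available in addition to K1, K2, R and T above:
cv::Mat R1, R2, P1, P2, Q;
cv::stereoRectify(K1, D1, K2, D2, imageSize, R, T, R1, R2, P1, P2, Q);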
Example 9: SLICSegment2Vector3D_
void SLICSegment2Vector3D_(cv::InputArray segment, cv::InputArray signal, double invalidValue, std::vector<std::vector<cv::Point3_<S>>>& segmentPoint)
{
    double minv, maxv;
    minMaxLoc(segment, &minv, &maxv);
    segmentPoint.clear();
    segmentPoint.resize((int)maxv + 1);

    // dispatch on the depth of the signal image
    if (signal.depth() == CV_8U)       _SLICSegment2Vector3D_<uchar, S>(segment, signal, (uchar)invalidValue, segmentPoint);
    else if (signal.depth() == CV_16S) _SLICSegment2Vector3D_<short, S>(segment, signal, (short)invalidValue, segmentPoint);
    else if (signal.depth() == CV_16U) _SLICSegment2Vector3D_<ushort, S>(segment, signal, (ushort)invalidValue, segmentPoint);
    else if (signal.depth() == CV_32S) _SLICSegment2Vector3D_<int, S>(segment, signal, (int)invalidValue, segmentPoint);
    else if (signal.depth() == CV_32F) _SLICSegment2Vector3D_<float, S>(segment, signal, (float)invalidValue, segmentPoint);
    else if (signal.depth() == CV_64F) _SLICSegment2Vector3D_<double, S>(segment, signal, (double)invalidValue, segmentPoint);
}
Author: norishigefukushima, Project: OpenCP, Lines: 13, Source: disparityFitPlane.cpp
Example 10: estimate_rigid_transform
cv::Mat flutter::estimate_rigid_transform(cv::InputArray src1, cv::InputArray src2,
                                          double ransac_good_ratio, double ransac_threshold)
{
    Mat M(2, 3, CV_64F), A = src1.getMat(), B = src2.getMat();
    CvMat matA = A, matB = B, matM = M;
    int err = estimate_rigid_transform_detail(&matA, &matB, &matM,
                                              ransac_good_ratio, ransac_threshold);
    if (err == 1) {
        return M;
    } else {
        return Mat();
    }
}
Author: jlep, Project: flutter, Lines: 13, Source: registration.cpp
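The CvMat wrappers above target the legacy C API. With OpenCV 3.2 or later, a similar RANSAC-based rigid fit (translation, rotation and uniform scale) is available out of the box; a rough sketch, assuming src1 and src2 are matching 2D point sets:
cv::Mat inliers;
cv::Mat M = cv::estimateAffinePartial2D(src1, src2, inliers, cv::RANSAC, ransac_threshold);
if (M.empty()) {
    // estimation failed, analogous to the err != 1 branch above
}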
Example 11: StereoMatching
void StereoMatch::StereoMatching(cv::InputArray rec_image1, cv::InputArray rec_image2,
                                 cv::OutputArray disparity_map, int min_disparity, int num_disparities, int SAD_window_size,
                                 int P1, int P2)
{
    cv::Mat img1 = rec_image1.getMat();
    cv::Mat img2 = rec_image2.getMat();
    disparity_map.create(img1.size(), CV_16S);
    cv::Mat dis = disparity_map.getMat();
    cv::StereoSGBM matcher(min_disparity, num_disparities, SAD_window_size, P1, P2);
    matcher(img1, img2, dis);
    dis = dis / 16.0;   // SGBM returns fixed-point disparities scaled by 16
}
Author: rpankka, Project: Stereo, Lines: 13, Source: stereo_match.cpp
Example 12: check_transform_quality
void CV_HomographyTest::check_transform_quality(cv::InputArray src_points, cv::InputArray dst_points, const cv::Mat& H, const int norm_type)
{
    Mat src, dst_original;
    cv::transpose(src_points.getMat(), src);
    cv::transpose(dst_points.getMat(), dst_original);

    cv::Mat src_3d(src.rows + 1, src.cols, CV_32FC1);
    src_3d(Rect(0, 0, src.rows, src.cols)) = src;
    src_3d(Rect(src.rows, 0, 1, src.cols)) = Mat(1, src.cols, CV_32FC1, Scalar(1.0f));

    cv::Mat dst_found, dst_found_3d;
    cv::multiply(H, src_3d, dst_found_3d);
    dst_found = dst_found_3d / dst_found_3d.row(dst_found_3d.rows - 1);
    double reprojection_error = cv::norm(dst_original, dst_found, norm_type);
    CV_Assert(reprojection_error > max_diff);
}
Author: heroacool, Project: OpenCVMirror, Lines: 14, Source: test_homography.cpp
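For reference, cv::multiply is a per-element product, while projecting points through a homography is usually a matrix product followed by division by the homogeneous coordinate; cv::perspectiveTransform does both in one call. A rough standalone sketch of such a check, assuming src_points and dst_points are N x 1 two-channel (CV_32FC2) point sets:
cv::Mat projected;
cv::perspectiveTransform(src_points, projected, H);   // applies H and divides by the third coordinate
double reprojection_error = cv::norm(dst_points, projected, norm_type);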
Example 13: write
void Regression::write(cv::InputArray array)
{
    write() << "kind" << array.kind();
    write() << "type" << array.type();
    if (isVector(array))
    {
        int total = (int)array.total();
        int idx = regRNG.uniform(0, total);
        write() << "len" << total;
        write() << "idx" << idx;

        cv::Mat m = array.getMat(idx);
        if (m.total() * m.channels() < 26) // 5x5 or smaller
            write() << "val" << m;
        else
            write(m);
    }
    else
    {
        if (array.total() * array.channels() < 26) // 5x5 or smaller
            write() << "val" << array.getMat();
        else
            write(array.getMat());
    }
}
Author: Belial2010, Project: opencv, Lines: 26, Source: ts_perf.cpp
Example 14: calculate
FeatureValue FeatureShiCorner::calculate( cv::InputArray image )
{
    Mat image_gray;
    cvtColor( image, image_gray, CV_BGR2GRAY );

    Mat corner( image.rows(), image.cols(), CV_32SC1, Scalar( 0 ) );
    computeRawCornerMat( image_gray, corner );
    auto points = genPoints( corner );

    // return genDescriptor;
    return FeatureValue();
}
Author: CDanU, Project: CV_ObjectClassification, Lines: 14, Source: FeatureShiCorner.cpp
Example 15: ReduceRowByMost
void Tools::ReduceRowByMost(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _mask) {
    cv::Mat src = _src.getMat();
    _dst.create(1, src.cols, CV_8UC1);
    cv::Mat dst = _dst.getMat();
    cv::Mat mask = _mask.getMat();
    if (src.depth() != CV_8U || mask.depth() != CV_8U) {
        throw "TYPE DON'T SUPPORT";
    }

    int i, j, addr;
    cv::Size size = src.size();
    uchar *srcd = src.data;
    uchar *dstd = dst.data;
    uchar *maskd = mask.data;
    std::map<uchar, int> m;
    std::map<uchar, int>::iterator p;
    uchar mostVal;
    uchar rVal;
    int r;

    // for each column, count every masked value together with its +/-2 neighbours (wrapped modulo 180)
    // and keep the most frequent one
    for (i = 0; i < size.width; ++i) {
        m.clear();
        mostVal = 0;
        for (j = 0; j < size.height; ++j) {
            addr = j * size.width + i;
            if (!maskd[addr])
                continue;
            for (r = -2; r <= 2; ++r) {
                rVal = (uchar)(((int)srcd[addr] + r) % 180);
                if (m.find(rVal) != m.end()) {
                    m[rVal]++;
                }
                else {
                    m[rVal] = 1;
                }
            }
        }
        if (m.size() == 0) {
            dstd[i] = 0;
            continue;
        }
        mostVal = m.begin()->first;
        for (p = m.begin(); p != m.end(); ++p) {
            if (p->second > m[mostVal]) {
                mostVal = p->first;
            }
        }
        dstd[i] = mostVal;
    }
}
Author: glfpes, Project: VLP, Lines: 48, Source: Tools.cpp
Example 16: train
// Computes a Lidfaces model with images in src and corresponding labels in labels.
void Lidfaces::train(cv::InputArrayOfArrays src, cv::InputArray labels)
{
    std::vector<std::vector<cv::KeyPoint> > allKeyPoints;
    cv::Mat descriptors;

    // Get SIFT keypoints and LID descriptors
    detectKeypointsAndDescriptors(src, allKeyPoints, descriptors);

    // The kmeans function requires points to be CV_32F
    descriptors.convertTo(descriptors, CV_32FC1);

    // Do k-means clustering
    const int CLUSTER_COUNT = params::lidFace::clustersAsPercentageOfKeypoints * descriptors.rows;
    cv::Mat histogramLabels;

    // This call populates histogramLabels: the nth element is the index of the cluster
    // that the nth keypoint belongs to.
    kmeans(
        descriptors,       // The points we are clustering are the descriptors
        CLUSTER_COUNT,     // The number of clusters (K)
        histogramLabels,   // The label of the corresponding keypoint
        params::kmeans::termCriteria,
        params::kmeans::attempts,
        params::kmeans::flags,
        mCenters);

    // Convert to single-channel 32-bit float, the form supported by calcHist
    histogramLabels.convertTo(histogramLabels, CV_32FC1);

    // We end up with one histogram per image
    const size_t NUM_IMAGES = getSize(src);
    std::vector<cv::Mat> hists(NUM_IMAGES);
    // mCodebook.resize(NUM_IMAGES);

    // The histogramLabels vector contains ALL the points from EVERY image. We need to split
    // it into per-image groups. Because each image contributed its keypoints in order, we can
    // simply slice the label vector by each image's keypoint count.
    std::vector<cv::Mat> separatedLabels;
    for (unsigned int i = 0, startRow = 0; i < NUM_IMAGES; ++i)
    {
        separatedLabels.push_back(
            histogramLabels.rowRange(
                startRow,
                startRow + allKeyPoints[i].size()));
        startRow += allKeyPoints[i].size();
    }

    // Populate the hists vector
    generateHistograms(hists, separatedLabels, CLUSTER_COUNT);

    // Make the magnitude of each histogram equal to 1
    normalizeHistograms(hists);

    mCodebook = hists;
    mLabels = labels.getMat();
}
Author: anubhavrohatgi, Project: Face-Recognition-and-Detection-Analysis, Lines: 62, Source: lid.cpp
Example 17: directMap
void RadiometricResponse::directMap(cv::InputArray _E, cv::OutputArray _I) const {
    if (_E.empty()) {
        _I.clear();
        return;
    }
    auto E = _E.getMat();
    _I.create(_E.size(), CV_8UC3);
    auto I = _I.getMat();
#if CV_MAJOR_VERSION > 2
    E.forEach<cv::Vec3f>(
        [&I, this](cv::Vec3f& v, const int* p) { I.at<cv::Vec3b>(p[0], p[1]) = inverseLUT(response_channels_, v); });
#else
    for (int i = 0; i < E.rows; i++)
        for (int j = 0; j < E.cols; j++)
            I.at<cv::Vec3b>(i, j) = inverseLUT(response_channels_, E.at<cv::Vec3f>(i, j));
#endif
}
Author: caomw, Project: radical, Lines: 16, Source: radiometric_response.cpp
Example 18: calcLut
static bool calcLut(cv::InputArray _src, cv::OutputArray _dst,
                    const int tilesX, const int tilesY, const cv::Size tileSize,
                    const int clipLimit, const float lutScale)
{
    cv::ocl::Kernel k("calcLut", cv::ocl::imgproc::clahe_oclsrc);
    if (k.empty())
        return false;

    cv::UMat src = _src.getUMat();
    _dst.create(tilesX * tilesY, 256, CV_8UC1);
    cv::UMat dst = _dst.getUMat();

    int tile_size[2];
    tile_size[0] = tileSize.width;
    tile_size[1] = tileSize.height;

    size_t localThreads[3]  = { 32, 8, 1 };
    size_t globalThreads[3] = { tilesX * localThreads[0], tilesY * localThreads[1], 1 };

    int idx = 0;
    idx = k.set(idx, cv::ocl::KernelArg::ReadOnlyNoSize(src));
    idx = k.set(idx, cv::ocl::KernelArg::WriteOnlyNoSize(dst));
    idx = k.set(idx, tile_size);
    idx = k.set(idx, tilesX);
    idx = k.set(idx, clipLimit);
    k.set(idx, lutScale);

    return k.run(2, globalThreads, localThreads, false);
}
Author: ArkaJU, Project: opencv, Lines: 29, Source: clahe.cpp
Example 19: boostColor
int boostColor(cv::InputArray src, cv::OutputArray dst, float intensity)
{
    const int MAX_INTENSITY = 255;
    Mat srcImg = src.getMat();
    CV_Assert(srcImg.channels() == 3);
    CV_Assert(intensity >= 0.0f && intensity <= 1.0f);

    if (srcImg.type() != CV_8UC3)
    {
        srcImg.convertTo(srcImg, CV_8UC3);
    }

    Mat srcHls;
    cvtColor(srcImg, srcHls, CV_BGR2HLS);

    int intensityInt = intensity * MAX_INTENSITY;
    srcHls += Scalar(0, 0, intensityInt);   // raise the saturation channel

    cvtColor(srcHls, dst, CV_HLS2BGR);
    dst.getMat().convertTo(dst, srcImg.type());
    return 0;
}
Author: ArtemSkrebkov, Project: itlab-vision, Lines: 25, Source: boostColor.cpp
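A minimal usage sketch (the file names are placeholders); intensity is the fraction of the 8-bit range added to the HLS saturation channel:
cv::Mat img = cv::imread("photo.jpg");
cv::Mat vivid;
boostColor(img, vivid, 0.25f);   // adds roughly a quarter of the range (63) to saturation
cv::imwrite("photo_vivid.jpg", vivid);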
Example 20: CannyThreshold
void StairDetection::CannyThreshold(cv::InputArray image, cv::Mat &edges) {
    if (image.channels() > 1) {
        /// Convert the image to grayscale
        cvtColor(image, edges, CV_RGB2GRAY);
        /// Reduce noise with a 3x3 kernel
        blur(edges, edges, cv::Size(3, 3));
    }
    else {
        /// src is already a grayscale image.
        edges = image.getMat().clone();
    }
    /// Canny detector
    Canny(edges, edges, cannyLowThreshold, cannyLowThreshold * cannyRatio, cannyKernelSize);
}
Author: kaholau, Project: ACH1_FYP_OpenNI_Kinect, Lines: 16, Source: StairDetection.cpp
Note: The cv::InputArray class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and any redistribution or use must follow the corresponding project's license. Do not reproduce without permission.