This article collects typical usage examples of the C++ cv::Mat class. If you are wondering what cv::Mat does, how to use it, or want real code that exercises it, the curated class examples below should help.
The following shows 20 code examples of the Mat class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: QDialog
PowertfDialog::PowertfDialog(cv::Mat& img, QWidget *parent)
: QDialog(parent)
{
r = 1;
c = 1;
b = 0;
img.copyTo(image);
pimage = &img;
rSlider = new QSlider(Qt::Horizontal);
rSlider->setRange(0,10);
rSlider->setValue(r);
cSlider = new QSlider(Qt::Horizontal);
cSlider->setRange(0,10);
cSlider->setValue(c);
bSlider = new QSlider(Qt::Horizontal);
bSlider->setRange(0,10);
bSlider->setValue(b);
rSBx = new QSpinBox();
rSBx->setRange(0,10);
rSBx->setValue(r);
cSBx = new QSpinBox();
cSBx->setRange(0,10);
cSBx->setValue(c);
bSBx = new QSpinBox();
bSBx->setRange(0,10);
bSBx->setValue(b);
connect(rSlider,SIGNAL(valueChanged(int)),this,SLOT(rChanged(int)));
connect(cSlider,SIGNAL(valueChanged(int)),this,SLOT(cChanged(int)));
connect(bSlider,SIGNAL(valueChanged(int)),this,SLOT(bChanged(int)));
connect(rSlider,SIGNAL(valueChanged(int)),rSBx,SLOT(setValue(int)));
connect(cSlider,SIGNAL(valueChanged(int)),cSBx,SLOT(setValue(int)));
connect(bSlider,SIGNAL(valueChanged(int)),bSBx,SLOT(setValue(int)));
connect(rSBx,SIGNAL(valueChanged(int)),this,SLOT(rChanged(int)));
connect(cSBx,SIGNAL(valueChanged(int)),this,SLOT(cChanged(int)));
connect(bSBx,SIGNAL(valueChanged(int)),this,SLOT(bChanged(int)));
connect(rSBx,SIGNAL(valueChanged(int)),rSlider,SLOT(setValue(int)));
connect(cSBx,SIGNAL(valueChanged(int)),cSlider,SLOT(setValue(int)));
connect(bSBx,SIGNAL(valueChanged(int)),bSlider,SLOT(setValue(int)));
rLabel = new QLabel(tr("r"));
cLabel = new QLabel(tr("c"));
bLabel = new QLabel(tr("b"));
okButton = new QPushButton(tr("&OK"));
okButton->setDefault(true);
okButton->setEnabled(true);
connect(okButton, SIGNAL(clicked()), this, SLOT(okClicked()));
closeButton = new QPushButton(tr("&Close"));
connect(closeButton, SIGNAL(clicked()), this, SLOT(closePowertf()));
QHBoxLayout *rLayout = new QHBoxLayout;
rLayout->addWidget(rLabel);
rLayout->addWidget(rSlider);
rLayout->addWidget(rSBx);
QHBoxLayout *cLayout = new QHBoxLayout;
cLayout->addWidget(cLabel);
cLayout->addWidget(cSlider);
cLayout->addWidget(cSBx);
QHBoxLayout *bLayout = new QHBoxLayout;
bLayout->addWidget(bLabel);
bLayout->addWidget(bSlider);
bLayout->addWidget(bSBx);
QVBoxLayout *leftLayout = new QVBoxLayout;
leftLayout->addLayout(rLayout);
leftLayout->addLayout(cLayout);
leftLayout->addLayout(bLayout);
QVBoxLayout *rightLayout = new QVBoxLayout;
rightLayout->addWidget(okButton);
rightLayout->addWidget(closeButton);
rightLayout->addStretch();
QHBoxLayout *mainLayout = new QHBoxLayout;
mainLayout->addLayout(leftLayout);
mainLayout->addLayout(rightLayout);
setLayout(mainLayout);
setWindowTitle(tr("Power Transform"));
setFixedHeight(sizeHint().height());
}
Developer ID: JuannyWang, Project: opencv_study, Lines of code: 86, Source: powertf_dialog.cpp
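A minimal call-site sketch for this dialog follows. The header name, image path, and the surrounding QApplication setup are assumptions for illustration, not code from the project above.
// Hypothetical usage of PowertfDialog (include name and image path are assumed)
#include <QApplication>
#include <opencv2/opencv.hpp>
#include "powertf_dialog.h" // assumed header name
int main(int argc, char *argv[])
{
    QApplication app(argc, argv);
    cv::Mat img = cv::imread("input.png"); // hypothetical input image
    if (img.empty())
        return 1;
    PowertfDialog dialog(img); // the constructor copies img and keeps a pointer for writing back
    dialog.show();
    return app.exec();
}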
Example 2: calVDisparity
// General processing function based on UV-disparity
cv::Mat UVDisparity::Process(cv::Mat& img_L, cv::Mat& disp_sgbm,
VisualOdometryStereo& vo, cv::Mat& xyz,
cv::Mat& roi_mask, cv::Mat& ground_mask,
double& pitch1, double& pitch2)
{
cv::Mat mask_moving;
calVDisparity(disp_sgbm,xyz);
//sequentially estimate pitch angles by Kalman Filter
vector<cv::Mat> pitch_measures;
pitch_measures = Pitch_Classify(xyz,ground_mask);
pitch1_KF->predict();
pitch1_KF->correct(pitch_measures[0]);
pitch2_KF->predict();
pitch2_KF->correct(pitch_measures[1]);
pitch1 = pitch_measures[0].at<float>(0);
pitch2 = pitch_measures[1].at<float>(0);
//Improve 3D reconstruction results by pitch angles
correct3DPoints(xyz,roi_,pitch1_KF->statePost.at<float>(0),pitch2_KF->statePost.at<float>(0));
//set image ROI according to ROI3D (ROI within a 3D space)
setImageROI(xyz, roi_mask);
//filter inliers and outliers
filterInOut(img_L,roi_mask,disp_sgbm,vo,pitch1);
//calculate Udisparity image
calUDisparity(disp_sgbm,xyz,roi_mask,ground_mask);
//using sigmoid function to adjust Udisparity image for segmentation
double scale = 0.02, range = 32;
adjustUdisIntense(scale,range);
//Find all possible segmentation
findAllMasks(vo,img_L,xyz,roi_mask);
if(masks_.size()>0)
{
//merge overlapped masks
mergeMasks();
//improve the segments by inliers
verifyByInliers(vo,img_L);
}
//perform segmentation in disparity image
segmentation(disp_sgbm,img_L,roi_mask,mask_moving);
//demonstration
cv::Mat img_show;
img_L.copyTo(img_show,mask_moving);
cv::imshow("moving",img_show);
cv::waitKey(1);
masks_.clear();
return mask_moving;
}
Developer ID: jfqiu-git, Project: keti, Lines of code: 63, Source: uvdisparity.cpp
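A hedged sketch of how this method might be driven per frame; the object names and the stereo/visual-odometry setup are assumptions inferred only from the signature above.
// Assumed per-frame driver: uv, vo, img_left and disp_sgbm are prepared elsewhere
cv::Mat xyz, roi_mask, ground_mask;
double pitch1 = 0.0, pitch2 = 0.0;
cv::Mat moving_mask = uv.Process(img_left, disp_sgbm, vo, xyz,
                                 roi_mask, ground_mask, pitch1, pitch2);
// moving_mask selects the pixels segmented as moving objects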
Example 3: dominantTransforms
static void dominantTransforms(const cv::Mat &img, std::vector <cv::Matx33f> &transforms,
const int nTransform, const int psize)
{
/** Walsh-Hadamard Transformation **/
std::vector <cv::Mat> channels;
cv::split(img, channels);
int cncase = std::max(img.channels() - 2, 0);
const int np[] = {cncase == 0 ? 12 : (cncase == 1 ? 16 : 10),
cncase == 0 ? 12 : (cncase == 1 ? 04 : 02),
cncase == 0 ? 00 : (cncase == 1 ? 04 : 02),
cncase == 0 ? 00 : (cncase == 1 ? 00 : 10)};
for (int i = 0; i < img.channels(); ++i)
rgb2whs(channels[i], channels[i], np[i], psize);
cv::Mat whs; // Walsh-Hadamard series
cv::merge(channels, whs);
KDTree <float, 24> kdTree(whs, 16, 32);
std::vector <int> annf( whs.total(), 0 );
/** Propagation-assisted kd-tree search **/
for (int i = 0; i < whs.rows; ++i)
for (int j = 0; j < whs.cols; ++j)
{
double dist = std::numeric_limits <double>::max();
int current = i*whs.cols + j;
int dy[] = {0, 1, 0}, dx[] = {0, 0, 1};
for (int k = 0; k < int( sizeof(dy)/sizeof(int) ); ++k)
if (i - dy[k] >= 0 && j - dx[k] >= 0)
{
int neighbor = (i - dy[k])*whs.cols + (j - dx[k]);
int leafIdx = k == 0 ? neighbor :
annf[neighbor] + dy[k]*whs.cols + dx[k];
kdTree.updateDist(leafIdx, current,
annf[i*whs.cols + j], dist);
}
}
/** Local maxima extraction **/
cv::Mat_<double> annfHist(2*whs.rows - 1, 2*whs.cols - 1, 0.0),
_annfHist(2*whs.rows - 1, 2*whs.cols - 1, 0.0);
for (size_t i = 0; i < annf.size(); ++i)
++annfHist( (annf[i] - int(i))/whs.cols + whs.rows - 1,
(annf[i] - int(i))%whs.cols + whs.cols - 1 );
cv::GaussianBlur( annfHist, annfHist,
cv::Size(0, 0), std::sqrt(2.0), 0.0, cv::BORDER_CONSTANT);
cv::dilate( annfHist, _annfHist,
cv::Matx<uchar, 9, 9>::ones() );
std::vector < std::pair<double, int> > amount;
std::vector <cv::Point2i> shiftM;
for (int i = 0, t = 0; i < annfHist.rows; ++i)
{
double *pAnnfHist = annfHist.ptr<double>(i);
double *_pAnnfHist = _annfHist.ptr<double>(i);
for (int j = 0; j < annfHist.cols; ++j)
if ( pAnnfHist[j] != 0 && pAnnfHist[j] == _pAnnfHist[j] )
{
amount.push_back( std::make_pair(pAnnfHist[j], t++) );
shiftM.push_back( cv::Point2i(j - whs.cols + 1,
i - whs.rows + 1) );
}
}
std::partial_sort( amount.begin(), amount.begin() + nTransform,
amount.end(), std::greater< std::pair<double, int> >() );
transforms.resize(nTransform);
for (int i = 0; i < nTransform; ++i)
{
int idx = amount[i].second;
transforms[i] = cv::Matx33f(1, 0, float(shiftM[idx].x),
0, 1, float(shiftM[idx].y),
0, 0, 1 );
}
}
Developer ID: 23pointsNorth, Project: opencv_contrib, Lines of code: 85, Source: annf.hpp
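A sketch of a call site with assumed parameter values; the function is internal to annf.hpp, so this only illustrates the interface.
// Assumed call: extract the 8 dominant patch shifts of an image
std::vector<cv::Matx33f> transforms;
dominantTransforms(img, transforms, /*nTransform=*/8, /*psize=*/8);
// each entry is a pure translation stored as a 3x3 matrix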
Example 4: rescaleGrayLevelMat
/*
* objective : get the gray level map of the input image and rescale it to the range [0-255]
*/
static void rescaleGrayLevelMat(const cv::Mat &inputMat, cv::Mat &outputMat, const float histogramClippingLimit)
{
// adjust output matrix wrt the input size but single channel
std::cout<<"Input image rescaling with histogram edges cutting (in order to eliminate bad pixels created during the HDR image creation) :"<<std::endl;
//std::cout<<"=> image size (h,w,channels) = "<<inputMat.size().height<<", "<<inputMat.size().width<<", "<<inputMat.channels()<<std::endl;
//std::cout<<"=> pixel coding (nbchannel, bytes per channel) = "<<inputMat.elemSize()/inputMat.elemSize1()<<", "<<inputMat.elemSize1()<<std::endl;
// rescale between 0-255, keeping floating point values
cv::normalize(inputMat, outputMat, 0.0, 255.0, cv::NORM_MINMAX);
// extract a 8bit image that will be used for histogram edge cut
cv::Mat intGrayImage;
if (inputMat.channels()==1)
{
outputMat.convertTo(intGrayImage, CV_8U);
}else
{
cv::Mat rgbIntImg;
outputMat.convertTo(rgbIntImg, CV_8UC3);
cv::cvtColor(rgbIntImg, intGrayImage, cv::COLOR_BGR2GRAY);
}
// get the histogram density probability in order to cut values outside the edge limits (here 5-95%)... useful for cancelling HDR pixel errors
cv::Mat dst, hist;
int histSize = 256;
calcHist(&intGrayImage, 1, 0, cv::Mat(), hist, 1, &histSize, 0);
cv::Mat normalizedHist;
normalize(hist, normalizedHist, 1, 0, cv::NORM_L1, CV_32F); // normalize histogram so that its sum equals 1
double min_val, max_val;
minMaxLoc(normalizedHist, &min_val, &max_val);
//std::cout<<"Hist max,min = "<<max_val<<", "<<min_val<<std::endl;
// compute density probability
cv::Mat denseProb=cv::Mat::zeros(normalizedHist.size(), CV_32F);
denseProb.at<float>(0)=normalizedHist.at<float>(0);
int histLowerLimit=0, histUpperLimit=0;
for (int i=1;i<normalizedHist.size().height;++i)
{
denseProb.at<float>(i)=denseProb.at<float>(i-1)+normalizedHist.at<float>(i);
//std::cout<<normalizedHist.at<float>(i)<<", "<<denseProb.at<float>(i)<<std::endl;
if ( denseProb.at<float>(i)<histogramClippingLimit)
histLowerLimit=i;
if ( denseProb.at<float>(i)<1-histogramClippingLimit)
histUpperLimit=i;
}
// deduce min and max admitted gray levels
float minInputValue = (float)histLowerLimit/histSize*255;
float maxInputValue = (float)histUpperLimit/histSize*255;
std::cout<<"=> Histogram limits "
<<"\n\t"<<histogramClippingLimit*100<<"% index = "<<histLowerLimit<<" => normalizedHist value = "<<denseProb.at<float>(histLowerLimit)<<" => input gray level = "<<minInputValue
<<"\n\t"<<(1-histogramClippingLimit)*100<<"% index = "<<histUpperLimit<<" => normalizedHist value = "<<denseProb.at<float>(histUpperLimit)<<" => input gray level = "<<maxInputValue
<<std::endl;
//drawPlot(denseProb, "input histogram density probability", histLowerLimit, histUpperLimit);
drawPlot(normalizedHist, "input histogram", histLowerLimit, histUpperLimit);
// rescale image range [minInputValue-maxInputValue] to [0-255]
outputMat-=minInputValue;
outputMat*=255.0/(maxInputValue-minInputValue);
// cut original histogram and back project to original image
cv::threshold( outputMat, outputMat, 255.0, 255.0, 2 ); //THRESH_TRUNC, clips values above 255
cv::threshold( outputMat, outputMat, 0.0, 0.0, 3 ); //THRESH_TOZERO, clips values under 0
}
Developer ID: Belial2010, Project: opencv, Lines of code: 69, Source: OpenEXRimages_HighDynamicRange_Retina_toneMapping.cpp
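A sketch of calling this helper on an HDR input; the file name is a placeholder, and the 5% limit matches the clipping range mentioned in the comments above.
// Assumed usage: rescale an HDR image, clipping 5% of each histogram tail
cv::Mat hdr = cv::imread("memorial.exr", cv::IMREAD_ANYDEPTH | cv::IMREAD_ANYCOLOR);
cv::Mat rescaled;
rescaleGrayLevelMat(hdr, rescaled, 0.05f);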
Example 5: extract_singleFeat_1D
bool CaffeFeatExtractor<Dtype>::extract_singleFeat_1D(cv::Mat &image, vector<Dtype> &features, float (&times)[2])
{
times[0] = 0.0f;
times[1] = 0.0f;
// Check input image
if (image.empty())
{
std::cout << "CaffeFeatExtractor::extract_singleFeat_1D(): empty imMat!" << std::endl;
return false;
}
// Start timing
cudaEvent_t startPrep, stopPrep, startNet, stopNet;
if (timing)
{
cudaEventCreate(&startPrep);
cudaEventCreate(&startNet);
cudaEventCreate(&stopPrep);
cudaEventCreate(&stopNet);
cudaEventRecord(startPrep, NULL);
cudaEventRecord(startNet, NULL);
}
// Prepare Caffe
// Set the GPU/CPU mode for Caffe (here in order to be thread-safe)
if (gpu_mode)
{
Caffe::set_mode(Caffe::GPU);
Caffe::SetDevice(device_id);
}
else
{
Caffe::set_mode(Caffe::CPU);
}
// Initialize labels to zero
int label = 0;
// Get pointer to data layer to set the input
caffe::shared_ptr<MemoryDataLayer<Dtype> > memory_data_layer = boost::dynamic_pointer_cast<caffe::MemoryDataLayer<Dtype> >(feature_extraction_net->layers()[0]);
// Set batch size to 1
if (memory_data_layer->batch_size()!=1)
{
memory_data_layer->set_batch_size(1);
std::cout << "CaffeFeatExtractor::extract_singleFeat_1D(): BATCH SIZE = " << memory_data_layer->batch_size() << std::endl;
}
// Image preprocessing
// The image passed to AddMatVector must be same size as the mean image
// If not, it is resized:
// if it is downsampled, an anti-aliasing Gaussian Filter is applied
if (image.rows != mean_height || image.cols != mean_width)
{
if (image.rows > mean_height || image.cols > mean_width)
{
cv::resize(image, image, cv::Size(mean_width, mean_height), 0, 0, CV_INTER_LANCZOS4);
}
else
{
cv::resize(image, image, cv::Size(mean_width, mean_height), 0, 0, CV_INTER_LINEAR);
}
}
memory_data_layer->AddMatVector(vector<cv::Mat>(1, image),vector<int>(1,label));
size_t num_features = blob_names.size();
if(num_features!=1)
{
std::cout << "CaffeFeatExtractor::extract_singleFeat_1D(): Error! The list of features to be extracted has not size one!" << std::endl;
return false;
}
if (timing)
{
// Record the stop event
cudaEventRecord(stopPrep, NULL);
// Wait for the stop event to complete
cudaEventSynchronize(stopPrep);
cudaEventElapsedTime(times, startPrep, stopPrep);
}
// Run network and retrieve features!
// depending on your net's architecture, the blobs will hold accuracy and/or labels, etc
std::vector<Blob<Dtype>*> results = feature_extraction_net->Forward();
const caffe::shared_ptr<Blob<Dtype> > feature_blob = feature_extraction_net->blob_by_name(blob_names[0]);
int batch_size = feature_blob->num(); // should be 1
if (batch_size!=1)
{
std::cout << "CaffeFeatExtractor::extract_singleFeat_1D(): Error! Retrieved more than one feature, exiting..." << std::endl;
return false;
//.........some code omitted here.........
Developer ID: GiuliaP, Project: caffe_feat_extraction, Lines of code: 101, Source: CaffeFeatExtractor.hpp
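A hedged usage sketch; the extractor's construction is omitted because it is not shown above, and all names here are assumptions.
// Assumed usage: extractor is an already-configured CaffeFeatExtractor<float>
std::vector<float> features;
float times[2];
cv::Mat image = cv::imread("frame.jpg"); // hypothetical input
if (extractor.extract_singleFeat_1D(image, features, times))
    std::cout << "extracted " << features.size() << " values" << std::endl;
// times[0]/times[1] hold timing in ms when timing is enabled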
Example 6: baseGraph
/**
@brief Compute a histogram of hue only
@param src Image to compute the histogram from
*/
void HIST::calcHistgramHue(cv::Mat &src)
{
if(src.data == NULL)
return;
// Spacing between data points on the graph
stepH = (double)ui.histgramH->width()/180;
cv::cvtColor(src, src, cv::COLOR_BGR2HSV);
int count[180];
int max = 0;
// Initialization
for (int i = 0; i < 180; i++)
count[i] = 0;
// Count the occurrences of each hue value
for (int j = 0; j < src.cols; j++)
for (int i = 0; i < src.rows; i++)
// Ignore nearly-white pixels (saturation ≈ 0)
if(src.data[i * src.step + j * src.elemSize() + 1] > 3)
count[src.data[i * src.step + j * src.elemSize()]]++;
cv::cvtColor(src, src, cv::COLOR_HSV2BGR);
// Get the scaling constant (the largest count)
for (int i = 0; i < 180; i++)
if(max < count[i])
max = count[i];
// Scaling
double histgram[180];
for (int i = 0; i < 180; i++)
histgram[i] = (double)count[i] / max * 200;
/** Build a simple graph **/
int gWidth = 180 * stepH;
int gHeight = 200;
// Grid image
cv::Mat baseGraph(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255));
// Hue histogram image
cv::Mat hueGraph(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255));
// Final image to draw: the two images above, multiply-blended
cv::Mat graph(gHeight, gWidth, CV_8UC3, cv::Scalar(255, 255, 255));
cv::cvtColor(hueGraph, hueGraph, cv::COLOR_BGR2HSV);
for (int j = 0; j < hueGraph.rows; j++){
for (int i = 0; i < hueGraph.cols; i++){
hueGraph.data[j * hueGraph.step + i * hueGraph.elemSize() + 0] = (int)((double)i/stepH);
hueGraph.data[j * hueGraph.step + i * hueGraph.elemSize() + 1] = 220;
hueGraph.data[j * hueGraph.step + i * hueGraph.elemSize() + 2] = 180;
}
}
cv::cvtColor(hueGraph, hueGraph, cv::COLOR_HSV2BGR);
// Horizontal tick lines
for (int i = 0; i < 20; i++)
if(!(i%4))
cv::line(baseGraph, cv::Point(0, i*10), cv::Point(gWidth, i*10), cv::Scalar(180, 180, 180), 2);
else
cv::line(baseGraph, cv::Point(0, i*10), cv::Point(gWidth, i*10), cv::Scalar(200, 200, 200), 1);
// Hue bars
for (int i = 0; i < 180; i++)
cv::line(hueGraph, cv::Point((int)(i*stepH), 0), cv::Point((int)(i*stepH), (int)histgram[i]), cv::Scalar(180, 180, 180), 2);
// Polyline (run to 179 so that histgram[i+1] stays in bounds)
for (int i = 0; i < 179; i++)
cv::line(hueGraph, cv::Point((int)(i*stepH), (int)histgram[i]), cv::Point((int)((i+1)*stepH), (int)histgram[i+1]), cv::Scalar(90, 90, 90), 2, CV_AA);
// Composite (multiply blend)
blend(baseGraph, hueGraph, graph, blendType::MULTI);
// Flip vertically
cv::flip(graph, graph, 0);
drawForQtLabel(graph, ui.histgramH, false);
}
Developer ID: sensq, Project: home, Lines of code: 77, Source: hist.cpp
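For comparison, the per-pixel counting loop above can also be expressed with cv::calcHist; a minimal sketch, assuming an 8-bit BGR input:
// Hue histogram via cv::calcHist (the 8-bit hue range in OpenCV is [0,180))
cv::Mat hsv, hist;
cv::cvtColor(src, hsv, cv::COLOR_BGR2HSV);
int histSize = 180;
float hueRange[] = {0, 180};
const float* ranges[] = {hueRange};
int channels[] = {0}; // channel 0 of HSV is hue
cv::calcHist(&hsv, 1, channels, cv::Mat(), hist, 1, &histSize, ranges);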
Example 7: ParallaxErrorAnalysis_GradientPatch
bool ParallaxErrorAnalysis_GradientPatch(const cv::Mat& img1,const shape& img1_shp, const cv::Mat& img2, const shape& img2_shp, const cv::Mat& img2_original,
const cv::Mat& H, const cv::Size2i& img_size ,double& res_error)
{
vector<cv::Point2f> overlap_points;
for(int r = 0 ; r < img_size.height; r++)
{
for(int c = 0; c < img_size.width; c++)
{
shape t_img1_shape = img1_shp;
shape t_img2_shape = img2_shp;
cv::Point2f t_point(c, r);
if(t_img1_shape.isInShape(c, r) && t_img2_shape.isInShape(c, r) )
{
// record these points
overlap_points.push_back(t_point);
}
}
}
// Find the corresponding points on img2_original
cv::Mat Hn = H.inv();
vector<cv::Point2f> correPoints_ori(overlap_points.size(),*new cv::Point2f);
cv::perspectiveTransform(overlap_points, correPoints_ori, Hn); // both point arguments must be vector<cv::Point2f> (float, not int)
//Check whether any points fall out of bounds
/*ofstream Dout("2.txt",ios::out);
for(int i = 0; i < correPoints_ori.size(); i++)
{
if(correPoints_ori[i].x < 0 || correPoints_ori[i].x > img2_original.cols
|| correPoints_ori[i].y < 0 || correPoints_ori[i].y > img2_original.rows)
{
Dout<< correPoints_ori[i].x << " "<<correPoints_ori[i].y << endl;
}
}
Dout.clear();
Dout.close();*/
// Compute the error
res_error = 0;
cv::Mat img_blend;
vector<cv::Mat> imgs;
vector<shape> shapes;
imgs.push_back(img1);
imgs.push_back(img2);
shapes.push_back(img1_shp);
shapes.push_back(img2_shp);
blending_all(imgs, shapes, img_size, img_blend);
// Compute the gradient images of Iij and Ij
cv::Mat img_blend_G;
cv::Mat img2_original_G;
gradientGray(img_blend, img_blend_G);
gradientGray(img2_original, img2_original_G);
// test: see exactly which points differ
vector<cv::Point2f> err_points_img2;
vector<uchar> err_img2;
// Compute the error on the gradient images
double res = 0;
for(int i = 0; i < overlap_points.size(); i++)
{
uchar Gray1 = img_blend_G.at<uchar>(overlap_points[i]);
uchar Gray2 = img2_original_G.at<uchar>(correPoints_ori[i]);
int tr = abs(Gray1 - Gray2);
//if(tr != 0) // many pixels differ by just 1 and are essentially identical
//{
// res += tr;
//}
if(tr >= 5) // skip tiny differences; many pixels differ by just 1 and are essentially identical
{
res += tr;
err_points_img2.push_back(correPoints_ori[i]);
err_img2.push_back(tr);
}
}
res_error = res;
// Image for visualizing the errors
// Create a black image the size of img2_original
IplImage* st_img = cvCreateImage(img2_original.size(), IPL_DEPTH_8U, 3);
for(int i = 0; i < st_img->height; i++)
{
uchar *ptrImage = (uchar*)(st_img->imageData + i * st_img->widthStep);
for (int j = 0; j < st_img->width; j++)
{
ptrImage[3 * j + 0]=0;
ptrImage[3 * j + 1]=0;
ptrImage[3 * j + 2]=0;
}
}
cv::Mat test_img = st_img;
for(int i = 0; i < err_points_img2.size(); i++)
{
test_img.at<Vec3b>(err_points_img2[i])[2] = err_img2[i];
}
imwrite("error_ori.jpg",img2_original);
//.........some code omitted here.........
Developer ID: XavierCao, Project: OpenCVtest, Lines of code: 101, Source: Homo1.cpp
Example 8: rotateMat
//void CutoutImage::rotateMat (const cv::Mat srcMat ,cv::Mat &dstMat,const cv::Mat colorMat)
void CutoutImage::rotateMat (const cv::Mat srcMat ,cv::Mat &dstMat,const cv::Mat colorMat, const cv::Mat wholeImage, cv::Mat &wholeImageCut)
{
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4f> lineVector;
cv::Mat aMat = srcMat.clone();
cv::findContours(aMat, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
cv::Mat showMat = cv::Mat(aMat.rows,aMat.cols,CV_8UC3,cv::Scalar(0,0,0));
//dstMat = aMat.clone();
dstMat = cv::Mat(srcMat.size(), CV_8UC4, cv::Scalar(0,0,0,0));
std::vector<cv::Rect> nailRect;
cv::Mat wholeImageRotate;
for(int i = 0;i<(int)contours.size();i++)
{
if(contours[i].size() > 5)
{
cv::RotatedRect tmp = cv::minAreaRect(contours[i]);
//ellipses.push_back(temp);
cv::drawContours(showMat, contours, i, cv::Scalar(255,0,0), 1, 8);
cv::ellipse(showMat, tmp, cv::Scalar(0,255,255), 2, 8);
//cv::line(<#cv::Mat &img#>, <#Point pt1#>, <#Point pt2#>, <#const Scalar &color#>)
cv::rectangle(showMat, tmp.boundingRect(), cv::Scalar(255,255,0),1,8);
//imshow("Ellipses", showMat);
float rotAngle = tmp.angle;
tmp.angle = 0;
//cv::circle(showMat, cv::Point(tmp.boundingRect().x,tmp.boundingRect().y) , 2, cv::Scalar(0,0,255));
if(tmp.boundingRect().width > tmp.boundingRect().height)
{
tmp.angle = 90;
rotAngle = rotAngle - 90;
}
cv::rectangle(showMat, tmp.boundingRect(), cv::Scalar(255,255,0),1,8);
nailRect.push_back(tmp.boundingRect());
cv::ellipse(showMat, tmp, cv::Scalar(255,255,255), 2, 8);
//imshow("Ellipses", showMat);
//cv::Mat rotMat = cv::Mat(2,3,CV_32FC1);
cv::Mat rotMat = cv::getRotationMatrix2D(tmp.center,rotAngle, 1);
//cv::transform(srcMat, dstMat, rotMat);
cv::warpAffine(colorMat, dstMat, rotMat, cv::Size(std::max(srcMat.rows,srcMat.cols),std::max(srcMat.rows,srcMat.cols)), CV_INTER_NN);
cv::warpAffine(wholeImage.clone(), wholeImageRotate, rotMat, cv::Size(std::max(srcMat.rows,srcMat.cols),std::max(srcMat.rows,srcMat.cols)), CV_INTER_LANCZOS4);
//cv::imshow("RRRRRR", dstMat);
}
else
{
// cv::drawContours(showMat, contours, i, cv::Scalar(255,255,255), -1, 8);
// imshow("Ellipses", showMat);
}
}
cv::RNG rng; // random numbers
int rows = dstMat.rows;
int cols = dstMat.cols;
// printf(" b cutImageByRect rows = %d \n",rows);
// printf(" b cutImageByRect cols = %d \n",cols);
int lx = cols;
int rx = 0;
int ty = rows;
int by = 0;
cv::Mat grayDst;
cv::cvtColor(dstMat, grayDst, CV_BGRA2GRAY);
for(int y = 0; y<rows; y++ ){
uchar *grayDstRowsData = grayDst.ptr<uchar>(y);
for(int x = 0; x<cols; x++ ){
if(grayDstRowsData[x] != 0 )
{
if(x<lx){
lx = x;
}
if(x>rx){
rx = x;
}
if(y<ty){
ty = y;
}
if(y>by){
by = y;
}
}
}
}
// Expand the crop region slightly
if(lx - 10 >= 0)
lx = lx - 10;
if(rx + 10 <= cols - 1)
rx = rx + 10;
if(ty - 10 >= 0)
ty = ty - 10;
if(by + 10 <= rows - 1)
by = by + 10;
//cv::Point lt = cv::Point(lx,ty);
//cv::Point rb = cv::Point(rx,by);
cv::Rect cutRect = *new cv::Rect;
cutRect.x = lx;
//.........some code omitted here.........
Developer ID: celesius, Project: cutImageIOSFramework, Lines of code: 101, Source: CutoutImage.cpp
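A hedged call-site sketch; the input mats are assumptions about what the caller provides (a binary mask, its color counterpart, and the full frame).
// Assumed usage of rotateMat
CutoutImage cutout;
cv::Mat rotated, wholeCut;
cutout.rotateMat(binaryMask, rotated, colorMask, wholeFrame, wholeCut);
// per the signature, rotated receives the rotation-normalized cutout and wholeCut the matching crop of the frame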
Example 9: filterImageEdgeAndBlurMerge
/*
Smooth the edges of the input binary image, cut the input color image out along the sharp edge, then blend the color image with the smooth-edged one
*/
void CutoutImage::filterImageEdgeAndBlurMerge( const cv::Mat colorMat, const cv::Mat bitMat, cv::Mat &dstMat )
{
cv::Mat aBitMat = bitMat.clone();
cv::Mat aColorMat = colorMat.clone();
cv::Mat filterMat;
//CutoutImage::filterImage(aBitMat, filterMat); // make the pre-blur region larger than the normal region
filterMat = aBitMat; // make the pre-blur region the same size as the normal region
std::cout<<"aColorMat channels = " <<aColorMat.channels()<<std::endl;
int blockSize = 5;
int constValue = 10;
// cv::adaptiveThreshold( filterMat, filterMat, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, blockSize, constValue );
cv::threshold(filterMat, filterMat, 1, 255, CV_THRESH_BINARY );
//jiangbo test
cv::Mat tmpMMat;
//CutoutImage::smoothContours( filterMat, tmpMMat );
int rows = aBitMat.rows;
int cols = aBitMat.cols;
// Cut out the color image over the larger region and blur it; mainly its edge data is needed
cv::Mat cutBigColorMat = cv::Mat( rows , cols, CV_8UC3, cv::Scalar(255,255,255) );
for (int y = 0; y < rows; y++) {
uchar *filterCutMatRowData = filterMat.ptr<uchar>(y);
uchar *colorMatRowData = aColorMat.ptr<uchar>(y);
uchar *cutBigColorMatRowData = cutBigColorMat.ptr<uchar>(y);
for (int x = 0; x < cols; x++) {
if(filterCutMatRowData[x] != 0){
cutBigColorMatRowData[x*3] = colorMatRowData[x*3];
cutBigColorMatRowData[x*3 + 1] = colorMatRowData[x*3 + 1];
cutBigColorMatRowData[x*3 + 2] = colorMatRowData[x*3 + 2];
}
}
}
cv::Mat cutBigColorMatFilter;
//CutoutImage::filterImage(cutBigColorMat, cutBigColorMatFilter);
CutoutImage::filterImageForEdgeBlur(cutBigColorMat, cutBigColorMatFilter);
cv::Mat bgrFilterMat;
cv::cvtColor(filterMat, bgrFilterMat, CV_GRAY2BGR);
cv::Mat smoothMask;
CutoutImage::smoothContours(colorMat, bgrFilterMat, 21 , tmpMMat, smoothMask);
cv::Mat fooRusultMat = cv::Mat( rows , cols, CV_8UC3, cv::Scalar(0,0,0) );
cv::Mat testEdgeData= cv::Mat( rows , cols, CV_8UC3, cv::Scalar(0,0,0) );
// Blend
for(int y = 0; y < rows; y++){
uchar *aBitMatRowData = aBitMat.ptr<uchar>(y);
uchar *colorMatRowData = aColorMat.ptr<uchar>(y);
uchar *cutBigColorMatRowData = cutBigColorMatFilter.ptr<uchar>(y);
uchar *fooRusultMatRowData = fooRusultMat.ptr<uchar>(y);
uchar *testEdgeDataRowData = testEdgeData.ptr<uchar>(y);
for(int x = 0; x < cols; x++){
if(aBitMatRowData[x] != 0){
fooRusultMatRowData[x*3] = colorMatRowData[x*3];
fooRusultMatRowData[x*3 + 1] = colorMatRowData[x*3 + 1];
fooRusultMatRowData[x*3 + 2] = colorMatRowData[x*3 + 2];
}
if(cutBigColorMatRowData[x*3] != 255 || cutBigColorMatRowData[x*3 + 1] != 255 || cutBigColorMatRowData[x*3 + 2] != 255){
if(aBitMatRowData[x] == 0){
fooRusultMatRowData[x*3] = cutBigColorMatRowData[x*3];
fooRusultMatRowData[x*3 + 1] = cutBigColorMatRowData[x*3 + 1];
fooRusultMatRowData[x*3 + 2] = cutBigColorMatRowData[x*3 + 2];
testEdgeDataRowData[x*3] = cutBigColorMatRowData[x*3];
testEdgeDataRowData[x*3 + 1] = cutBigColorMatRowData[x*3 + 1];
testEdgeDataRowData[x*3 + 2] = cutBigColorMatRowData[x*3 + 2];
}
}
}
}
}
Developer ID: celesius, Project: cutImageIOSFramework, Lines of code: 76, Source: CutoutImage.cpp
Example 10: setColorImg
void CutoutImage::setColorImg(cv::Mat colorImg)
{
_cloverGrabCut->setImage(colorImg);
inputColorImageSize = colorImg.size();
}
Developer ID: celesius, Project: cutImageIOSFramework, Lines of code: 5, Source: CutoutImage.cpp
Example 11: processImageCreatMask
void CutoutImage::processImageCreatMask( std::vector<cv::Point> mouseSlideRegionDiscrete , const cv::Mat srcMat, cv::Mat &dstMat, int lineWidth, int expandWidth )
{
cv::Mat showMat;// = srcMat.clone();
cv::cvtColor(srcMat, showMat, CV_BGR2GRAY);
cv::Mat showMergeColorImg = srcMat.clone();
cv::Mat seedStoreMat = dstMat; // seedStoreMat stores, for the caller, all seed points generated by this operation
mouseSlideRegion.clear();
cv::Size matSize = *new cv::Size;
matSize.width = showMat.cols;
matSize.height = showMat.rows;
CutoutImage::drawLineAndMakePointSet(mouseSlideRegionDiscrete,matSize,lineWidth,mouseSlideRegion);
int lx = showMat.cols,rx = 0,ty = showMat.rows,by = 0;
// Compute the extent of the drawn stroke
for(int i = 0;i<(int)mouseSlideRegion.size();i++)
{
//std::cout<<"point = "<< mouseSlideRegion[i]<<std::endl;
//cv::circle(showMatClone, mouseSlideRegion[i], 0.5, cv::Scalar(255)); // draw the points for display
// leftmost x, rightmost x, topmost y, bottommost y
if(mouseSlideRegion[i].x < lx)
{
lx = mouseSlideRegion[i].x;
}
if(mouseSlideRegion[i].x > rx)
{
rx = mouseSlideRegion[i].x;
}
if(mouseSlideRegion[i].y <ty )
{
ty = mouseSlideRegion[i].y;
}
if(mouseSlideRegion[i].y > by){
by = mouseSlideRegion[i].y;
}
//CvPoint forePtsCvPoint =
}
std::cout<<" lx " << lx << " rx " << rx << " ty " << ty << " by " << by <<std::endl;
std::cout<<" orgMat cols " <<showMat.cols<< "orgMat rows " << showMat.rows <<std::endl;
if( lx - expandWidth >= 0 )
lx = lx - expandWidth;
if( rx + expandWidth <= showMat.cols - 1 )
rx = rx + expandWidth;
if( ty - expandWidth >= 0)
ty = ty - expandWidth;
if( by + expandWidth <= showMat.rows - 1 )
by = by + expandWidth;
std::cout<<" lx " << lx << " rx " << rx << " ty " << ty << " by " << by <<std::endl;
cv::Point ltP = cv::Point(lx,ty);
cv::Point rtP = cv::Point(rx,ty);
cv::Point lbP = cv::Point(lx,by);
cv::Point rbP = cv::Point(rx,by);
// The rectangle to crop
int rectMatRow = by - ty + 1;
int rectMatCol = rx - lx + 1;
cv::Mat recMat = cv::Mat (rectMatRow,rectMatCol,CV_8UC1,cv::Scalar(0));
//cv::rectangle(showMatClone, ltP, rbP, cv::Scalar(255),1); // draw the rectangle
cv::Mat mouseSlideSeedStoreMat = cv::Mat(rectMatRow,rectMatCol,CV_8UC1,cv::Scalar(0));
for(int y = 0;y<rectMatRow;y++){
uchar *rectMatLineData = recMat.ptr<uchar>(y);
uchar *orgMatLineData = showMat.ptr<uchar>(ty+y);
uchar *msssMatLineData = mouseSlideSeedStoreMat.ptr<uchar>(y);
uchar *ssMatLineData = seedStoreMat.ptr<uchar>(ty+y);
for(int x = 0; x < rectMatCol; x++){
rectMatLineData[x] = orgMatLineData[lx+x];
msssMatLineData[x] = ssMatLineData[lx + x];
}
}
//cutMat = recMat.clone();
//cv::imshow("mouseSlideSeedStoreMat", mouseSlideSeedStoreMat);
cv::Mat bitMat;
int blockSize = 25;
int constValue = 10;
cv::adaptiveThreshold(recMat, bitMat, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, blockSize, constValue);
cv::Mat filterImg;
CutoutImage::filterImage(recMat,filterImg);
cv::Mat nextImg = filterImg.clone();
cv::Mat regionGrowMat;
CutoutImage::rectRegionGrow( mouseSlideRegion, ltP, filterImg, mouseSlideSeedStoreMat ,regionGrowMat);
// cv::imshow("regionGrowMat", regionGrowMat);
cv::Mat mergeMat;
CutoutImage::mergeProcess(regionGrowMat,mergeMat);
// cv::imshow("mergeMat", mergeMat);
CutoutImage::storeSeed(seedStoreMat,mergeMat,ltP); // seedStoreMat: the mask to cut out
// cv::imshow("seedStoreMat", seedStoreMat);
//cv::imshow("showMat", showMatClone);
cv::Mat colorMergeMat;
CutoutImage::colorDispResultWithFullSeedMat(showMergeColorImg,seedStoreMat);
}
Developer ID: celesius, Project: cutImageIOSFramework, Lines of code: 96, Source: CutoutImage.cpp
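A hedged sketch of driving this from a mouse handler; the stroke points and parameter values are purely illustrative.
// Assumed usage: accumulate a cutout mask from a mouse stroke
std::vector<cv::Point> stroke = { cv::Point(120, 80), cv::Point(140, 95), cv::Point(160, 110) };
cv::Mat mask = cv::Mat::zeros(srcColor.size(), CV_8UC1); // persists across strokes and collects the seeds
cutout.processImageCreatMask(stroke, srcColor, mask, /*lineWidth=*/10, /*expandWidth=*/20);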
Example 12: rR
/// <summary>
/// Calculates for each image patch the focus measure value. Result is a vector of Patches (see class Patch).
/// If the image is binary, the relative foreground (foreground pixels / patch size) and the weight are stored for each Patch.
/// </summary>
/// <param name="fm">The specified focus measure method fm (e.g. Brenner).</param>
/// <param name="fmImg">The image fmImg to calculate the focus measure on. If empty, the src image is taken.</param>
/// <param name="binary">if set to <c>true</c> [binary] the input image is binary, specifying the foreground image. The foreground area and the weight is saved to the image patch</param>
/// <returns>True if the focus measure could be computed, false otherwise.</returns>
bool FocusEstimation::compute(FocusMeasure fm, cv::Mat fmImg, bool binary)
{
cv::Mat fImg = fmImg;
if (fImg.empty())
fImg = mSrcImg;
if (fImg.empty())
return false;
if (fImg.channels() != 1 || fImg.depth() != CV_32F)
return false;
BasicFM fmClass;
double f;
mFmPatches.clear();
for (int row = 0; row < fImg.rows; row += (mWindowSize+mSplitSize)) {
for (int col = 0; col < fImg.cols; col += (mWindowSize+mSplitSize)) {
cv::Range rR(row, cv::min(row + mWindowSize, fImg.rows));
cv::Range cR(col, cv::min(col + mWindowSize, fImg.cols));
cv::Mat tile = fImg(rR, cR);
fmClass.setImg(tile);
switch (fm)
{
case dsc::FocusEstimation::BREN:
f = fmClass.computeBREN();
break;
case dsc::FocusEstimation::GLVA:
f = fmClass.computeGLVA();
break;
case dsc::FocusEstimation::GLVN:
f = fmClass.computeGLVN();
break;
case dsc::FocusEstimation::GLLV:
f = fmClass.computeGLLV();
break;
case dsc::FocusEstimation::GRAT:
f = fmClass.computeGRAT();
break;
case dsc::FocusEstimation::GRAS:
f = fmClass.computeGRAS();
break;
case dsc::FocusEstimation::LAPE:
f = fmClass.computeLAPE();
break;
case dsc::FocusEstimation::LAPV:
f = fmClass.computeLAPV();
break;
case dsc::FocusEstimation::ROGR:
f = fmClass.computeROGR();
break;
default:
f = -1;
break;
}
Patch r(cv::Point(col, row), mWindowSize, mWindowSize, f);
if (binary) {
cv::Scalar relArea = cv::sum(tile);
r.setArea(relArea[0]);
relArea[0] = relArea[0] / (double)(mWindowSize * mWindowSize);
//area completely written with text ~ 0.1
//normalize to 1
relArea[0] *= 10.0;
r.setWeight(relArea[0]);
//weight with sigmoid function
//-6: shift sigmoid to the right
//*10: scale normalized Area
//double a = 10.0;
//double b = -6.0;
//double weight = 1.0 / (1 + std::exp(-(relArea[0] * a + b)));
//r.setWeight(weight);
}
mFmPatches.push_back(r);
}
}
return true;
}
Developer ID: TUWien, Project: DocScan, Lines of code: 97, Source: FocusMeasure.cpp
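A hedged sketch of evaluating one measure; per the checks above, the input must be single-channel CV_32F.
// Assumed usage: run the Brenner measure on a CV_32F grayscale image
dsc::FocusEstimation fe;
cv::Mat gray32f; // e.g. produced via gray.convertTo(gray32f, CV_32F)
bool ok = fe.compute(dsc::FocusEstimation::BREN, gray32f, /*binary=*/false);
// on success the per-patch focus values are collected in the internal patch list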
Example 13: computeMaxGradient
cv::Point EyeTracker::computePupilLocation(cv::Mat eye) {
cv::Mat x_gradient = computeMaxGradient(eye);
cv::Mat y_gradient = computeMaxGradient(eye.t()).t();
cv::Mat magnitude = computeMagnitudes(x_gradient, y_gradient);
double gradient_threshold =
computeDynamicThreshold(magnitude, 50.0);
resizeAndRender(magnitude, "sample_eye_gradient_mag");
DEBUG("Gradient threshold: " << gradient_threshold);
for (int y = 0; y < eye.rows; ++y) {
double* x_row = x_gradient.ptr<double>(y);
double* y_row = y_gradient.ptr<double>(y);
const double* mag_row = magnitude.ptr<double>(y);
for (int x = 0; x < eye.cols; ++x) {
double gX = x_row[x];
double gY = y_row[x];
double m = mag_row[x];
if (m > gradient_threshold) {
x_row[x] = gX / m;
y_row[x] = gY / m;
} else {
x_row[x] = 0;
y_row[x] = 0;
}
}
}
resizeAndRender(x_gradient, "sample_eye_gradient_x");
resizeAndRender(y_gradient, "sample_eye_gradient_y");
cv::Mat weight;
cv::GaussianBlur(eye, weight,
cv::Size(5, 5), 0, 0);
for (int y = 0; y < weight.rows; ++y) {
unsigned char* row = weight.ptr<unsigned char>(y);
for (int x = 0; x < weight.cols; ++x) {
row[x] = (255 - row[x]);
}
}
resizeAndRender(weight, "sample_eye_weight");
cv::Mat out_sum = cv::Mat::zeros(eye.rows, eye.cols, CV_64F);
for (int y = 0; y < weight.rows; ++y) {
const double* Xr = x_gradient.ptr<double>(y);
const double* Yr = y_gradient.ptr<double>(y);
for (int x = 0; x < weight.cols; ++x) {
double gX = Xr[x];
double gY = Yr[x];
if (gX == 0.0 && gY == 0.0) {
continue;
}
test_center(x, y, weight, gX, gY, out_sum);
}
}
double gradients_num = weight.rows * weight.cols;
cv::Mat out;
out_sum.convertTo(out, CV_32F, 1.0 / gradients_num);
cv::Point max_point;
double max_value;
cv::minMaxLoc(out, NULL, &max_value, NULL, &max_point);
cv::Mat flood_clone;
double flood_thresh = max_value * 0.97;
cv::threshold(out, flood_clone, flood_thresh, 0.0f, cv::THRESH_TOZERO);
cv::Mat mask = floodKillEdges(flood_clone);
cv::minMaxLoc(out, NULL, &max_value, NULL, &max_point, mask);
resizeAndRender(mask, "sample_eye_mask");
resizeAndRender(out, "sample_eye_possible_centers");
return max_point;
}
Developer ID: AnimatedRNG, Project: node-ui, Lines of code: 75, Source: eye_input.cpp
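A hedged sketch of calling this on a detected eye region; the tracker construction and the ROI are assumptions.
// Assumed usage: locate the pupil inside a grayscale eye ROI
EyeTracker tracker;
cv::Mat eye = gray_frame(eye_rect).clone(); // 8-bit, single channel
cv::Point pupil = tracker.computePupilLocation(eye);
// pupil is in eye-ROI coordinates; add eye_rect.tl() for frame coordinates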
Example 14: ROS_ERROR
void Depth::calcPointCloud(
const cv::Mat& input_disparity, const cv::Mat& left_image,
const double baseline, const double focal_length, const int cx,
const int cy, pcl::PointCloud<pcl::PointXYZRGB>* pointcloud,
pcl::PointCloud<pcl::PointXYZRGB>* freespace_pointcloud) {
pointcloud->clear();
freespace_pointcloud->clear();
if (left_image.depth() != CV_8U) {
ROS_ERROR(
"Pointcloud generation is currently only supported on 8 bit images");
return;
}
cv::Mat disparity_filled, input_valid;
bulidFilledDisparityImage(input_disparity, &disparity_filled, &input_valid);
int side_bound = sad_window_size_ / 2;
// build pointcloud
for (int y_pixels = side_bound; y_pixels < input_disparity.rows - side_bound;
++y_pixels) {
for (int x_pixels = side_bound + min_disparity_ + num_disparities_;
x_pixels < input_disparity.cols - side_bound; ++x_pixels) {
const uint8_t& is_valid = input_valid.at<uint8_t>(y_pixels, x_pixels);
const int16_t& input_value =
input_disparity.at<int16_t>(y_pixels, x_pixels);
const int16_t& filled_value =
disparity_filled.at<int16_t>(y_pixels, x_pixels);
bool freespace;
double disparity_value;
// if the filled disparity is valid it must be a freespace ray
if (filled_value < std::numeric_limits<int16_t>::max()) {
disparity_value = static_cast<double>(filled_value);
freespace = true;
}
// else it is a normal ray
else if (is_valid) {
disparity_value = static_cast<double>(input_value);
freespace = false;
} else {
continue;
}
pcl::PointXYZRGB point;
// the 16* is needed as opencv stores disparity maps as 16 * the true
// values
point.z = (16 * focal_length * baseline) / disparity_value;
point.x = point.z * (x_pixels - cx) / focal_length;
point.y = point.z * (y_pixels - cy) / focal_length;
if (left_image.channels() == 3) {
const cv::Vec3b& color = left_image.at<cv::Vec3b>(y_pixels, x_pixels);
point.b = color[0];
point.g = color[1];
point.r = color[2];
} else if (left_image.channels() == 4) {
const cv::Vec4b& color = left_image.at<cv::Vec4b>(y_pixels, x_pixels);
point.b = color[0];
point.g = color[1];
point.r = color[2];
} else {
point.b = left_image.at<uint8_t>(y_pixels, x_pixels);
point.g = point.b;
point.r = point.b;
}
if (freespace) {
freespace_pointcloud->push_back(point);
} else {
pointcloud->push_back(point);
}
}
}
}
Developer ID: ethz-asl, Project: image_undistort, Lines of code: 78, Source: depth.cpp
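A hedged call-site sketch; the camera parameters below are placeholders, not values from the project.
// Assumed usage: build object and freespace clouds from a 16x-scaled disparity map
pcl::PointCloud<pcl::PointXYZRGB> cloud, freespace;
depth.calcPointCloud(disparity, left_image,
                     /*baseline=*/0.12, /*focal_length=*/450.0,
                     /*cx=*/320, /*cy=*/240,
                     &cloud, &freespace);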
Example 15: extract
bool QrExtractor::extract( const cv::Mat & img )
{
if ( pd->debug )
pd->orig = img.clone();
cv::cvtColor( img, pd->gray, CV_RGB2GRAY );
if ( pd->smoothSz > 0 )
{
// Med
//.........some code omitted here.........