This article collects typical usage examples of the cv::Ptr class in C++. If you are wrestling with questions like: what exactly is the C++ Ptr class for? How is it used? What do real uses look like? Then the curated class examples below may help.
Below are 20 code examples of the Ptr class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C++ code examples.
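Before diving into the examples, here is a minimal sketch (not taken from any project below) of what cv::Ptr is and how it behaves, assuming the OpenCV 2.4-style API that most of the examples on this page use:

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

int main()
{
    // cv::Ptr<T> is OpenCV's reference-counted smart pointer (comparable to
    // std::shared_ptr). Wrapping a raw pointer hands ownership to the Ptr.
    cv::Ptr<cv::FeatureDetector> detector(new cv::ORB());

    if (!detector.empty())  // empty() tests whether the Ptr holds an object
    {
        cv::Mat image = cv::Mat::zeros(64, 64, CV_8UC1);
        std::vector<cv::KeyPoint> keypoints;
        detector->detect(image, keypoints);  // operator-> forwards to the wrapped object
    }
    return 0;  // the detector is released automatically when the last Ptr goes away
}

In OpenCV 3.x the factory functions (e.g. cv::ORB::create()) return a cv::Ptr directly, which is the style some of the later examples use.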
Example 1: callback
void callback(const sensor_msgs::ImageConstPtr& msg)
{
if (image_0_ == NULL)
{
// Take first image:
try
{
image_0_ = cv_bridge::toCvCopy(msg,
sensor_msgs::image_encodings::isColor(msg->encoding) ?
sensor_msgs::image_encodings::BGR8 :
sensor_msgs::image_encodings::MONO8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR_STREAM("Failed to take first image: " << e.what());
return;
}
ROS_INFO("First image taken");
// Detect keypoints:
detector_->detect(image_0_->image, keypoints_0_);
ROS_INFO_STREAM(keypoints_0_.size() << " points found.");
// Extract keypoints' descriptors:
extractor_->compute(image_0_->image, keypoints_0_, descriptors_0_);
}
else
{
// Take second image:
try
{
image_1_ = cv_bridge::toCvShare(msg,
sensor_msgs::image_encodings::isColor(msg->encoding) ?
sensor_msgs::image_encodings::BGR8 :
sensor_msgs::image_encodings::MONO8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR_STREAM("Failed to take image: " << e.what());
return;
}
// Detect keypoints:
std::vector<cv::KeyPoint> keypoints_1;
detector_->detect(image_1_->image, keypoints_1);
ROS_INFO_STREAM(keypoints_1.size() << " points found on the new image.");
// Extract keypoints' descriptors:
cv::Mat descriptors_1;
extractor_->compute(image_1_->image, keypoints_1, descriptors_1);
// Compute matches:
std::vector<cv::DMatch> matches;
match(descriptors_0_, descriptors_1, matches);
// Compute homography:
cv::Mat H;
homography(keypoints_0_, keypoints_1, matches, H);
// Draw matches:
const int s = std::max(image_0_->image.rows, image_0_->image.cols);
cv::Size size(s, s);
cv::Mat draw_image;
warped_image_ = boost::make_shared<cv_bridge::CvImage>(
image_0_->header, image_0_->encoding,
cv::Mat(size, image_0_->image.type()));
if (!H.empty()) // filter outliers
{
std::vector<char> matchesMask(matches.size(), 0);
const size_t N = matches.size();
std::vector<int> queryIdxs(N), trainIdxs(N);
for (size_t i = 0; i < N; ++i)
{
queryIdxs[i] = matches[i].queryIdx;
trainIdxs[i] = matches[i].trainIdx;
}
std::vector<cv::Point2f> points1, points2;
cv::KeyPoint::convert(keypoints_0_, points1, queryIdxs);
cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);
cv::Mat points1t;
cv::perspectiveTransform(cv::Mat(points1), points1t, H);
double maxInlierDist = threshold_ < 0 ? 3 : threshold_;
for (size_t i1 = 0; i1 < points1.size(); ++i1)
{
if (cv::norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
matchesMask[i1] = 1;
}
// draw inliers
cv::drawMatches(
image_0_->image, keypoints_0_,
image_1_->image, keypoints_1, matches,
draw_image, cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
matchesMask,
cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
//......... the remainder of this code is omitted here .........
Author: PETGreen, Project: effective_robotics_programming_with_ros, Lines: 101, Source file: homography.cpp
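Example 1 calls two helpers, match() and homography(), whose bodies fall inside the omitted portion. The sketch below is a plausible reconstruction based only on the call sites above, assuming a brute-force matcher and RANSAC-based cv::findHomography; it is not the original implementation.

#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

// Hypothetical reconstruction of the omitted helpers, inferred from the
// call sites above only.
void match(const cv::Mat& descriptors_0, const cv::Mat& descriptors_1,
           std::vector<cv::DMatch>& matches)
{
    // Brute-force matching. NORM_HAMMING suits binary descriptors (ORB/BRIEF);
    // NORM_L2 would be the choice for SIFT/SURF.
    cv::BFMatcher matcher(cv::NORM_HAMMING);
    matcher.match(descriptors_0, descriptors_1, matches);
}

void homography(const std::vector<cv::KeyPoint>& keypoints_0,
                const std::vector<cv::KeyPoint>& keypoints_1,
                const std::vector<cv::DMatch>& matches, cv::Mat& H)
{
    if (matches.size() < 4)  // findHomography needs at least 4 point pairs
        return;              // (H stays empty, which the caller checks for)
    std::vector<cv::Point2f> points_0, points_1;
    for (size_t i = 0; i < matches.size(); ++i)
    {
        points_0.push_back(keypoints_0[matches[i].queryIdx].pt);
        points_1.push_back(keypoints_1[matches[i].trainIdx].pt);
    }
    H = cv::findHomography(points_0, points_1, CV_RANSAC);
}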
Example 2: CascadeDetectorAdapter
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
Detector(detector)
{
CV_Assert(!detector.empty());
}
Author: Linyes, Project: opencv, Lines: 5, Source file: detection_based_tracker_sample.cpp
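A hedged usage sketch for the constructor above; the cascade file name is a placeholder, and the rest of the CascadeDetectorAdapter class (in the original sample it adapts the classifier for cv::DetectionBasedTracker) is not shown in the excerpt:

#include <opencv2/objdetect/objdetect.hpp>

void makeAdapter()
{
    // Hypothetical usage; the XML path is a placeholder.
    cv::Ptr<cv::CascadeClassifier> cascade(new cv::CascadeClassifier);
    if (!cascade->load("haarcascade_frontalface_alt.xml"))
        return;  // load()/cascade->empty() verify the cascade data itself;
                 // the constructor's CV_Assert(!detector.empty()) only
                 // rejects a null Ptr, not an unloaded classifier
    CascadeDetectorAdapter adapter(cascade);
}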
Example 3: calculate
void MapperGradEuclid::calculate(
const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
Mat gradx, grady, imgDiff;
Mat img2;
CV_DbgAssert(img1.size() == image2.size());
CV_DbgAssert(img1.channels() == image2.channels());
CV_DbgAssert(img1.channels() == 1 || img1.channels() == 3);
if(!res.empty()) {
// We have initial values for the registration: we move img2 to that initial reference
res->inverseWarp(image2, img2);
} else {
img2 = image2;
}
// Matrices with reference frame coordinates
Mat grid_r, grid_c;
grid(img1, grid_r, grid_c);
// Get gradient in all channels
gradient(img1, img2, gradx, grady, imgDiff);
// Calculate parameters using least squares
Matx<double, 3, 3> A;
Vec<double, 3> b;
// For each value in A, all the matrix elements are added and then the channels are also added,
// so we have two calls to "sum". The result can be found in the first element of the final
// Scalar object.
Mat xIy_yIx = grid_c.mul(grady);
xIy_yIx -= grid_r.mul(gradx);
A(0, 0) = sum(sum(gradx.mul(gradx)))[0];
A(0, 1) = sum(sum(gradx.mul(grady)))[0];
A(0, 2) = sum(sum(gradx.mul(xIy_yIx)))[0];
A(1, 1) = sum(sum(grady.mul(grady)))[0];
A(1, 2) = sum(sum(grady.mul(xIy_yIx)))[0];
A(2, 2) = sum(sum(xIy_yIx.mul(xIy_yIx)))[0];
A(1, 0) = A(0, 1);
A(2, 0) = A(0, 2);
A(2, 1) = A(1, 2);
b(0) = -sum(sum(imgDiff.mul(gradx)))[0];
b(1) = -sum(sum(imgDiff.mul(grady)))[0];
b(2) = -sum(sum(imgDiff.mul(xIy_yIx)))[0];
// Calculate parameters. We use Cholesky decomposition, as A is symmetric.
Vec<double, 3> k = A.inv(DECOMP_CHOLESKY)*b;
double cosT = cos(k(2));
double sinT = sin(k(2));
Matx<double, 2, 2> linTr(cosT, -sinT, sinT, cosT);
Vec<double, 2> shift(k(0), k(1));
if(res.empty()) {
res = Ptr<Map>(new MapAffine(linTr, shift));
} else {
MapAffine newTr(linTr, shift);
res->compose(newTr);
}
}
Author: 23pointsNorth, Project: opencv_contrib, Lines: 62, Source file: mappergradeuclid.cpp
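The 3x3 symmetric system A*k = b solved above yields k = (t_x, t_y, theta), i.e. a shift plus a rotation angle, which is then stored or composed as a MapAffine. A hedged sketch of how this mapper is typically driven through the reg module's pyramid wrapper (class and header names from opencv_contrib's reg module; the calculate() signature matches the one shown above):

#include <opencv2/core/core.hpp>
#include <opencv2/reg/mappergradeuclid.hpp>
#include <opencv2/reg/mapperpyramid.hpp>

using namespace cv::reg;

void registerEuclid(const cv::Mat& img1, const cv::Mat& img2)
{
    // The reg module's samples convert images to floating point (CV_64F,
    // 1 or 3 channels) before registration.
    MapperGradEuclid mapper;
    MapperPyramid mapPyr(mapper);   // coarse-to-fine wrapper around calculate()
    cv::Ptr<Map> mapPtr;            // left empty: estimation starts from identity
    mapPyr.calculate(img1, img2, mapPtr);
    cv::Mat registered;
    mapPtr->inverseWarp(img2, registered);  // move img2 onto img1's frame
}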
Example 4: main
int main(int argc, char* argv[]) {
// welcome message
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
std::cout<<"* The retina model still have the following properties:"<<std::endl;
std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
std::cout<<"* for more information, reer to the following papers :"<<std::endl;
std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
std::cout<<"* => reports comments/remarks at [email protected]"<<std::endl;
std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"*** You can use free tools to generate OpenEXR images from images sets : ***"<<std::endl;
std::cout<<"*** => 1. take a set of photos from the same viewpoint using bracketing ***"<<std::endl;
std::cout<<"*** => 2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
std::cout<<"*** => 3. apply tone mapping with this program ***"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
// basic input arguments checking
if (argc<2)
{
help("bad number of parameter");
return -1;
}
bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
int chosenMethod=0;
if (!strcmp(argv[argc-1], "fast"))
{
chosenMethod=1;
std::cout<<"Using fast method (no spectral whithning), adaptation of Meylan&al 2008 method"<<std::endl;
}
std::string inputImageName=argv[1];
//////////////////////////////////////////////////////////////////////////////
// checking input media type (still image, video file, live video acquisition)
std::cout<<"RetinaDemo: processing image "<<inputImageName<<std::endl;
// image processing case
// declare the retina input buffer... it will be fed differently depending on the input media
inputImage = cv::imread(inputImageName, -1); // flag -1 (IMREAD_UNCHANGED): load the image as-is, keeping depth and channels
std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
if (!inputImage.total())
{
help("could not load image, program end");
return -1;
}
// rescale between 0 and 1
normalize(inputImage, inputImage, 0.0, 1.0, cv::NORM_MINMAX);
cv::Mat gammaTransformedImage;
cv::pow(inputImage, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
imshow("EXR image original image, 16bits=>8bits linear rescaling ", inputImage);
imshow("EXR image with basic processing : 16bits=>8bits with gamma correction", gammaTransformedImage);
if (inputImage.empty())
{
help("Input image could not be loaded, aborting");
return -1;
}
//////////////////////////////////////////////////////////////////////////////
// Program start in a try/catch safety context (Retina may throw errors)
try
{
/* create a retina instance with default parameters setup, uncomment the initialisation you want to test
* -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
*/
if (useLogSampling)
{
retina = cv::bioinspired::createRetina(inputImage.size(),true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
else// -> else allocate "classical" retina :
retina = cv::bioinspired::createRetina(inputImage.size());
// create a fast retina tone mapper (Meylan et al. algorithm)
std::cout<<"Allocating fast tone mapper..."<<std::endl;
//cv::Ptr<cv::RetinaFastToneMapping> fastToneMapper=createRetinaFastToneMapping(inputImage.size());
std::cout<<"Fast tone mapper allocated"<<std::endl;
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
retina->write("RetinaDefaultParameters.xml");
// deactivate magnocellular pathway processing (motion information extraction) since it is not useful here
retina->activateMovingContoursProcessing(false);
// declare retina output buffers
cv::Mat retinaOutput_parvo;
/////////////////////////////////////////////
// prepare displays and interactions
histogramClippingValue=0; // default value... updated with interface slider
//inputRescaleMat = inputImage;
//outputRescaleMat = imageInputRescaled;
//......... the remainder of this code is omitted here .........
Author: AnnaMariaM, Project: opencv, Lines: 101, Source file: OpenEXRimages_HDR_Retina_toneMapping.cpp
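The omitted portion feeds the image through the retina and displays the tone-mapped result; below is a minimal hedged sketch of that step, using only documented cv::bioinspired::Retina methods and the retina/inputImage variables set up above:

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/bioinspired.hpp>

// Hedged sketch of the step the omitted code performs: one retina pass.
void toneMapOnce(cv::Ptr<cv::bioinspired::Retina> retina, const cv::Mat& inputImage)
{
    cv::Mat retinaOutput_parvo;
    retina->run(inputImage);               // feed the normalized HDR frame
    retina->getParvo(retinaOutput_parvo);  // 8-bit tone-mapped Parvocellular output
    cv::imshow("Retina Parvo output", retinaOutput_parvo);
    cv::waitKey(10);
}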
Example 5: findCirclesGridAB
bool findCirclesGridAB( cv::InputArray _image, cv::Size patternSize,
cv::OutputArray _centers, int flags, const cv::Ptr<cv::FeatureDetector> &blobDetector )
{
bool isAsymmetricGrid = (flags & cv::CALIB_CB_ASYMMETRIC_GRID) ? true : false;
bool isSymmetricGrid = (flags & cv::CALIB_CB_SYMMETRIC_GRID ) ? true : false;
CV_Assert(isAsymmetricGrid ^ isSymmetricGrid);
cv::Mat image = _image.getMat();
std::vector<cv::Point2f> centers;
std::vector<cv::KeyPoint> keypoints;
blobDetector->detect(image, keypoints);
std::vector<cv::Point2f> points;
for (size_t i = 0; i < keypoints.size(); i++)
{
points.push_back (keypoints[i].pt);
}
if(flags & cv::CALIB_CB_CLUSTERING)
{
CirclesGridClusterFinder circlesGridClusterFinder(isAsymmetricGrid);
circlesGridClusterFinder.findGrid(points, patternSize, centers);
cv::Mat(centers).copyTo(_centers);
return !centers.empty();
}
CirclesGridFinderParameters parameters;
parameters.vertexPenalty = -0.6f;
parameters.vertexGain = 1;
parameters.existingVertexGain = 10000;
parameters.edgeGain = 1;
parameters.edgePenalty = -0.6f;
if(flags & cv::CALIB_CB_ASYMMETRIC_GRID)
parameters.gridType = CirclesGridFinderParameters::ASYMMETRIC_GRID;
if(flags & cv::CALIB_CB_SYMMETRIC_GRID)
parameters.gridType = CirclesGridFinderParameters::SYMMETRIC_GRID;
const int attempts = 2;
const size_t minHomographyPoints = 4;
cv::Mat H;
for (int i = 0; i < attempts; i++)
{
centers.clear();
CirclesGridFinder boxFinder(patternSize, points, parameters);
bool isFound = false;
//#define BE_QUIET 1
#if BE_QUIET
void* oldCbkData;
//cv::ErrorCallback oldCbk = redirectError(quiet_error, 0, &oldCbkData);
#endif
try
{
isFound = boxFinder.findHoles();
}
catch (const cv::Exception&)
{
}
#if BE_QUIET
redirectError(oldCbk, oldCbkData);
#endif
if (isFound)
{
switch(parameters.gridType)
{
case CirclesGridFinderParameters::SYMMETRIC_GRID:
boxFinder.getHoles(centers);
break;
case CirclesGridFinderParameters::ASYMMETRIC_GRID:
boxFinder.getAsymmetricHoles(centers);
break;
default:
CV_Error(CV_StsBadArg, "Unknown pattern type");
}
if (i != 0)
{
cv::Mat orgPointsMat;
cv::transform(centers, orgPointsMat, H.inv());
cv::convertPointsFromHomogeneous(orgPointsMat, centers);
}
cv::Mat(centers).copyTo(_centers);
return true;
}
boxFinder.getHoles(centers);
if (i != attempts - 1)
{
if (centers.size() < minHomographyPoints)
break;
H = CirclesGridFinder::rectifyGrid(boxFinder.getDetectedGridSize(), centers, points, points);
}
}
cv::Mat(centers).copyTo(_centers);
return false;
}
Author: abroun, Project: text_mapping, Lines: 97, Source file: camera_calibration.cpp
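A hedged usage sketch mirroring how cv::findCirclesGrid itself is called; the image path and the 4x11 asymmetric pattern size are placeholder assumptions, and the blob detector is constructed OpenCV 2.4-style:

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>

void detectGrid()
{
    // Hypothetical call; file name and pattern size are placeholders.
    cv::Mat image = cv::imread("calibration_target.png", cv::IMREAD_GRAYSCALE);
    cv::Ptr<cv::FeatureDetector> blobDetector(new cv::SimpleBlobDetector());
    std::vector<cv::Point2f> centers;
    bool found = findCirclesGridAB(image, cv::Size(4, 11), centers,
                                   cv::CALIB_CB_ASYMMETRIC_GRID, blobDetector);
    if (found)
        cv::drawChessboardCorners(image, cv::Size(4, 11), cv::Mat(centers), found);
}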
Example 6: calculate
void MapperGradProj::calculate(
const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
Mat gradx, grady, imgDiff;
Mat img2;
CV_DbgAssert(img1.size() == image2.size());
CV_DbgAssert(img1.channels() == image2.channels());
CV_DbgAssert(img1.channels() == 1 || img1.channels() == 3);
if(!res.empty()) {
// We have initial values for the registration: we move img2 to that initial reference
res->inverseWarp(image2, img2);
} else {
img2 = image2;
}
// Get gradient in all channels
gradient(img1, img2, gradx, grady, imgDiff);
// Matrices with reference frame coordinates
Mat grid_r, grid_c;
grid(img1, grid_r, grid_c);
// Calculate parameters using least squares
Matx<double, 8, 8> A;
Vec<double, 8> b;
// For each value in A, all the matrix elements are added and then the channels are also added,
// so we have two calls to "sum". The result can be found in the first element of the final
// Scalar object.
Mat xIx = grid_c.mul(gradx);
Mat xIy = grid_c.mul(grady);
Mat yIx = grid_r.mul(gradx);
Mat yIy = grid_r.mul(grady);
Mat Ix2 = gradx.mul(gradx);
Mat Iy2 = grady.mul(grady);
Mat xy = grid_c.mul(grid_r);
Mat IxIy = gradx.mul(grady);
Mat x2 = grid_c.mul(grid_c);
Mat y2 = grid_r.mul(grid_r);
Mat G = xIx + yIy;
Mat G2 = sqr(G);
Mat IxG = gradx.mul(G);
Mat IyG = grady.mul(G);
A(0, 0) = sum(sum(x2.mul(Ix2)))[0];
A(1, 0) = sum(sum(xy.mul(Ix2)))[0];
A(2, 0) = sum(sum(grid_c.mul(Ix2)))[0];
A(3, 0) = sum(sum(x2.mul(IxIy)))[0];
A(4, 0) = sum(sum(xy.mul(IxIy)))[0];
A(5, 0) = sum(sum(grid_c.mul(IxIy)))[0];
A(6, 0) = -sum(sum(x2.mul(IxG)))[0];
A(7, 0) = -sum(sum(xy.mul(IxG)))[0];
A(1, 1) = sum(sum(y2.mul(Ix2)))[0];
A(2, 1) = sum(sum(grid_r.mul(Ix2)))[0];
A(3, 1) = A(4, 0);
A(4, 1) = sum(sum(y2.mul(IxIy)))[0];
A(5, 1) = sum(sum(grid_r.mul(IxIy)))[0];
A(6, 1) = A(7, 0);
A(7, 1) = -sum(sum(y2.mul(IxG)))[0];
A(2, 2) = sum(sum(Ix2))[0];
A(3, 2) = A(5, 0);
A(4, 2) = A(5, 1);
A(5, 2) = sum(sum(IxIy))[0];
A(6, 2) = -sum(sum(grid_c.mul(IxG)))[0];
A(7, 2) = -sum(sum(grid_r.mul(IxG)))[0];
A(3, 3) = sum(sum(x2.mul(Iy2)))[0];
A(4, 3) = sum(sum(xy.mul(Iy2)))[0];
A(5, 3) = sum(sum(grid_c.mul(Iy2)))[0];
A(6, 3) = -sum(sum(x2.mul(IyG)))[0];
A(7, 3) = -sum(sum(xy.mul(IyG)))[0];
A(4, 4) = sum(sum(y2.mul(Iy2)))[0];
A(5, 4) = sum(sum(grid_r.mul(Iy2)))[0];
A(6, 4) = A(7, 3);
A(7, 4) = -sum(sum(y2.mul(IyG)))[0];
A(5, 5) = sum(sum(Iy2))[0];
A(6, 5) = -sum(sum(grid_c.mul(IyG)))[0];
A(7, 5) = -sum(sum(grid_r.mul(IyG)))[0];
A(6, 6) = sum(sum(x2.mul(G2)))[0];
A(7, 6) = sum(sum(xy.mul(G2)))[0];
A(7, 7) = sum(sum(y2.mul(G2)))[0];
// Upper half values (A is symmetric)
A(0, 1) = A(1, 0);
A(0, 2) = A(2, 0);
A(0, 3) = A(3, 0);
A(0, 4) = A(4, 0);
A(0, 5) = A(5, 0);
A(0, 6) = A(6, 0);
A(0, 7) = A(7, 0);
A(1, 2) = A(2, 1);
A(1, 3) = A(3, 1);
//......... the remainder of this code is omitted here .........
Author: alfonsosanchezbeato, Project: OpenCV_reg, Lines: 101, Source file: mappergradproj.cpp
Example 7: stereoSelectorCallback
void stereoSelectorCallback(const sensor_msgs::Image::ConstPtr& image_ptr)
{
if(!capture_image)
return;
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(image_ptr, sensor_msgs::image_encodings::RGB8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
cv::cvtColor(cv_ptr->image, input_image, CV_BGR2RGB);
string object_name;
//input_image = crop_hand(input_image);
float certainty = orbit->recognizeObject(input_image, object_name, Orbit::BAG_OF_WORDS_SVM);
/*
* Clean stabilizer if gesture has not been seen in a while
*/
ros::Time now = ros::Time::now();
ros::Duration diff_last_hand_received = now - last_hand_received;
last_hand_received = now;
if(diff_last_hand_received.toSec()>1)
{
for(unsigned int i = 0; i<stabilizer.size(); i++)
{
stabilizer[i] = 0;
}
}
/*
* Update stabilizer when the gesture is not recognized
*/
if(certainty<(float)certainty_threshold)
{
for(unsigned int i = 0; i<stabilizer.size()-1; i++)
{
if(stabilizer[i]>0)
stabilizer[i]--;
}
if(stabilizer[stabilizer.size()-1] < max_stabilizer)
stabilizer[stabilizer.size()-1]++;
return;
}
else
{
if(stabilizer[stabilizer.size()-1] >= 2)
stabilizer[stabilizer.size()-1]-=2;
else if(stabilizer[stabilizer.size()-1] == 1)
stabilizer[stabilizer.size()-1]--;
}
/*
* Update stabilizer when gesture is known
*/
for(unsigned int i = 0; i<stabilizer.size()-1; i++)
{
if(object_name == hands[i])
{
if(stabilizer[i] < max_stabilizer)
stabilizer[i]++;
}
else
{
if(stabilizer[i]>0)
stabilizer[i]--;
}
}
/*
* Print Stabilizer values
*/
for(unsigned int i = 0; i<stabilizer.size(); i++)
{
if(i<stabilizer.size()-1)
printf("%s: %d, ",hands[i].c_str(), stabilizer[i]);
else
//......... the remainder of this code is omitted here .........
Author: Atom-machinerule, Project: OpenQbo, Lines: 101, Source file: hand_gesture_node.cpp
Example 8: backgroundSubstractionDetection
void backgroundSubstractionDetection(cv::Mat sequence, std::vector<cv::Rect> &detectedPedestrianFiltered, cv::Ptr<cv::BackgroundSubtractor> &pMOG2, trackingOption &tracking)
{
int threshold = 150;
cv::Mat mask;
cv::Mat sequenceGrayDiff;
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
std::vector<std::vector<cv::Point> > contours_poly;
std::vector<cv::Rect> detectedPedestrian;
pMOG2->apply(sequence,sequenceGrayDiff);
cv::threshold(sequenceGrayDiff, mask, threshold, 255, cv::THRESH_BINARY);
cv::erode(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(6,6)));
cv::dilate(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(25,55)));
cv::erode(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3,6)));
/*
cv::Mat dist;
cv::distanceTransform(mask, dist, CV_DIST_L2, 3);
cv::normalize(dist, dist, 0, 1., cv::NORM_MINMAX);
cv::threshold(dist, dist, .4, 1., CV_THRESH_BINARY);
cv::imshow("temp", dist);
*/
cv::findContours(mask, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE, cv::Point(0,0));
contours_poly.resize(contours.size());
detectedPedestrian.resize(contours.size());
for(size_t j=0;j<contours.size();j++)
{
cv::approxPolyDP(cv::Mat(contours[j]), contours_poly[j], 3, true);
detectedPedestrian[j] = cv::boundingRect(cv::Mat(contours_poly[j]));
//test
/*
double pix = 30;
if(detectedPedestrian[j].x >= pix)
detectedPedestrian[j].x -= pix;
else
detectedPedestrian[j].x = 0;
if((detectedPedestrian[j].x+detectedPedestrian[j].width) <= (sequence.cols-pix))
detectedPedestrian[j].width += pix;
else
detectedPedestrian[j].width = sequence.cols - detectedPedestrian[j].x;
if(detectedPedestrian[j].y >= pix)
detectedPedestrian[j].y -= pix;
else
detectedPedestrian[j].y = 0;
if((detectedPedestrian[j].y+detectedPedestrian[j].height) <= (sequence.rows-pix))
detectedPedestrian[j].height += pix;
else
detectedPedestrian[j].height = sequence.rows - detectedPedestrian[j].y;
*/
}
if(detectedPedestrian.size() != 0)
{
tracking = GOOD_FEATURES_TO_TRACK;
detectedPedestrianFiltered.clear();
detectedPedestrianFiltered.resize(detectedPedestrian.size());
detectedPedestrianFiltered = detectedPedestrian;
}
else
tracking = NOTHING_TO_TRACK;
}
Author: Pandhariix, Project: Tracking, Lines: 72, Source file: main.cpp
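A hypothetical per-frame driver for the function above; the video path is a placeholder, trackingOption and NOTHING_TO_TRACK come from the example's own (unshown) enum, and the MOG2 factory call follows the OpenCV 3.x API that pMOG2->apply(...) above implies:

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::VideoCapture cap("pedestrians.avi");  // placeholder path
    cv::Ptr<cv::BackgroundSubtractor> pMOG2 = cv::createBackgroundSubtractorMOG2();
    std::vector<cv::Rect> detections;
    trackingOption tracking = NOTHING_TO_TRACK;  // from the example's enum
    cv::Mat frame;
    while (cap.read(frame))
    {
        backgroundSubstractionDetection(frame, detections, pMOG2, tracking);
        for (size_t i = 0; i < detections.size(); ++i)
            cv::rectangle(frame, detections[i], cv::Scalar(0, 255, 0), 2);
        cv::imshow("detections", frame);
        if (cv::waitKey(30) == 27)  // Esc quits
            break;
    }
    return 0;
}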
Example 9: createGICPStructures
Node::Node(ros::NodeHandle& nh, const cv::Mat& visual,
cv::Ptr<cv::FeatureDetector> detector,
cv::Ptr<cv::DescriptorExtractor> extractor,
cv::Ptr<cv::DescriptorMatcher> matcher,
const sensor_msgs::PointCloud2ConstPtr point_cloud,
const cv::Mat& detection_mask)
: id_(0),
flannIndex(NULL),
matcher_(matcher)
{
#ifdef USE_ICP_CODE
gicp_initialized = false;
#endif
std::clock_t starttime=std::clock();
#ifdef USE_SIFT_GPU
SiftGPUFeatureDetector* siftgpu = SiftGPUFeatureDetector::GetInstance();
float* descriptors = siftgpu->detect(visual, feature_locations_2d_);
if (descriptors == NULL) {
ROS_FATAL("Can't run SiftGPU");
}
#else
ROS_FATAL_COND(detector.empty(), "No valid detector!");
detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
#endif
ROS_INFO("Feature detection and descriptor extraction runtime: %f", ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC);
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );
/*
if (id_ == 0)
cloud_pub_ = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_base",10);
else{
*/
cloud_pub_ = nh.advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
// cloud_pub_ransac = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_current_ransac",10);
//} */
// get a pcl::PointCloud to extract depth values at pixel positions
std::clock_t starttime5=std::clock();
// TODO: if batch sending/saving of clouds were removed, the point cloud wouldn't have to be saved,
// which would slim down the memory requirements
pcl::fromROSMsg(*point_cloud,pc_col);
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "pc2->pcl conversion runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );
// project pixels to 3dPositions and create search structures for the gicp
#ifdef USE_SIFT_GPU
// removes also unused descriptors from the descriptors matrix
// build descriptor matrix
projectTo3DSiftGPU(feature_locations_2d_, feature_locations_3d_, pc_col, descriptors, feature_descriptors_); //takes less than 0.01 sec
if (descriptors != NULL) delete descriptors;
#else
projectTo3D(feature_locations_2d_, feature_locations_3d_, pc_col); //takes less than 0.01 sec
#endif
// projectTo3D needs a dense cloud in order to use the points.at(px.x, px.y) call
#ifdef USE_ICP_CODE
std::clock_t starttime4=std::clock();
createGICPStructures();
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "gicp runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );
#endif
std::clock_t starttime2=std::clock();
#ifndef USE_SIFT_GPU
// ROS_INFO("Use extractor");
//cv::Mat topleft, topright;
//topleft = visual.colRange(0,visual.cols/2+50);
//topright= visual.colRange(visual.cols/2+50, visual.cols-1);
//std::vector<cv::KeyPoint> kp1, kp2;
//extractor->compute(topleft, kp1, feature_descriptors_); //fill feature_descriptors_ with information
extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information
#endif
assert(feature_locations_2d_.size() == feature_locations_3d_.size());
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );
ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "constructor runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC <<"sec");
}
Author: trantu, Project: fu_tools, Lines: 79, Source file: node.cpp
Example 10: generateVocabTrainData
/*
generate the data needed to train a codebook/vocabulary for bag-of-words methods
*/
int generateVocabTrainData(std::string trainPath,
std::string vocabTrainDataPath,
cv::Ptr<cv::FeatureDetector> &detector,
cv::Ptr<cv::DescriptorExtractor> &extractor)
{
//Do not overwrite any files
std::ifstream checker;
checker.open(vocabTrainDataPath.c_str());
if(checker.is_open()) {
std::cerr << vocabTrainDataPath << ": Training Data already present" <<
std::endl;
checker.close();
return -1;
}
//load training movie
cv::VideoCapture movie;
movie.open(trainPath);
if (!movie.isOpened()) {
std::cerr << trainPath << ": training movie not found" << std::endl;
return -1;
}
//extract data
std::cout << "Extracting Descriptors" << std::endl;
cv::Mat vocabTrainData;
cv::Mat frame, descs, feats;
std::vector<cv::KeyPoint> kpts;
std::cout.setf(std::ios_base::fixed);
std::cout.precision(0);
while(movie.read(frame)) {
//detect & extract features
detector->detect(frame, kpts);
extractor->compute(frame, kpts, descs);
//add all descriptors to the training data
vocabTrainData.push_back(descs);
//show progress
cv::drawKeypoints(frame, kpts, feats);
cv::imshow("Training Data", feats);
std::cout << 100.0*(movie.get(CV_CAP_PROP_POS_FRAMES) /
movie.get(CV_CAP_PROP_FRAME_COUNT)) << "%. " <<
vocabTrainData.rows << " descriptors \r";
fflush(stdout);
if(cv::waitKey(5) == 27) {
cv::destroyWindow("Training Data");
std::cout << std::endl;
return -1;
}
}
cv::destroyWindow("Training Data");
std::cout << "Done: " << vocabTrainData.rows << " Descriptors" << std::endl;
//save the training data
cv::FileStorage fs;
fs.open(vocabTrainDataPath, cv::FileStorage::WRITE);
fs << "VocabTrainData" << vocabTrainData;
fs.release();
return 0;
}
Author: Aleem21, Project: lsd_slam_noros, Lines: 72, Source file: openFABMAPcli.cpp
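A hedged call sketch for the function above; the file names are placeholders, and the detector/extractor construction uses the string-based OpenCV 2.4 factory API (SURF requires the nonfree module) that code of this era, including openFABMAP, typically relied on:

#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>  // SURF lives in nonfree in OpenCV 2.4

void buildTrainData()
{
    // Hypothetical call; file names are placeholders.
    cv::initModule_nonfree();  // registers SURF with the factory
    cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("STAR");
    cv::Ptr<cv::DescriptorExtractor> extractor = cv::DescriptorExtractor::create("SURF");
    generateVocabTrainData("training.avi", "vocabTrainData.yml",
                           detector, extractor);
}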
Example 11: close
void cv::gpu::VideoReader_GPU::open(const cv::Ptr<VideoSource>& source)
{
CV_Assert( !source.empty() );
close();
impl_.reset(new Impl(source));
}
Author: Linyes, Project: opencv, Lines: 6, Source file: video_reader.cpp
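For context, a hypothetical usage sketch (OpenCV 2.4 gpu module): the string overload of open() builds a default VideoSource internally and, as far as I can tell, ends up in the overload shown above; the file name is a placeholder.

#include <opencv2/gpu/gpu.hpp>
#include <string>

void playGpu()
{
    cv::gpu::VideoReader_GPU reader;
    reader.open(std::string("video.h264"));  // placeholder path
    cv::gpu::GpuMat frame;
    while (reader.read(frame))
    {
        // process the decoded frame on the GPU ...
    }
    reader.close();
}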
Example 12: generateBOWImageDescs
/*
generate FabMap bag-of-words data : an image descriptor for each frame
*/
int generateBOWImageDescs(std::string dataPath,
std::string bowImageDescPath,
std::string vocabPath,
cv::Ptr<cv::FeatureDetector> &detector,
cv::Ptr<cv::DescriptorExtractor> &extractor,
int minWords)
{
cv::FileStorage fs;
//ensure not overwriting training data
std::ifstream checker;
checker.open(bowImageDescPath.c_str());
if(checker.is_open()) {
std::cerr << bowImageDescPath << ": FabMap Training/Testing Data "
"already present" << std::endl;
checker.close();
return -1;
}
//load vocabulary
std::cout << "Loading Vocabulary" << std::endl;
fs.open(vocabPath, cv::FileStorage::READ);
cv::Mat vocab;
fs["Vocabulary"] >> vocab;
if (vocab.empty()) {
std::cerr << vocabPath << ": Vocabulary not found" << std::endl;
return -1;
}
fs.release();
//use a FLANN matcher to generate bag-of-words representations
cv::Ptr<cv::DescriptorMatcher> matcher =
cv::DescriptorMatcher::create("FlannBased");
cv::BOWImgDescriptorExtractor bide(extractor, matcher);
bide.setVocabulary(vocab);
//load movie
cv::VideoCapture movie;
movie.open(dataPath);
if(!movie.isOpened()) {
std::cerr << dataPath << ": movie not found" << std::endl;
return -1;
}
//extract image descriptors
cv::Mat fabmapTrainData;
std::cout << "Extracting Bag-of-words Image Descriptors" << std::endl;
std::cout.setf(std::ios_base::fixed);
std::cout.precision(0);
std::ofstream maskw;
if(minWords) {
maskw.open(std::string(bowImageDescPath + "mask.txt").c_str());
}
cv::Mat frame, bow;
std::vector<cv::KeyPoint> kpts;
while(movie.read(frame)) {
detector->detect(frame, kpts);
bide.compute(frame, kpts, bow);
if(minWords) {
//writing a mask file
if(cv::countNonZero(bow) < minWords) {
//frame masked
maskw << "0" << std::endl;
} else {
//frame accepted
maskw << "1" << std::endl;
fabmapTrainData.push_back(bow);
}
} else {
fabmapTrainData.push_back(bow);
}
std::cout << 100.0 * (movie.get(CV_CAP_PROP_POS_FRAMES) /
movie.get(CV_CAP_PROP_FRAME_COUNT)) << "% \r";
fflush(stdout);
}
std::cout << "Done " << std::endl;
movie.release();
//save training data
fs.open(bowImageDescPath, cv::FileStorage::WRITE);
fs << "BOWImageDescs" << fabmapTrainData;
fs.release();
return 0;
}
Author: Aleem21, Project: lsd_slam_noros, Lines: 97, Source file: openFABMAPcli.cpp
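Between Example 10 (raw training descriptors) and this function (per-frame BOW vectors) sits vocabulary construction. Below is a hedged sketch using cv::BOWKMeansTrainer as a generic stand-in; openFABMAP itself ships its own trainer (of2::BOWMSCTrainer), and the 1000-word count is an arbitrary assumption. It writes the "Vocabulary" key that generateBOWImageDescs reads:

#include <opencv2/features2d/features2d.hpp>

void buildVocabulary()
{
    // Load the raw descriptors saved by generateVocabTrainData.
    cv::FileStorage fs("vocabTrainData.yml", cv::FileStorage::READ);
    cv::Mat vocabTrainData;
    fs["VocabTrainData"] >> vocabTrainData;
    fs.release();

    cv::BOWKMeansTrainer trainer(1000);  // 1000 words: an arbitrary choice
    trainer.add(vocabTrainData);
    cv::Mat vocab = trainer.cluster();   // k-means over all descriptors

    fs.open("vocab.yml", cv::FileStorage::WRITE);
    fs << "Vocabulary" << vocab;         // the key generateBOWImageDescs reads
    fs.release();
}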
Example 13: disparityCallback
void disparityCallback(const stereo_msgs::DisparityImageConstPtr& msg)
{
ROS_DEBUG("Pub Threshold:%f ", LineMOD_Detector::pub_threshold);
if (!LineMOD_Detector::got_color)
{
return;
}
bool show_match_result = true;
cv_bridge::CvImagePtr disp_ptr;
try
{
disp_ptr = cv_bridge::toCvCopy(msg->image, "");
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("disp cv_bridge exception: %s", e.what());
return;
}
float f = msg->f;
float T = msg->T;
cv::Mat depth_img = (f*T*1000)/(disp_ptr->image).clone();
depth_img.convertTo(depth_img, CV_16U);
LineMOD_Detector::sources.push_back(LineMOD_Detector::color_img);
LineMOD_Detector::sources.push_back(depth_img);
// Perform matching
std::vector<cv::linemod::Match> matches;
std::vector<cv::String> class_ids;
std::vector<cv::Mat> quantized_images;
LineMOD_Detector::detector->match(sources, (float)LineMOD_Detector::matching_threshold, matches, class_ids, quantized_images);
LineMOD_Detector::num_classes = detector->numClasses();
ROS_DEBUG("Num Classes: %u", LineMOD_Detector::num_classes);
int classes_visited = 0;
std::set<std::string> visited;
ROS_DEBUG("Matches size: %u", (int)matches.size());
for (int i = 0; (i < (int)matches.size()) && (classes_visited < LineMOD_Detector::num_classes); ++i)
{
cv::linemod::Match m = matches[i];
ROS_DEBUG("Matching count: %u", i);
if (visited.insert(m.class_id).second)
{
++classes_visited;
if (show_match_result)
{
ROS_DEBUG("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
printf("Similarity: %5.1f%%; x: %3d; y: %3d; class: %s; template: %3d\n",
m.similarity, m.x, m.y, m.class_id.c_str(), m.template_id);
}
// Draw matching template
const std::vector<cv::linemod::Template>& templates = LineMOD_Detector::detector->getTemplates(m.class_id, m.template_id);
drawResponse(templates, LineMOD_Detector::num_modalities, LineMOD_Detector::display, cv::Point(m.x, m.y), LineMOD_Detector::detector->getT(0));
if (m.similarity > LineMOD_Detector::pub_threshold)
{
LineMOD_Detector::publishPoint(templates, m, depth_img, msg->header);
}
}
}
LineMOD_Detector::sources.clear();
}
Author: contradict, Project: SampleReturn, Lines: 69, Source file: linemod_detector.cpp
Example 14: main
int main(int argc, char* argv[]) {
// welcome message
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
std::cout<<"* The retina model still have the following properties:"<<std::endl;
std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
std::cout<<"* for more information, reer to the following papers :"<<std::endl;
std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
std::cout<<"* => reports comments/remarks at [email protected]"<<std::endl;
std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"*** You can use free tools to generate OpenEXR images from images sets : ***"<<std::endl;
std::cout<<"*** => 1. take a set of photos from the same viewpoint using bracketing ***"<<std::endl;
std::cout<<"*** => 2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
std::cout<<"*** => 3. apply tone mapping with this program ***"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
// basic input arguments checking
if (argc<4)
{
help("bad number of parameter");
return -1;
}
bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
int startFrameIndex=0, endFrameIndex=0, currentFrameIndex=0;
sscanf(argv[2], "%d", &startFrameIndex);
sscanf(argv[3], "%d", &endFrameIndex);
std::string inputImageNamePrototype(argv[1]);
//////////////////////////////////////////////////////////////////////////////
// checking input media type (still image, video file, live video acquisition)
std::cout<<"RetinaDemo: setting up system with first image..."<<std::endl;
loadNewFrame(inputImageNamePrototype, startFrameIndex, true);
if (inputImage.empty())
{
help("could not load image, program end");
return -1;
}
//////////////////////////////////////////////////////////////////////////////
// Program start in a try/catch safety context (Retina may throw errors)
try
{
/* create a retina instance with default parameters setup, uncomment the initialisation you want to test
* -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
*/
if (useLogSampling)
{
retina = cv::bioinspired::createRetina(inputImage.size(),true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
else// -> else allocate "classical" retina :
retina = cv::bioinspired::createRetina(inputImage.size());
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
retina->write("RetinaDefaultParameters.xml");
// deactivate magnocellular pathway processing (motion information extraction) since it is not useful here
retina->activateMovingContoursProcessing(false);
// declare retina output buffers
cv::Mat retinaOutput_parvo;
/////////////////////////////////////////////
// prepare displays and interactions
histogramClippingValue=0; // default value... updated with interface slider
std::string retinaInputCorrected("Retina input image (with cut edges histogram for basic pixels error avoidance)");
cv::namedWindow(retinaInputCorrected,1);
cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
std::string RetinaParvoWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping");
cv::namedWindow(RetinaParvoWindow, 1);
colorSaturationFactor=3;
cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
retinaHcellsGain=40;
cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
localAdaptation_photoreceptors=197;
localAdaptation_Gcells=190;
cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
std::string powerTransformedInput("EXR image with basic processing : 16bits=>8bits with gamma correction");
/////////////////////////////////////////////
// apply default parameters of user interaction variables
callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
//......... the remainder of this code is omitted here .........
Author: 23pointsNorth, Project: opencv_contrib, …