This article collects typical usage examples of the C++ Eigen::Matrix4f class. If you have been wondering what Matrix4f is for, or how to use it, the curated class examples below may help.
The following presents 15 code examples of the Matrix4f class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
Example 1: functor
template <typename PointSource, typename PointTarget> void
pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::estimateRigidTransformationBFGS (const PointCloudSource &cloud_src,
const std::vector<int> &indices_src,
const PointCloudTarget &cloud_tgt,
const std::vector<int> &indices_tgt,
Eigen::Matrix4f &transformation_matrix)
{
if (indices_src.size () < 4) // need at least 4 samples
{
PCL_THROW_EXCEPTION (NotEnoughPointsException,
"[pcl::GeneralizedIterativeClosestPoint::estimateRigidTransformationBFGS] Need at least 4 points to estimate a transform! Source and target have " << indices_src.size () << " points!");
return;
}
// Set the initial solution
Vector6d x = Vector6d::Zero ();
x[0] = transformation_matrix (0,3);
x[1] = transformation_matrix (1,3);
x[2] = transformation_matrix (2,3);
x[3] = atan2 (transformation_matrix (2,1), transformation_matrix (2,2));
x[4] = asin (-transformation_matrix (2,0));
x[5] = atan2 (transformation_matrix (1,0), transformation_matrix (0,0));
// Set temporary pointers
tmp_src_ = &cloud_src;
tmp_tgt_ = &cloud_tgt;
tmp_idx_src_ = &indices_src;
tmp_idx_tgt_ = &indices_tgt;
// Optimize using BFGS
const double gradient_tol = 1e-2;
OptimizationFunctorWithIndices functor(this);
BFGS<OptimizationFunctorWithIndices> bfgs (functor);
bfgs.parameters.sigma = 0.01;
bfgs.parameters.rho = 0.01;
bfgs.parameters.tau1 = 9;
bfgs.parameters.tau2 = 0.05;
bfgs.parameters.tau3 = 0.5;
bfgs.parameters.order = 3;
int inner_iterations_ = 0;
int result = bfgs.minimizeInit (x);
result = BFGSSpace::Running;
do
{
inner_iterations_++;
result = bfgs.minimizeOneStep (x);
if(result)
{
break;
}
result = bfgs.testGradient(gradient_tol);
} while(result == BFGSSpace::Running && inner_iterations_ < max_inner_iterations_);
if(result == BFGSSpace::NoProgress || result == BFGSSpace::Success || inner_iterations_ == max_inner_iterations_)
{
PCL_DEBUG ("[pcl::registration::TransformationEstimationBFGS::estimateRigidTransformation]");
PCL_DEBUG ("BFGS solver finished with exit code %i \n", result);
transformation_matrix.setIdentity();
applyState(transformation_matrix, x);
}
else
PCL_THROW_EXCEPTION(SolverDidntConvergeException,
"[pcl::" << getClassName () << "::TransformationEstimationBFGS::estimateRigidTransformation] BFGS solver didn't converge!");
}
Developer ID: 2php, Project: pcl, Lines: 63, Source: gicp.hpp
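The six-vector x packs the translation (tx, ty, tz) and Z-Y-X Euler angles extracted by the atan2/asin lines above; applyState (not shown in this excerpt) rebuilds a matrix from an updated state. A minimal sketch of that inverse mapping, assuming the same angle convention (the real applyState composes with the incoming matrix rather than overwriting it, so this helper is only illustrative):
#include <Eigen/Geometry>
// Hypothetical helper mirroring applyState: x = (tx, ty, tz, roll, pitch, yaw)
void stateToMatrix (const Eigen::Matrix<double, 6, 1> &x, Eigen::Matrix4f &t)
{
// R = Rz(yaw) * Ry(pitch) * Rx(roll), matching the extraction above:
// x[3] = atan2(r21, r22), x[4] = asin(-r20), x[5] = atan2(r10, r00)
Eigen::Matrix3f r = (Eigen::AngleAxisf (float (x[5]), Eigen::Vector3f::UnitZ ()) *
Eigen::AngleAxisf (float (x[4]), Eigen::Vector3f::UnitY ()) *
Eigen::AngleAxisf (float (x[3]), Eigen::Vector3f::UnitX ())).toRotationMatrix ();
t.setIdentity ();
t.topLeftCorner<3, 3> () = r;
t (0, 3) = float (x[0]); t (1, 3) = float (x[1]); t (2, 3) = float (x[2]);
}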
Example 2: while
template <typename PointSource, typename PointTarget> inline void
pcl::GeneralizedIterativeClosestPoint<PointSource, PointTarget>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
{
pcl::IterativeClosestPoint<PointSource, PointTarget>::initComputeReciprocal ();
using namespace std;
// Difference between consecutive transforms
double delta = 0;
// Get the size of the target
const size_t N = indices_->size ();
// Set the mahalanobis matrices to identity
mahalanobis_.resize (N, Eigen::Matrix3d::Identity ());
// Compute target cloud covariance matrices
if ((!target_covariances_) || (target_covariances_->empty ()))
{
target_covariances_.reset (new MatricesVector);
computeCovariances<PointTarget> (target_, tree_, *target_covariances_);
}
// Compute input cloud covariance matrices
if ((!input_covariances_) || (input_covariances_->empty ()))
{
input_covariances_.reset (new MatricesVector);
computeCovariances<PointSource> (input_, tree_reciprocal_, *input_covariances_);
}
base_transformation_ = guess;
nr_iterations_ = 0;
converged_ = false;
double dist_threshold = corr_dist_threshold_ * corr_dist_threshold_;
std::vector<int> nn_indices (1);
std::vector<float> nn_dists (1);
while(!converged_)
{
size_t cnt = 0;
std::vector<int> source_indices (indices_->size ());
std::vector<int> target_indices (indices_->size ());
// guess corresponds to base_t and transformation_ to t
Eigen::Matrix4d transform_R = Eigen::Matrix4d::Zero ();
for(size_t i = 0; i < 4; i++)
for(size_t j = 0; j < 4; j++)
for(size_t k = 0; k < 4; k++)
transform_R(i,j)+= double(transformation_(i,k)) * double(guess(k,j));
Eigen::Matrix3d R = transform_R.topLeftCorner<3,3> ();
for (size_t i = 0; i < N; i++)
{
PointSource query = output[i];
query.getVector4fMap () = guess * query.getVector4fMap ();
query.getVector4fMap () = transformation_ * query.getVector4fMap ();
if (!searchForNeighbors (query, nn_indices, nn_dists))
{
PCL_ERROR ("[pcl::%s::computeTransformation] Unable to find a nearest neighbor in the target dataset for point %d in the source!\n", getClassName ().c_str (), (*indices_)[i]);
return;
}
// Check if the distance to the nearest neighbor is smaller than the user imposed threshold
if (nn_dists[0] < dist_threshold)
{
Eigen::Matrix3d &C1 = (*input_covariances_)[i];
Eigen::Matrix3d &C2 = (*target_covariances_)[nn_indices[0]];
Eigen::Matrix3d &M = mahalanobis_[i];
// M = R*C1
M = R * C1;
// temp = M*R' + C2 = R*C1*R' + C2
Eigen::Matrix3d temp = M * R.transpose();
temp+= C2;
// M = temp^-1
M = temp.inverse ();
source_indices[cnt] = static_cast<int> (i);
target_indices[cnt] = nn_indices[0];
cnt++;
}
}
// Resize to the actual number of valid correspondences
source_indices.resize(cnt); target_indices.resize(cnt);
/* optimize transformation using the current assignment and Mahalanobis metrics*/
previous_transformation_ = transformation_;
//optimization right here
try
{
rigid_transformation_estimation_(output, source_indices, *target_, target_indices, transformation_);
/* compute the delta from this iteration */
delta = 0.;
for(int k = 0; k < 4; k++) {
for(int l = 0; l < 4; l++) {
double ratio = 1;
if(k < 3 && l < 3) // rotation part of the transform
ratio = 1./rotation_epsilon_;
else
ratio = 1./transformation_epsilon_;
double c_delta = ratio*fabs(previous_transformation_(k,l) - transformation_(k,l));
if(c_delta > delta)
delta = c_delta;
}
}
}
catch (PCLException &e)
//......... part of the code omitted .........
Developer ID: 2php, Project: pcl, Lines: 101, Source: gicp.hpp
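Two details above are worth noting. The triple loop is simply a 4x4 matrix product, transform_R = transformation_ * guess, promoted to double; and each accepted correspondence receives the GICP Mahalanobis weight M = (R * C1 * R^T + C2)^-1. A minimal Eigen-only sketch of both, under the same conventions:
#include <Eigen/Dense>
// Equivalent of the element-wise triple loop above:
// Eigen::Matrix4d transform_R = transformation_.cast<double> () * guess.cast<double> ();
// Per-correspondence GICP weight: M = (R * C1 * R^T + C2)^-1,
// where C1/C2 are the source/target surface covariances.
Eigen::Matrix3d gicpWeight (const Eigen::Matrix3d &R,
const Eigen::Matrix3d &C1,
const Eigen::Matrix3d &C2)
{
return (R * C1 * R.transpose () + C2).inverse ();
}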
Example 3: getClassName
template <typename PointSource, typename PointTarget, typename FeatureT> void
pcl16::SampleConsensusInitialAlignment<PointSource, PointTarget, FeatureT>::computeTransformation (PointCloudSource &output, const Eigen::Matrix4f& guess)
{
if (!input_features_)
{
PCL16_ERROR ("[pcl16::%s::computeTransformation] ", getClassName ().c_str ());
PCL16_ERROR ("No source features were given! Call setSourceFeatures before aligning.\n");
return;
}
if (!target_features_)
{
PCL16_ERROR ("[pcl16::%s::computeTransformation] ", getClassName ().c_str ());
PCL16_ERROR ("No target features were given! Call setTargetFeatures before aligning.\n");
return;
}
if (!error_functor_)
{
error_functor_.reset (new TruncatedError (static_cast<float> (corr_dist_threshold_)));
}
std::vector<int> sample_indices (nr_samples_);
std::vector<int> corresponding_indices (nr_samples_);
PointCloudSource input_transformed;
float error, lowest_error (0);
final_transformation_ = guess;
int i_iter = 0;
if (!guess.isApprox(Eigen::Matrix4f::Identity (), 0.01f))
{ //If guess is not the Identity matrix we check it.
transformPointCloud (*input_, input_transformed, final_transformation_);
lowest_error = computeErrorMetric (input_transformed, static_cast<float> (corr_dist_threshold_));
i_iter = 1;
}
for (; i_iter < max_iterations_; ++i_iter)
{
// Draw nr_samples_ random samples
selectSamples (*input_, nr_samples_, min_sample_distance_, sample_indices);
// Find corresponding features in the target cloud
findSimilarFeatures (*input_features_, sample_indices, corresponding_indices);
// Estimate the transform from the samples to their corresponding points
transformation_estimation_->estimateRigidTransformation (*input_, sample_indices, *target_, corresponding_indices, transformation_);
// Transform the data and compute the error
transformPointCloud (*input_, input_transformed, transformation_);
error = computeErrorMetric (input_transformed, static_cast<float> (corr_dist_threshold_));
// If the new error is lower, update the final transformation
if (i_iter == 0 || error < lowest_error)
{
lowest_error = error;
final_transformation_ = transformation_;
}
}
// Apply the final transformation
transformPointCloud (*input_, output, final_transformation_);
}
Developer ID: kfu, Project: metu-ros-pkg, Lines: 61, Source: ia_ransac.hpp
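This snippet comes from a renamed PCL fork (pcl16, the ROS packaging); in mainline PCL the class lives in the pcl namespace. A minimal usage sketch, assuming FPFH descriptors and pre-computed feature clouds (all variable names are illustrative):
// #include <pcl/registration/ia_ransac.h>
pcl::SampleConsensusInitialAlignment<pcl::PointXYZ, pcl::PointXYZ, pcl::FPFHSignature33> sac_ia;
sac_ia.setInputSource (source_cloud); // setInputCloud () on older releases
sac_ia.setSourceFeatures (source_features);
sac_ia.setInputTarget (target_cloud);
sac_ia.setTargetFeatures (target_features);
sac_ia.setMinSampleDistance (0.05f);
sac_ia.setMaxCorrespondenceDistance (0.1);
sac_ia.setMaximumIterations (500);
pcl::PointCloud<pcl::PointXYZ> aligned;
sac_ia.align (aligned); // runs computeTransformation () shown above
Eigen::Matrix4f t = sac_ia.getFinalTransformation ();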
Example 4: pairAlign
/** \brief Align a pair of PointCloud datasets and return the result
* \param cloud_src the source PointCloud
* \param cloud_tgt the target PointCloud
* \param output the resultant aligned source PointCloud
* \param final_transform the resultant transform between source and target
*/
void pairAlign (const PointCloud::Ptr cloud_src, const PointCloud::Ptr cloud_tgt, PointCloud::Ptr output, Eigen::Matrix4f &final_transform, bool downsample = false)
{
//
// Downsample for consistency and speed
// \note enable this for large datasets
PointCloud::Ptr src (new PointCloud);
PointCloud::Ptr tgt (new PointCloud);
pcl::VoxelGrid<PointT> grid;
if (downsample)
{
grid.setLeafSize (0.005, 0.005, 0.005);
grid.setInputCloud (cloud_src);
grid.filter (*src);
grid.setInputCloud (cloud_tgt);
grid.filter (*tgt);
}
else
{
src = cloud_src;
tgt = cloud_tgt;
}
// Compute surface normals and curvature
PointCloudWithNormals::Ptr points_with_normals_src (new PointCloudWithNormals);
PointCloudWithNormals::Ptr points_with_normals_tgt (new PointCloudWithNormals);
pcl::NormalEstimation<PointT, PointNormalT> norm_est;
pcl::search::KdTree<pcl::PointXYZRGBA>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZRGBA> ());
norm_est.setSearchMethod (tree);
norm_est.setKSearch (30);
norm_est.setInputCloud (src);
norm_est.compute (*points_with_normals_src);
pcl::copyPointCloud (*src, *points_with_normals_src);
norm_est.setInputCloud (tgt);
norm_est.compute (*points_with_normals_tgt);
pcl::copyPointCloud (*tgt, *points_with_normals_tgt);
//
// Instantiate our custom point representation (defined above) ...
MyPointRepresentation point_representation;
// ... and weight the 'curvature' dimension so that it is balanced against x, y, and z
float alpha[4] = {1.0, 1.0, 1.0, 1.0};
point_representation.setRescaleValues (alpha);
float n;
//float it;
//PCL_INFO("Set the max correspondence distance: ");
//std::cin>> n;
//PCL_INFO("Set the maximum number of iterations: ");
//std::cin>> it;
//PCL_INFO("Set the reciprocal correspondences: ");
//char d;
//std::cin >> d;
// Align
pcl::IterativeClosestPointWithNormals<PointNormalT, PointNormalT> reg; //IterativeClosestPointNonLinear
reg.setTransformationEpsilon (1e-8);
// Set the maximum distance between two correspondences (src<->tgt) to 10cm
// Note: adjust this based on the size of your datasets
reg.setMaxCorrespondenceDistance (0.1); //n
//reg.setEuclideanFitnessEpsilon (1e-15);
//reg.setRANSACIterations(200);
reg.setUseReciprocalCorrespondences(true);
reg.setRANSACOutlierRejectionThreshold(0.01);
// Set the point representation
reg.setPointRepresentation (boost::make_shared<const MyPointRepresentation> (point_representation));
reg.setInputSource (points_with_normals_src);
reg.setInputTarget (points_with_normals_tgt);
//
// Run the same optimization in a loop and visualize the results
Eigen::Matrix4f Ti = Eigen::Matrix4f::Identity (), prev, targetToSource;
PointCloudWithNormals::Ptr reg_result = points_with_normals_src;
reg.setMaximumIterations (2);
for (int i = 0; i < 100; ++i) //it
{
PCL_INFO ("Iteration Nr. %d.\n", i);
// save cloud for visualization purpose
points_with_normals_src = reg_result;
// Estimate
reg.setInputSource (points_with_normals_src);
reg.align (*reg_result);
//......... part of the code omitted .........
Developer ID: shapelabVR, Project: AleStella, Lines: 101, Source: ICPgerarchico.cpp
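The code references a MyPointRepresentation "defined above" that the excerpt does not show. A sketch consistent with the PCL pairwise-registration tutorial this function is adapted from (x, y, z plus curvature, matching the four rescale weights set above):
// #include <pcl/point_representation.h>
class MyPointRepresentation : public pcl::PointRepresentation<PointNormalT>
{
using pcl::PointRepresentation<PointNormalT>::nr_dimensions_;
public:
MyPointRepresentation ()
{
nr_dimensions_ = 4; // the setRescaleValues () call above expects 4 weights
}
virtual void copyToFloatArray (const PointNormalT &p, float *out) const
{
out[0] = p.x; out[1] = p.y; out[2] = p.z; out[3] = p.curvature;
}
};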
Example 5: sub_input
void
pcl::rec_3d_framework::LocalRecognitionPipeline<Distance, PointInT, FeatureT>::recognize ()
{
models_.reset (new std::vector<ModelT>);
transforms_.reset (new std::vector<Eigen::Matrix4f, Eigen::aligned_allocator<Eigen::Matrix4f> >);
PointInTPtr processed;
typename pcl::PointCloud<FeatureT>::Ptr signatures (new pcl::PointCloud<FeatureT> ());
//pcl::PointCloud<int> keypoints_input;
PointInTPtr keypoints_pointcloud;
if (signatures_ != 0 && processed_ != 0 && (signatures_->size () == keypoints_input_->points.size ())) // compare against keypoints_input_: keypoints_pointcloud is still unset at this point
{
keypoints_pointcloud = keypoints_input_;
signatures = signatures_;
processed = processed_;
std::cout << "Using the ISPK ..." << std::endl;
}
else
{
processed.reset( (new pcl::PointCloud<PointInT>));
if (indices_.size () > 0)
{
PointInTPtr sub_input (new pcl::PointCloud<PointInT>);
pcl::copyPointCloud (*input_, indices_, *sub_input);
estimator_->estimate (sub_input, processed, keypoints_pointcloud, signatures);
}
else
{
estimator_->estimate (input_, processed, keypoints_pointcloud, signatures);
}
processed_ = processed;
}
std::cout << "Number of keypoints:" << keypoints_pointcloud->points.size () << std::endl;
int size_feat = sizeof(signatures->points[0].histogram) / sizeof(float);
//feature matching and object hypotheses
std::map<std::string, ObjectHypothesis> object_hypotheses;
{
for (size_t idx = 0; idx < signatures->points.size (); idx++)
{
float* hist = signatures->points[idx].histogram;
std::vector<float> std_hist (hist, hist + size_feat);
flann_model histogram;
histogram.descr = std_hist;
flann::Matrix<int> indices;
flann::Matrix<float> distances;
nearestKSearch (flann_index_, histogram, 1, indices, distances);
//read view pose and keypoint coordinates, transform keypoint coordinates to model coordinates
Eigen::Matrix4f homMatrixPose;
getPose (flann_models_.at (indices[0][0]).model, flann_models_.at (indices[0][0]).view_id, homMatrixPose);
typename pcl::PointCloud<PointInT>::Ptr keypoints (new pcl::PointCloud<PointInT> ());
getKeypoints (flann_models_.at (indices[0][0]).model, flann_models_.at (indices[0][0]).view_id, keypoints);
PointInT view_keypoint = keypoints->points[flann_models_.at (indices[0][0]).keypoint_id];
PointInT model_keypoint;
model_keypoint.getVector4fMap () = homMatrixPose.inverse () * view_keypoint.getVector4fMap ();
typename std::map<std::string, ObjectHypothesis>::iterator it_map;
if ((it_map = object_hypotheses.find (flann_models_.at (indices[0][0]).model.id_)) != object_hypotheses.end ())
{
//if the object hypothesis already exists, then add information
ObjectHypothesis oh = (*it_map).second;
oh.correspondences_pointcloud->points.push_back (model_keypoint);
oh.correspondences_to_inputcloud->push_back (
pcl::Correspondence (static_cast<int> (oh.correspondences_pointcloud->points.size () - 1),
static_cast<int> (idx), distances[0][0]));
oh.feature_distances_->push_back (distances[0][0]);
}
else
{
//create object hypothesis
ObjectHypothesis oh;
typename pcl::PointCloud<PointInT>::Ptr correspondences_pointcloud (new pcl::PointCloud<PointInT> ());
correspondences_pointcloud->points.push_back (model_keypoint);
oh.model_ = flann_models_.at (indices[0][0]).model;
oh.correspondences_pointcloud = correspondences_pointcloud;
//the last keypoint added for this model corresponds to the current scene keypoint
pcl::CorrespondencesPtr corr (new pcl::Correspondences ());
oh.correspondences_to_inputcloud = corr;
oh.correspondences_to_inputcloud->push_back (pcl::Correspondence (0, static_cast<int> (idx), distances[0][0]));
boost::shared_ptr < std::vector<float> > feat_dist (new std::vector<float>);
feat_dist->push_back (distances[0][0]);
oh.feature_distances_ = feat_dist;
object_hypotheses[oh.model_.id_] = oh;
}
}
//......... part of the code omitted .........
Developer ID: 2php, Project: pcl, Lines: 101, Source: local_recognizer.hpp
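The key geometric step above is mapping a keypoint observed in a training view back into model coordinates through the inverse of the view pose. Reduced to plain Eigen (the values here are illustrative stand-ins):
Eigen::Matrix4f homMatrixPose; // pose of the model in the training view
homMatrixPose.setIdentity (); // stand-in; normally loaded via getPose ()
Eigen::Vector4f view_keypoint (0.1f, 0.2f, 0.3f, 1.0f); // homogeneous point
Eigen::Vector4f model_keypoint = homMatrixPose.inverse () * view_keypoint;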
Example 6: set_uniform_matrix
void set_uniform_matrix(GLuint programID, const char* NAME, const Eigen::Matrix4f& matrix) {
GLint matrix_id = glGetUniformLocation(programID, NAME); // returns a GLint, -1 if the uniform is not found
glUniformMatrix4fv(matrix_id, 1, GL_FALSE, matrix.data());
}
Developer ID: leonsenft, Project: OpenGP, Lines: 4, Source: main.cpp
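matrix.data() works here because Eigen stores matrices column-major by default, which is exactly the layout glUniformMatrix4fv expects with transpose = GL_FALSE. A usage sketch (shader_program is a hypothetical, already-linked GLuint):
Eigen::Matrix4f mvp = Eigen::Matrix4f::Identity (); // model-view-projection
set_uniform_matrix (shader_program, "MVP", mvp); // upload before glDraw* calls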
Example 7: qfinal
Eigen::Matrix4f p2w(Eigen::Matrix4f::Identity());
p2w.block<3,3>(0,0) = eigDx.transpose();
p2w.block<3,1>(0,3) = -1.f * (p2w.block<3,3>(0,0) * centroid.head<3>());
pcl::PointCloud<PointN> cPoints;
pcl::transformPointCloud(*final, cPoints, p2w);
PointN min_pt, max_pt;
pcl::getMinMax3D(cPoints, min_pt, max_pt);
const Eigen::Vector3f mean_diag = 0.5f*(max_pt.getVector3fMap() + min_pt.getVector3fMap());
// final transform
const Eigen::Quaternionf qfinal(eigDx);
tfinal = eigDx*mean_diag + centroid.head<3>();
std::cout << "tfinal: " << tfinal << std::endl;
pcl::PointXYZRGB minp, maxp;
Eigen::Matrix4f _tr = Eigen::Matrix4f::Identity();
_tr.topLeftCorner<3,3>() = qfinal.toRotationMatrix();
_tr.block<3,1>(0,3) = tfinal;
_x = (max_pt.x-min_pt.x);// * 0.5;
_y = (max_pt.y-min_pt.y);// * 0.5;
_z = (max_pt.z-min_pt.z);// * 0.5;
_tr = _tr.inverse().eval();
qfinal_r = qfinal;
pcl::PointIndices::Ptr object_indices (new pcl::PointIndices);
for (size_t i = 0; i < final->size(); i++){
PointN p = (*final)[i];
Developer ID: Birkehoj, Project: vis3, Lines: 31, Source: computeFeatures.hpp
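This fragment starts mid-function: eigDx and centroid come from a PCA of the cloud, and p2w moves the points into the eigenbasis so that the axis-aligned min/max yield an oriented bounding box. A sketch of the preamble the fragment assumes:
// #include <pcl/common/centroid.h>, <pcl/common/common.h>
Eigen::Vector4f centroid;
pcl::compute3DCentroid (*final, centroid);
Eigen::Matrix3f covariance;
pcl::computeCovarianceMatrixNormalized (*final, centroid, covariance);
Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> solver (covariance, Eigen::ComputeEigenvectors);
Eigen::Matrix3f eigDx = solver.eigenvectors ();
// force a right-handed basis so qfinal is a proper rotation
eigDx.col (2) = eigDx.col (0).cross (eigDx.col (1));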
Example 8: solve
void GenericIKWindow::solve()
{
if (!kc || !ikSolver || !tcp)
{
return;
}
cout << "---- Solve IK ----" << endl;
IKSolver::CartesianSelection s = IKSolver::All;
if (UI.radioButton_Pos->isChecked())
{
s = IKSolver::Position;
}
//if (UI.radioButton_Ori->isChecked())
// s = IKSolver::Orientation;
//ikSolver->setVerbose(true);
Eigen::Matrix4f targetPose = box->getGlobalPose();
/*
if (kc && kc->getNode(kc->getSize() - 1)->isTranslationalJoint() && kc->getNode(kc->getSize() - 1)->getParent())
{
// setup gaze IK
float v = (kc->getNode(kc->getSize() - 1)->getParent()->getGlobalPose().block(0, 3, 3, 1) - targetPose.block(0, 3, 3, 1)).norm();
cout << "Setting initial value of translation joint to :" << v << endl;
ikSolver->setupTranslationalJoint(kc->getNode(kc->getSize() - 1), v);
kc->getNode(kc->getSize() - 1)->setJointValue(v);
}*/
clock_t startT = clock();
if (ikGazeSolver)
{
ikGazeSolver->solve(targetPose.block(0, 3, 3, 1));
}
else
{
ikSolver->solve(targetPose, s, 50);
}
clock_t endT = clock();
Eigen::Matrix4f actPose = tcp->getGlobalPose();
float errorPos = (actPose.block(0, 3, 3, 1) - targetPose.block(0, 3, 3, 1)).norm();
MathTools::Quaternion q1 = MathTools::eigen4f2quat(actPose);
MathTools::Quaternion q2 = MathTools::eigen4f2quat(targetPose);
MathTools::Quaternion d = getDelta(q1, q2);
float errorOri = fabs(180.0f - (d.w + 1.0f) * 90.0f);
float diffClock = (float)(((float)(endT - startT) / (float)CLOCKS_PER_SEC) * 1000.0f);
QString qd = "Time: ";
qd += QString::number(diffClock, 'f', 2);
qd += " ms";
UI.labelTime->setText(qd);
QString qd2 = "Error Pos: ";
qd2 += QString::number(errorPos, 'f', 2);
qd2 += " mm";
UI.labelPos->setText(qd2);
QString qd3 = "Error Ori: ";
qd3 += QString::number(errorOri, 'f', 2);
qd3 += " deg";
UI.labelOri->setText(qd3);
cout << "Joint values:" << endl;
std::vector<RobotNodePtr> nodes = kc->getAllRobotNodes();
for (size_t i = 0; i < nodes.size(); i++)
{
cout << nodes[i]->getJointValue() << endl;
}
/*
DifferentialIKPtr j(new DifferentialIK(kc));
j->setGoal(targetPose,RobotNodePtr(),IKSolver::All);
j->computeSteps(0.2f,0,50);
*/
exViewer->render();
cout << "---- END Solve IK ----" << endl;
}
Developer ID: gkalogiannis, Project: simox, Lines: 81, Source: GenericIKWindow.cpp
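The orientation error above is derived from a quaternion delta with an ad-hoc formula. A more direct sketch of the same position/orientation errors using only Eigen (angularDistance returns radians):
#include <Eigen/Geometry>
#include <cmath>
void poseErrors (const Eigen::Matrix4f &actual, const Eigen::Matrix4f &target,
float &errPos, float &errOriDeg)
{
errPos = (actual.block<3, 1> (0, 3) - target.block<3, 1> (0, 3)).norm ();
Eigen::Quaternionf qa (actual.block<3, 3> (0, 0));
Eigen::Quaternionf qt (target.block<3, 3> (0, 0));
errOriDeg = qa.angularDistance (qt) * 180.0f / static_cast<float> (M_PI);
}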
Example 9: synthesizeInfo
void IndexMap::synthesizeInfo(const Eigen::Matrix4f & pose,
const std::pair<GLuint, GLuint> & model,
const float depthCutoff,
const float confThreshold)
{
glEnable(GL_PROGRAM_POINT_SIZE);
glEnable(GL_POINT_SPRITE);
infoFrameBuffer.Bind();
glPushAttrib(GL_VIEWPORT_BIT);
glViewport(0, 0, infoRenderBuffer.width, infoRenderBuffer.height);
glClearColor(0, 0, 0, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
combinedProgram->Bind();
Eigen::Matrix4f t_inv = pose.inverse();
Eigen::Vector4f cam(Intrinsics::getInstance().cx(),
Intrinsics::getInstance().cy(),
Intrinsics::getInstance().fx(),
Intrinsics::getInstance().fy());
combinedProgram->setUniform(Uniform("t_inv", t_inv));
combinedProgram->setUniform(Uniform("cam", cam));
combinedProgram->setUniform(Uniform("maxDepth", depthCutoff));
combinedProgram->setUniform(Uniform("confThreshold", confThreshold));
combinedProgram->setUniform(Uniform("cols", (float)Resolution::getInstance().cols()));
combinedProgram->setUniform(Uniform("rows", (float)Resolution::getInstance().rows()));
combinedProgram->setUniform(Uniform("time", 0));
combinedProgram->setUniform(Uniform("maxTime", std::numeric_limits<int>::max()));
combinedProgram->setUniform(Uniform("timeDelta", std::numeric_limits<int>::max()));
glBindBuffer(GL_ARRAY_BUFFER, model.first);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f) * 1));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, Vertex::SIZE, reinterpret_cast<GLvoid*>(sizeof(Eigen::Vector4f) * 2));
glDrawTransformFeedback(GL_POINTS, model.second);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, 0);
infoFrameBuffer.Unbind();
combinedProgram->Unbind();
glDisable(GL_PROGRAM_POINT_SIZE);
glDisable(GL_POINT_SPRITE);
glPopAttrib();
glFinish();
}
Developer ID: HarveyLiuFly, Project: ElasticFusion, Lines: 66, Source: IndexMap.cpp
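pose.inverse() above invokes a general 4x4 inverse. For a rigid-body pose [R | t] the inverse is simply [R^T | -R^T t], which is cheaper and numerically safer; a sketch:
Eigen::Matrix4f rigidInverse (const Eigen::Matrix4f &pose)
{
Eigen::Matrix4f inv = Eigen::Matrix4f::Identity ();
const Eigen::Matrix3f r = pose.topLeftCorner<3, 3> ();
inv.topLeftCorner<3, 3> () = r.transpose ();
inv.topRightCorner<3, 1> () = -r.transpose () * pose.topRightCorner<3, 1> ();
return inv;
}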
Example 10: given
template <typename PointT> bool
pcl::SampleConsensusModelSphere<PointT>::computeModelCoefficients (
const std::vector<int> &samples, Eigen::VectorXf &model_coefficients)
{
// Need 4 samples
if (samples.size () != 4)
{
PCL_ERROR ("[pcl::SampleConsensusModelSphere::computeModelCoefficients] Invalid set of samples given (%zu)!\n", samples.size ());
return (false);
}
Eigen::Matrix4f temp;
for (int i = 0; i < 4; i++)
{
temp (i, 0) = input_->points[samples[i]].x;
temp (i, 1) = input_->points[samples[i]].y;
temp (i, 2) = input_->points[samples[i]].z;
temp (i, 3) = 1;
}
float m11 = temp.determinant ();
if (m11 == 0)
return (false); // the points don't define a sphere!
for (int i = 0; i < 4; ++i)
temp (i, 0) = (input_->points[samples[i]].x) * (input_->points[samples[i]].x) +
(input_->points[samples[i]].y) * (input_->points[samples[i]].y) +
(input_->points[samples[i]].z) * (input_->points[samples[i]].z);
float m12 = temp.determinant ();
for (int i = 0; i < 4; ++i)
{
temp (i, 1) = temp (i, 0);
temp (i, 0) = input_->points[samples[i]].x;
}
float m13 = temp.determinant ();
for (int i = 0; i < 4; ++i)
{
temp (i, 2) = temp (i, 1);
temp (i, 1) = input_->points[samples[i]].y;
}
float m14 = temp.determinant ();
for (int i = 0; i < 4; ++i)
{
temp (i, 0) = temp (i, 2);
temp (i, 1) = input_->points[samples[i]].x;
temp (i, 2) = input_->points[samples[i]].y;
temp (i, 3) = input_->points[samples[i]].z;
}
float m15 = temp.determinant ();
// Center (x , y, z)
model_coefficients.resize (4);
model_coefficients[0] = 0.5f * m12 / m11;
model_coefficients[1] = 0.5f * m13 / m11;
model_coefficients[2] = 0.5f * m14 / m11;
// Radius
model_coefficients[3] = sqrtf (
model_coefficients[0] * model_coefficients[0] +
model_coefficients[1] * model_coefficients[1] +
model_coefficients[2] * model_coefficients[2] - m15 / m11);
return (true);
}
Developer ID: Bastl34, Project: PCL, Lines: 65, Source: sac_model_sphere.hpp
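The minors m11 through m15 implement the classic determinant formulation of the sphere through four points. In practice the model is usually driven through RANSAC rather than called directly; a usage sketch (illustrative, with the cloud left to be filled):
// #include <pcl/sample_consensus/ransac.h>, <pcl/sample_consensus/sac_model_sphere.h>
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
// ... fill cloud with points sampled near a sphere ...
pcl::SampleConsensusModelSphere<pcl::PointXYZ>::Ptr
model (new pcl::SampleConsensusModelSphere<pcl::PointXYZ> (cloud));
pcl::RandomSampleConsensus<pcl::PointXYZ> ransac (model);
ransac.setDistanceThreshold (0.01);
ransac.computeModel (); // calls computeModelCoefficients () shown above
Eigen::VectorXf coeff;
ransac.getModelCoefficients (coeff); // coeff = (cx, cy, cz, r)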
Example 11: Iterate_ICP
void ICP::Iterate_ICP(const CameraData& d_newDepthData,
const PointInfo& d_renderedVertexMap,
float* d_currentTransform) // This is input & output parameter on cuda
{
//static const Eigen::Matrix4f m_identity4f = Eigen::Matrix4f::Identity();
// Eigen::Matrix4f m_identity4f;
// m_identity4f.setIdentity();
cutilSafeCall( cudaMemcpy(incrementalTransform, m_identity4f.data(), 16*sizeof(float), cudaMemcpyHostToDevice) );
// d_currentTransform currently holds the previous transformation matrix. Initialize new transformation matrix to the previous
cutilSafeCall( cudaMemcpy(newTransform, d_currentTransform, 16*sizeof(float), cudaMemcpyDeviceToDevice) );
int iterationCounter = 0;
cutilSafeCall(cudaMemcpy(bestTransform, newTransform, 16*sizeof(float), cudaMemcpyDeviceToDevice));
// While loop - until we get minimum transformation matrix:
do {
iterationCounter++;
// Copy current transform to its matching field.
cutilSafeCall( cudaMemcpy(invLastTransform_host, newTransform, 16*sizeof(float), cudaMemcpyDeviceToHost) );
//cutilSafeCall( cudaMemcpy(invLastTransformMatrix.data(), newTransform, 16*sizeof(float), cudaMemcpyDeviceToHost) );
cudaDeviceSynchronize();
// Calculate inverse of current transformation matrix and store in invLastTransformMatrix.
Eigen::Map<Eigen::Matrix<float, 4, 4, Eigen::RowMajor> > invLastTransformMatrix(invLastTransform_host); // TODO: COLMAJOR IS ALSO WORKING WELL. CHECK OUT. MAYBE BUGGY
invLastTransformMatrix = invLastTransformMatrix.inverse();
// Copy current inverse transformation matrix into cuda.
cutilSafeCall( cudaMemcpy(invLastTransform, invLastTransform_host, 16*sizeof(float), cudaMemcpyHostToDevice) );
cudaDeviceSynchronize();
// Find corresponding points and build A and b matrices.
FindCorresponding (d_newDepthData, d_renderedVertexMap);
//////calculating Xopt = (At * A)^(-1) * At * b using tree reduction//////
cutilSafeCall(cudaDeviceSynchronize());
ClearHostMatrices();
cutilSafeCall(cudaMemcpy(AtA_host, AtA, NUM_VARS*NUM_VARS*m_numCorrespondenceBlocks*sizeof(float), cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(Atb_host, Atb, NUM_VARS*m_numCorrespondenceBlocks*sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < m_numCorrespondenceBlocks ; ++i) {
for (int j = 0; j < NUM_VARS*NUM_VARS; ++j) {
AtA_sum[j] += AtA_host[i * NUM_VARS*NUM_VARS + j];
}
for (int j = 0; j < NUM_VARS; ++j) {
Atb_sum[j] += Atb_host[i * NUM_VARS + j];
}
}
for (int i = 0; i < NUM_VARS; ++i) {
for (int j = 0; j < i; ++j) {
AtA_sum[i * NUM_VARS + j] = AtA_sum[j * NUM_VARS + i];
}
}
Eigen::Matrix<float, NUM_VARS, NUM_VARS, Eigen::RowMajor> AtA_eigen;
Eigen::Matrix<float, NUM_VARS, 1> Atb_eigen;
for (int i = 0; i < NUM_VARS; ++i) {
Atb_eigen(i) = Atb_sum[i];
for (int j = 0; j < NUM_VARS; ++j) {
AtA_eigen(i, j) = AtA_sum[i * NUM_VARS + j];
}
}
float det = AtA_eigen.determinant();
if (isnan(det) || det == 0.f || fabs(det) < _EPSILON_) {
// TODO - PROBLEM! MATRIX IS SINGULAR. HANDLE
cutilSafeCall( cudaMemcpy(newTransform, d_currentTransform, 16*sizeof(float), cudaMemcpyDeviceToDevice) );
cout << " No transform found." << endl;
break;
}
Eigen::Matrix<float, NUM_VARS, 1> parameters = AtA_eigen.llt().solve(Atb_eigen).cast<float>();
for (int i = 0; i < 3; ++i) {
xOpt_host[i] = -parameters(i); // TODO - CONSIDER WRITE -parameters(i) (minus) like in KinectShape
}
for (int i = 3; i < 6; ++i) { // Angles are negated, translations are positive
xOpt_host[i] = parameters(i);
}
cutilSafeCall(cudaMemcpy(xOpt, xOpt_host, NUM_VARS*sizeof(float), cudaMemcpyHostToDevice));
BuildNewTransformMatrix();
CameraHandler::updateCameraData(m_gridSize, m_blockSize, d_newDepthData, NULL, newTransform, NULL, NULL);
cutilSafeCall(cudaDeviceSynchronize());
} while (iterationCounter < m_maxIterations);
cutilSafeCall(cudaMemcpy(d_currentTransform, newTransform, 16*sizeof(float), cudaMemcpyDeviceToDevice));
// The output is in @Param d_currentTransform
}
Developer ID: gilbs10, Project: OpenFusion, Lines: 99, Source: ICP.cpp
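BuildNewTransformMatrix runs on the GPU and is not shown. ICP linearizations of this form typically turn the solved 6-vector into a small-angle incremental transform; a sketch, assuming the parameter order (alpha, beta, gamma, tx, ty, tz) used for xOpt above:
// R ~ I + [w]x for small angles; t = (tx, ty, tz)
Eigen::Matrix4f incrementalTransform (const float x[6])
{
Eigen::Matrix4f t;
t << 1.f, -x[2], x[1], x[3],
x[2], 1.f, -x[0], x[4],
-x[1], x[0], 1.f, x[5],
0.f, 0.f, 0.f, 1.f;
return t;
}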
Example 12: pairAlign
void pairAlign( const PointCloud::Ptr cloud_src , const PointCloud::Ptr cloud_tgt , Eigen::Matrix4f &final_transform )
{
PointCloud::Ptr src (new PointCloud);
PointCloud::Ptr tgt (new PointCloud);
*src = *cloud_src;
*tgt = *cloud_tgt;
// Compute surface normals and curvature
PointCloudWithNormals::Ptr points_with_normals_src (new PointCloudWithNormals);
PointCloudWithNormals::Ptr points_with_normals_tgt (new PointCloudWithNormals);
pcl::NormalEstimation<PointT, PointNormalT> norm_est;
pcl::search::KdTree<PointT>::Ptr tree (new pcl::search::KdTree<PointT> ());
norm_est.setSearchMethod (tree);
norm_est.setKSearch (30);
norm_est.setInputCloud (src);
norm_est.compute (*points_with_normals_src);
pcl::copyPointCloud (*src, *points_with_normals_src);
norm_est.setInputCloud (tgt);
norm_est.compute (*points_with_normals_tgt);
pcl::copyPointCloud (*tgt, *points_with_normals_tgt);
// Instantiate our custom point representation (defined above) ...
MyPointRepresentation point_representation;
// ... and weight the 'curvature' dimension so that it is balanced against x, y, and z
float alpha[4] = {1.0, 1.0, 1.0, 1.0};
point_representation.setRescaleValues (alpha);
//
// Align
pcl::IterativeClosestPointNonLinear<PointNormalT, PointNormalT> reg;
reg.setTransformationEpsilon (1e-3);
// Set the maximum distance between two correspondences (src<->tgt) to 10cm
// Note: adjust this based on the size of your datasets
reg.setMaxCorrespondenceDistance (0.5);
// Set the point representation
reg.setPointRepresentation (boost::make_shared<const MyPointRepresentation> (point_representation));
reg.setInputSource (points_with_normals_src);
reg.setInputTarget (points_with_normals_tgt);
//
// Run the same optimization in a loop and visualize the results
Eigen::Matrix4f Ti = Eigen::Matrix4f::Identity (), prev, targetToSource;
PointCloudWithNormals::Ptr reg_result = points_with_normals_src;
reg.setMaximumIterations (2);
//originally iterates up to 30
for (int i = 0; i < 10; ++i)
{
// save cloud for visualization purpose
points_with_normals_src = reg_result;
// Estimate
reg.setInputSource (points_with_normals_src);
reg.align (*reg_result);
//accumulate transformation between each Iteration
Ti = reg.getFinalTransformation () * Ti;
//if the difference between this transformation and the previous one
//is smaller than the threshold, refine the process by reducing
//the maximal correspondence distance
if (fabs ((reg.getLastIncrementalTransformation () - prev).sum ()) < reg.getTransformationEpsilon ())
{
reg.setMaxCorrespondenceDistance (reg.getMaxCorrespondenceDistance () - 0.001);
}
prev = reg.getLastIncrementalTransformation ();
}
//
// Get the transformation from target to source
targetToSource = Ti.inverse();
final_transform = targetToSource;
}
Developer ID: ProjectIRoniC, Project: OpenPointMesh, Lines: 85, Source: CloudRegistrationFunctions.cpp
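Because final_transform maps the target frame into the source frame, consecutive pair alignments can be chained into the first cloud's frame. A usage sketch (clouds is a hypothetical std::vector of PointCloud::Ptr):
// #include <pcl/common/transforms.h>
Eigen::Matrix4f GlobalTransform = Eigen::Matrix4f::Identity (), pairTransform;
for (size_t i = 1; i < clouds.size (); ++i)
{
pairAlign (clouds[i - 1], clouds[i], pairTransform);
GlobalTransform = GlobalTransform * pairTransform; // frame i -> frame 0
PointCloud::Ptr aligned (new PointCloud);
pcl::transformPointCloud (*clouds[i], *aligned, GlobalTransform);
}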
Example 13: printHelp
/* ---[ */
int
main (int argc, char** argv)
{
print_info ("Transform a cloud. For more information, use: %s -h\n", argv[0]);
bool help = false;
parse_argument (argc, argv, "-h", help);
if (argc < 3 || help)
{
printHelp (argc, argv);
return (-1);
}
// Parse the command line arguments for .pcd files
std::vector<int> p_file_indices;
p_file_indices = parse_file_extension_argument (argc, argv, ".pcd");
if (p_file_indices.size () != 2)
{
print_error ("Need one input PCD file and one output PCD file to continue.\n");
return (-1);
}
// Initialize the transformation matrix
Eigen::Matrix4f tform;
tform.setIdentity ();
// Command line parsing
float dx, dy, dz;
std::vector<float> values;
if (parse_3x_arguments (argc, argv, "-trans", dx, dy, dz) > -1)
{
tform (0, 3) = dx;
tform (1, 3) = dy;
tform (2, 3) = dz;
}
if (parse_x_arguments (argc, argv, "-quat", values) > -1)
{
if (values.size () == 4)
{
const float& x = values[0];
const float& y = values[1];
const float& z = values[2];
const float& w = values[3];
tform.topLeftCorner (3, 3) = Eigen::Matrix3f (Eigen::Quaternionf (w, x, y, z));
}
else
{
print_error ("Wrong number of values given (%zu): ", values.size ());
print_error ("The quaternion specified with -quat must contain 4 elements (w,x,y,z).\n");
}
}
if (parse_x_arguments (argc, argv, "-axisangle", values) > -1)
{
if (values.size () == 4)
{
const float& ax = values[0];
const float& ay = values[1];
const float& az = values[2];
const float& theta = values[3];
tform.topLeftCorner (3, 3) = Eigen::Matrix3f (Eigen::AngleAxisf (theta, Eigen::Vector3f (ax, ay, az)));
}
else
{
print_error ("Wrong number of values given (%zu): ", values.size ());
print_error ("The rotation specified with -axisangle must contain 4 elements (ax,ay,az,theta).\n");
}
}
if (parse_x_arguments (argc, argv, "-matrix", values) > -1)
{
if (values.size () == 9 || values.size () == 16)
{
int n = values.size () == 9 ? 3 : 4;
for (int r = 0; r < n; ++r)
for (int c = 0; c < n; ++c)
tform (r, c) = values[n*r+c];
}
else
{
print_error ("Wrong number of values given (%zu): ", values.size ());
print_error ("The transformation specified with -matrix must be 3x3 (9) or 4x4 (16).\n");
}
}
// Load the first file
sensor_msgs::PointCloud2::Ptr cloud (new sensor_msgs::PointCloud2);
if (!loadCloud (argv[p_file_indices[0]], *cloud))
return (-1);
// Apply the transform
sensor_msgs::PointCloud2 output;
compute (cloud, output, tform);
// Check if a scaling parameter has been given
double divider[3];
if (parse_3x_arguments (argc, argv, "-scale", divider[0], divider[1], divider[2]) > -1)
//......... part of the code omitted .........
Developer ID: kalectro, Project: pcl_groovy, Lines: 101, Source: transform_point_cloud.cpp
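One caveat: the -quat branch feeds Eigen::Quaternionf the raw parsed values, so a non-unit quaternion would scale the cloud as well as rotate it. A defensive sketch of the same conversion:
Eigen::Quaternionf q (w, x, y, z); // as parsed from -quat
tform.topLeftCorner<3, 3> () = q.normalized ().toRotationMatrix (); // unit-normalize first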
Example 14: volumetric_knt_cuda
int volumetric_knt_cuda(int argc, char **argv)
{
Timer timer;
int vol_size = vx_count * vx_size;
float half_vol_size = vol_size * 0.5f;
Eigen::Vector3i voxel_size(vx_size, vx_size, vx_size);
Eigen::Vector3i volume_size(vol_size, vol_size, vol_size);
Eigen::Vector3i voxel_count(vx_count, vx_count, vx_count);
int total_voxels = voxel_count.x() * voxel_count.y() * voxel_count.z();
std::cout << std::fixed
<< "Voxel Count : " << voxel_count.transpose() << std::endl
<< "Voxel Size : " << voxel_size.transpose() << std::endl
<< "Volume Size : " << volume_size.transpose() << std::endl
<< "Total Voxels : " << total_voxels << std::endl
<< std::endl;
timer.start();
KinectFrame knt(filepath);
timer.print_interval("Importing knt frame : ");
Eigen::Affine3f grid_affine = Eigen::Affine3f::Identity();
grid_affine.translate(Eigen::Vector3f(0, 0, half_vol_size));
grid_affine.scale(Eigen::Vector3f(1, 1, 1)); // z is negative inside of screen
Eigen::Matrix4f grid_matrix = grid_affine.matrix();
float knt_near_plane = 0.1f;
float knt_far_plane = 10240.0f;
Eigen::Matrix4f projection = perspective_matrix<float>(KINECT_V2_FOVY, KINECT_V2_DEPTH_ASPECT_RATIO, knt_near_plane, knt_far_plane);
Eigen::Matrix4f projection_inverse = projection.inverse();
Eigen::Matrix4f view_matrix = Eigen::Matrix4f::Identity();
std::vector<float4> vertices(knt.depth.size(), make_float4(0, 0, 0, 1));
std::vector<float4> normals(knt.depth.size(), make_float4(0, 0, 1, 1));
std::vector<Eigen::Vector2f> grid_voxels_params(total_voxels);
//
// setup image parameters
//
unsigned short image_width = KINECT_V2_DEPTH_WIDTH;
unsigned short image_height = image_width / aspect_ratio;
QImage img(image_width, image_height, QImage::Format::Format_RGBA8888);
img.fill(Qt::GlobalColor::gray);
uchar4* image_data = (uchar4*)img.bits();
//float4* debug_buffer = new float4[image_width * image_height];
//memset(debug_buffer, 0, image_width * image_height * sizeof(float4));
knt_cuda_setup(
vx_count, vx_size,
grid_matrix.data(),
projection.data(),
projection_inverse.data(),
*grid_voxels_params.data()->data(),
KINECT_V2_DEPTH_WIDTH,
KINECT_V2_DEPTH_HEIGHT,
KINECT_V2_DEPTH_MIN,
KINECT_V2_DEPTH_MAX,
vertices.data()[0],
normals.data()[0],
image_width,
image_height
);
timer.start();
knt_cuda_allocate();
knt_cuda_init_grid();
timer.print_interval("Allocating gpu : ");
timer.start();
knt_cuda_copy_host_to_device();
knt_cuda_copy_depth_buffer_to_device(knt.depth.data());
timer.print_interval("Copy host to device : ");
timer.start();
knt_cuda_normal_estimation();
timer.print_interval("Normal estimation : ");
timer.start();
knt_cuda_update_grid(view_matrix.data());
timer.print_interval("Update grid : ");
timer.start();
knt_cuda_grid_params_copy_device_to_host();
knt_cuda_copy_device_to_host();
timer.print_interval("Copy device to host : ");
//
// setup camera parameters
//
timer.start();
Eigen::Affine3f camera_to_world = Eigen::Affine3f::Identity();
float cam_z = -half_vol_size;
camera_to_world.scale(Eigen::Vector3f(1, 1, -1));
camera_to_world.translate(Eigen::Vector3f(half_vol_size, half_vol_size, cam_z));
//......... part of the code omitted .........
Developer ID: diegomazala, Project: QtKinect, Lines: 101, Source: Raycasting_gpu.cpp
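perspective_matrix is a project helper that is not shown. A sketch of an OpenGL-style implementation consistent with how it is called above (whether fovy is in degrees is an assumption):
#include <cmath>
template <typename T>
Eigen::Matrix<T, 4, 4> perspective_matrix (T fovy_deg, T aspect, T near_z, T far_z)
{
const T f = T (1) / std::tan (fovy_deg * T (M_PI) / T (360)); // cot(fovy / 2)
Eigen::Matrix<T, 4, 4> p = Eigen::Matrix<T, 4, 4>::Zero ();
p (0, 0) = f / aspect;
p (1, 1) = f;
p (2, 2) = (far_z + near_z) / (near_z - far_z);
p (2, 3) = (T (2) * far_z * near_z) / (near_z - far_z);
p (3, 2) = T (-1);
return p;
}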
Example 15: ComputePnP
void ComputePnP(const char* folder_name,int start_idx,int total_num,int jump_num){//Compute two-view PnP
int img_idx = start_idx;
int img_num = total_num;
int img_num2 = jump_num;
char imgname[64];
char pcdname[64];
char imgname2[64];
char pcdname2[64];
ofstream out_pose,out_pose2;
out_pose.open("RT.txt");
out_pose2.open("invRT.txt");
cv::Mat colorImage_1(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_8UC4);
cv::Mat pointCloud_XYZforRGB_1(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_32FC3,cv::Scalar::all(0));
cv::Mat colorImage_2(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_8UC4);
cv::Mat pointCloud_XYZforRGB_2(KINECT_IMAGE_HEGIHT,KINECT_IMAGE_WIDTH,CV_32FC3,cv::Scalar::all(0));
cv::Mat img1,img2;
/////////////////////PCL objects//////////////////////////
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_1 (new pcl::PointCloud<pcl::PointXYZRGBA>);
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_2 (new pcl::PointCloud<pcl::PointXYZRGBA>);
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_1f (new pcl::PointCloud<pcl::PointXYZRGBA>);
pcl::PointCloud<pcl::PointXYZRGBA>::Ptr cloud_2f (new pcl::PointCloud<pcl::PointXYZRGBA>);
pcl::visualization::PCLVisualizer vislm;
////////////////////Find matched points between two RGB images////////////////////////
ifstream cam_in("CamPara.txt");
cam_in>>CamIntrinsic[0][0]>>CamIntrinsic[0][1]>>CamIntrinsic[0][2]
>>CamIntrinsic[1][0]>>CamIntrinsic[1][1]>>CamIntrinsic[1][2]
>>CamIntrinsic[2][0]>>CamIntrinsic[2][1]>>CamIntrinsic[2][2];
cam_in.close();
const cv::Mat Camera_Matrix(3,3,CV_64F,CamIntrinsic);
const cv::Mat disCoef(1,5,CV_64F,DisCoef);
cv::Mat img_Matches;
int numKeyPoints = 400;
int numKeyPoints2 = 400;
RobustMatcher rMatcher;
cv::Ptr<cv::FeatureDetector> detector = new cv::FastFeatureDetector(0);
cv::Ptr<cv::FeatureDetector> detector2 = new cv::FastFeatureDetector(0);
cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor();
cv::Ptr<cv::DescriptorMatcher> Matcher= new cv::BFMatcher(cv::NORM_HAMMING);
rMatcher.setFeatureDetector(detector);
rMatcher.setDescriptorExtractor(extractor);
rMatcher.setDescriptorMatcher(Matcher);
std::vector<cv::KeyPoint> img1_keypoints;
cv::Mat img1_descriptors;
std::vector<cv::KeyPoint> img2_keypoints;
cv::Mat img2_descriptors;
std::vector<cv::DMatch > Matches;
//////////////////////PCL rigid motion estimation///////////////////////////
Eigen::Matrix4f TotaltransforMation = Eigen::Matrix4f::Identity();
Eigen::Matrix4f Ti = Eigen::Matrix4f::Identity();
//......... part of the code omitted .........
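The excerpt ends before the actual PnP solve. A sketch of the missing step with OpenCV, packing rvec/tvec into an Eigen::Matrix4f (objectPoints and imagePoints are hypothetical 3D-2D matches from the feature matching above):
// #include <opencv2/calib3d/calib3d.hpp>
cv::Mat rvec, tvec;
cv::solvePnPRansac (objectPoints, imagePoints, Camera_Matrix, disCoef, rvec, tvec);
cv::Mat R;
cv::Rodrigues (rvec, R); // 3x1 rotation vector -> 3x3 matrix
Eigen::Matrix4f T = Eigen::Matrix4f::Identity ();
for (int r = 0; r < 3; ++r)
{
for (int c = 0; c < 3; ++c)
T (r, c) = static_cast<float> (R.at<double> (r, c));
T (r, 3) = static_cast<float> (tvec.at<double> (r));
}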