This article collects typical usage examples of the C++ arma::Mat class. If you are wondering what the Mat class does, or how and where to use it, the selected code examples below may help.
Twenty code examples of the Mat class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: MapTokens

void MapTokens(const std::vector<std::string>& tokens,
               size_t& row,
               arma::Mat<eT>& matrix,
               MapType& maps,
               std::vector<Datatype>& types)
{
  // MissingPolicy allows double-type matrices only, because it uses NaN.
  static_assert(std::is_same<eT, double>::value, "You must use a double type "
      "matrix in order to apply MissingPolicy");

  std::stringstream token;
  for (size_t i = 0; i != tokens.size(); ++i)
  {
    token.str(tokens[i]);
    token >> matrix.at(row, i);

    // If the token is not a number, map it; or, if the token is a number but
    // is included in the missingSet, map it.
    if (token.fail() || missingSet.find(tokens[i]) != std::end(missingSet))
    {
      const eT val = static_cast<eT>(this->MapString(tokens[i], row, maps,
          types));
      matrix.at(row, i) = val;
    }

    token.clear();
  }
}

Author: BangLiu, Project: mlpack, Lines: 26, Source: missing_policy.hpp
Example 2: Convolution

static typename std::enable_if<
    std::is_same<Border, ValidConvolution>::value, void>::type
Convolution(const arma::Mat<eT>& input,
            const arma::Mat<eT>& filter,
            arma::Mat<eT>& output,
            const size_t dW = 1,
            const size_t dH = 1)
{
  output = arma::zeros<arma::Mat<eT> >((input.n_rows - filter.n_rows + 1) /
      dW, (input.n_cols - filter.n_cols + 1) / dH);

  // It seems to be about 3.5 times faster to use pointers instead of
  // filter(ki, kj) * input(leftInput + ki, topInput + kj) and output(i, j).
  eT* outputPtr = output.memptr();

  for (size_t j = 0; j < output.n_cols; ++j)
  {
    for (size_t i = 0; i < output.n_rows; ++i, outputPtr++)
    {
      const eT* kernelPtr = filter.memptr();
      for (size_t kj = 0; kj < filter.n_cols; ++kj)
      {
        const eT* inputPtr = input.colptr(kj + j * dW) + i * dH;
        for (size_t ki = 0; ki < filter.n_rows; ++ki, ++kernelPtr, ++inputPtr)
          *outputPtr += *kernelPtr * (*inputPtr);
      }
    }
  }
}

Author: sbrodehl, Project: mlpack, Lines: 29, Source: naive_convolution.hpp
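A minimal usage sketch for the valid-convolution rule above. It assumes mlpack's NaiveConvolution class template from naive_convolution.hpp, of which this Convolution() overload is a static member; the header paths and namespace follow mlpack 2.x and may differ between versions.

#include <mlpack/core.hpp>
#include <mlpack/methods/ann/convolution_rules/naive_convolution.hpp>

int main()
{
  arma::mat input(5, 5, arma::fill::randu);   // 5x5 "image"
  arma::mat filter(3, 3, arma::fill::randu);  // 3x3 kernel
  arma::mat output;

  // Valid convolution with the default stride: output is (5-3+1) x (5-3+1).
  mlpack::ann::NaiveConvolution<mlpack::ann::ValidConvolution>::Convolution(
      input, filter, output);

  output.print("3x3 valid convolution result:");
  return 0;
}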
Example 3: convert_matrix

void
convert_matrix(const arma::Mat<Tfrom>& densemat,
               csc_matrix<Tto, Idxto>& cscmat) {
  const Idxto m = densemat.n_rows;
  const Idxto n = densemat.n_cols;
  arma::Col<Idxto> new_col_offsets(n + 1, arma::fill::zeros);
  std::vector<Idxto> row_indices;
  std::vector<Tto> values;
  // Compute column sizes, and fill vectors of row indices and values.
  for (Idxto j(0); j < n; ++j) {
    for (Idxto i(0); i < m; ++i) {
      if (densemat(i, j) != 0) {
        ++new_col_offsets[j];
        row_indices.push_back(i);
        values.push_back(densemat(i, j));
      }
    }
  }
  // Turn the per-column counts into CSC column offsets (exclusive prefix sums).
  for (Idxto i(0); i < n; ++i)
    new_col_offsets[i + 1] += new_col_offsets[i];
  for (Idxto i(n); i > 0; --i)
    new_col_offsets[i] = new_col_offsets[i - 1];
  new_col_offsets[0] = 0;
  // Copy the data over.
  arma::Col<Idxto> new_row_indices(row_indices);
  arma::Col<Tto> new_values(values);
  cscmat.reset_nocopy(m, n, new_col_offsets, new_row_indices, new_values);
}

Author: vdeepak13, Project: sill, Lines: 30, Source: csc_matrix.hpp
Example 4: gpu_train_batch

inline void gpu_train_batch(FeedForward_Network<activation, error>& network,
    arma::Mat<float> inputs, arma::Mat<float> targets, int batch_size,
    float learning_rate = 0.8f, float momentum = 0.8f) {
  network.resize_activation(batch_size);
  Raw_FeedForward_Network<activation, error> raw_net = convert_to_raw(network);
  Raw_FeedForward_Network<activation, error>* d_network = network_to_gpu(raw_net);

  int batches_in_train = targets.n_rows / batch_size - 1;
  for (int i = 0; i < batches_in_train; ++i) {
    arma::Mat<float> input_slice = inputs.rows(i * batch_size, (i + 1) * batch_size - 1);
    Raw_Matrix raw_input = to_raw(input_slice);
    Raw_Matrix* d_input = matrix_to_gpu(raw_input);
    int num_trials = input_slice.n_rows;
    calculate_activation(num_trials, network.layer_sizes, d_network, d_input);
    // TODO: make this memory shared so we do not reallocate on every batch.
    free_gpu_matrix(d_input);

    arma::Mat<float> targets_slice = targets.rows(i * batch_size, (i + 1) * batch_size - 1);
    Raw_Matrix raw_targets = to_raw(targets_slice);
    Raw_Matrix* d_targets = matrix_to_gpu(raw_targets);
    backprop(num_trials, network.layer_sizes, d_network, d_targets, learning_rate, momentum);
    free_gpu_matrix(d_targets);
  }
  network_to_cpu_free(d_network, raw_net);
  update_from_raw(network, raw_net);
}

Author: Spottybadrabbit, Project: Neural-Net-Experiments, Lines: 32, Source: net_gpu.hpp
Example 5: Impute

/**
 * The Impute function searches through the input looking for mappedValue and
 * removes the whole row or column. The result overwrites the input.
 *
 * @param input Matrix that contains mappedValue.
 * @param mappedValue Value that the user wants to get rid of.
 * @param dimension Index of the dimension of the mappedValue.
 * @param columnMajor State of whether the input matrix is columnMajor or not.
 */
void Impute(arma::Mat<T>& input,
            const T& mappedValue,
            const size_t dimension,
            const bool columnMajor = true)
{
  std::vector<arma::uword> colsToKeep;

  if (columnMajor)
  {
    for (size_t i = 0; i < input.n_cols; ++i)
    {
      if (!(input(dimension, i) == mappedValue ||
            std::isnan(input(dimension, i))))
      {
        colsToKeep.push_back(i);
      }
    }
    input = input.cols(arma::uvec(colsToKeep));
  }
  else
  {
    for (size_t i = 0; i < input.n_rows; ++i)
    {
      if (!(input(i, dimension) == mappedValue ||
            std::isnan(input(i, dimension))))
      {
        colsToKeep.push_back(i);
      }
    }
    input = input.rows(arma::uvec(colsToKeep));
  }
}

Author: YaweiZhao, Project: mlpack, Lines: 41, Source: listwise_deletion.hpp
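A hypothetical usage sketch for Impute() above, assuming it is callable as a free function templated on T (in mlpack it lives inside a deletion-policy class): drop every column whose entry in dimension 1 equals the mapped value.

arma::mat input = { { 1.0, 2.0, 3.0 },
                    { 4.0, 0.0, 6.0 } };

// Column-major data: inspect row (dimension) 1 and drop columns holding 0.0.
Impute(input, 0.0, 1);

// input is now the 2x2 matrix { { 1, 3 }, { 4, 6 } }: the middle column, whose
// entry in dimension 1 was the mapped value, has been removed.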
Example 6: save

void
save(OutputArchive& ar, const arma::Mat<T>& mat, const unsigned int version) {
  size_t size = mat.size();
  ar & mat.n_cols;
  ar & mat.n_rows;
  ar & make_array(mat.memptr(), size);
}

Author: dapicester, Project: image-search, Lines: 9, Source: serialization_matrix.hpp
Example 7: compute_column_rms

arma::Col<double> compute_column_rms(const arma::Mat<double>& data) {
  const long n_cols = data.n_cols;
  arma::Col<double> rms(n_cols);
  for (long i = 0; i < n_cols; ++i) {
    const double dot = arma::dot(data.col(i), data.col(i));
    rms(i) = std::sqrt(dot / (data.col(i).n_rows - 1));
  }
  return rms;
}

Author: Pfern, Project: sailfish, Lines: 9, Source: PCAUtils.cpp
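A minimal usage sketch for the helper above; the data shape is made up for illustration.

arma::mat data(100, 3, arma::fill::randn);          // 100 samples, 3 features
arma::Col<double> rms = compute_column_rms(data);   // one RMS value per column
rms.print("per-column RMS:");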
Example 8: elementMapper

void ConcreteGridView<DuneGridView>::getRawElementDataImpl(
    arma::Mat<CoordinateType>& vertices,
    arma::Mat<int>& elementCorners,
    arma::Mat<char>& auxData) const
{
  typedef typename DuneGridView::Grid DuneGrid;
  typedef typename DuneGridView::IndexSet DuneIndexSet;
  const int dimGrid = DuneGrid::dimension;
  const int dimWorld = DuneGrid::dimensionworld;
  const int codimVertex = dimGrid;
  const int codimElement = 0;
  typedef Dune::LeafMultipleCodimMultipleGeomTypeMapper<DuneGrid,
      Dune::MCMGElementLayout> DuneElementMapper;
  typedef typename DuneGridView::template Codim<codimVertex>::Iterator
      DuneVertexIterator;
  typedef typename DuneGridView::template Codim<codimElement>::Iterator
      DuneElementIterator;
  typedef typename DuneGridView::template Codim<codimVertex>::Geometry
      DuneVertexGeometry;
  typedef typename DuneGridView::template Codim<codimElement>::Geometry
      DuneElementGeometry;
  typedef typename DuneGrid::ctype ctype;

  const DuneIndexSet& indexSet = m_dune_gv.indexSet();

  vertices.set_size(dimWorld, indexSet.size(codimVertex));
  for (DuneVertexIterator it = m_dune_gv.template begin<codimVertex>();
       it != m_dune_gv.template end<codimVertex>(); ++it)
  {
    size_t index = indexSet.index(*it);
    const DuneVertexGeometry& geom = it->geometry();
    Dune::FieldVector<ctype, dimWorld> vertex = geom.corner(0);
    for (int i = 0; i < dimWorld; ++i)
      vertices(i, index) = vertex[i];
  }

  const int MAX_CORNER_COUNT = dimWorld == 2 ? 2 : 4;
  DuneElementMapper elementMapper(m_dune_gv.grid());

  elementCorners.set_size(MAX_CORNER_COUNT, elementMapper.size());
  for (DuneElementIterator it = m_dune_gv.template begin<codimElement>();
       it != m_dune_gv.template end<codimElement>(); ++it)
  {
    size_t index = elementMapper.map(*it);
    const Dune::GenericReferenceElement<ctype, dimGrid>& refElement =
        Dune::GenericReferenceElements<ctype, dimGrid>::general(it->type());
    const int cornerCount = refElement.size(codimVertex);
    assert(cornerCount <= MAX_CORNER_COUNT);
    for (int i = 0; i < cornerCount; ++i)
      elementCorners(i, index) = indexSet.subIndex(*it, i, codimVertex);
    for (int i = cornerCount; i < MAX_CORNER_COUNT; ++i)
      elementCorners(i, index) = -1;
  }

  auxData.set_size(0, elementCorners.n_cols);
}

Author: UCL, Project: bempp, Lines: 55, Source: concrete_grid_view_imp.hpp
Example 9: sortedCombinations

// Predict the rating for a group of user/item combinations.
void CF::Predict(const arma::Mat<size_t>& combinations,
                 arma::vec& predictions) const
{
  // First, for nearest neighbor search, stretch the H matrix.
  arma::mat l = arma::chol(w.t() * w);
  arma::mat stretchedH = l * h; // Due to the Armadillo API, l is L^T.

  // Now, we must determine those query indices we need to find the nearest
  // neighbors for. This is easiest if we just sort the combinations matrix.
  arma::Mat<size_t> sortedCombinations(combinations.n_rows,
                                       combinations.n_cols);
  arma::uvec ordering = arma::sort_index(combinations.row(0).t());
  for (size_t i = 0; i < ordering.n_elem; ++i)
    sortedCombinations.col(i) = combinations.col(ordering[i]);

  // Now, we have to get the list of unique users we will be searching for.
  arma::Col<size_t> users = arma::unique(combinations.row(0).t());

  // Assemble our query matrix from the stretchedH matrix.
  arma::mat queries(stretchedH.n_rows, users.n_elem);
  for (size_t i = 0; i < queries.n_cols; ++i)
    queries.col(i) = stretchedH.col(users[i]);

  // Now calculate the neighborhood of these users.
  neighbor::KNN a(stretchedH);
  arma::mat distances;
  arma::Mat<size_t> neighborhood;
  a.Search(queries, numUsersForSimilarity, neighborhood, distances);

  // Now that we have the neighborhoods we need, calculate the predictions.
  predictions.set_size(combinations.n_cols);

  size_t user = 0; // Cumulative user count, because we are doing it in order.
  for (size_t i = 0; i < sortedCombinations.n_cols; ++i)
  {
    // Could this be made faster by calculating dot products for multiple items
    // at once?
    double rating = 0.0;

    // Map the combination's user to the user ID used for kNN.
    while (users[user] < sortedCombinations(0, i))
      ++user;

    for (size_t j = 0; j < neighborhood.n_rows; ++j)
      rating += arma::as_scalar(w.row(sortedCombinations(1, i)) *
          h.col(neighborhood(j, user)));
    rating /= neighborhood.n_rows;

    predictions(ordering[i]) = rating;
  }
}

Author: AmesianX, Project: mlpack, Lines: 53, Source: cf.cpp
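A hedged sketch of the calling convention that follows from the code above: row 0 of combinations holds user IDs and row 1 holds item IDs. It assumes a CF object cf that has already been trained on rating data.

arma::Mat<size_t> combinations(2, 2);
combinations(0, 0) = 0; combinations(1, 0) = 5;   // user 0, item 5
combinations(0, 1) = 3; combinations(1, 1) = 7;   // user 3, item 7

arma::vec predictions;
cf.Predict(combinations, predictions);   // predictions(i) is the rating for column i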
Example 10: enforce_positive_sign_by_column

void enforce_positive_sign_by_column(arma::Mat<double>& data) {
  for (long i = 0; i < long(data.n_cols); ++i) {
    const double max = arma::max(data.col(i));
    const double min = arma::min(data.col(i));
    bool change_sign = false;
    if (std::abs(max) >= std::abs(min)) {
      if (max < 0) change_sign = true;
    } else {
      if (min < 0) change_sign = true;
    }
    if (change_sign) data.col(i) *= -1;
  }
}

Author: Pfern, Project: sailfish, Lines: 13, Source: PCAUtils.cpp
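A minimal sketch of the effect (the small matrix is made up for illustration): after the call, the entry with the largest magnitude in each column is positive.

arma::mat m = { { -1.0,  2.0 },
                { -3.0, -1.0 } };

enforce_positive_sign_by_column(m);

// Column 0 is flipped to { 1, 3 } because its dominant entry (-3) was negative;
// column 1 stays { 2, -1 } because its dominant entry (2) is already positive.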
Example 11: calculateJacobian

void calculateJacobian(const arma::Mat<std::complex<double> >& myOffsets,
                       arma::Mat<double>& myJacobian,
                       arma::Col<std::complex<double> >& myTargetsCalculated,
                       arma::Col<std::complex<double> >& myCurrentGuess,
                       void myCalculateDependentVariables(const arma::Mat<std::complex<double> >&,
                                                          const arma::Col<std::complex<double> >&,
                                                          arma::Col<std::complex<double> >&))
{
  // Calculate a temporary, unperturbed target evaluation, as is needed for
  // solving for the updated-guess formula.
  arma::Col<std::complex<double> > unperturbedTargetsCalculated(NUMDIMENSIONS);
  unperturbedTargetsCalculated.fill(0.0);
  myCalculateDependentVariables(myOffsets, myCurrentGuess, unperturbedTargetsCalculated);

  std::complex<double> oldGuessValue(0.0, 0.0);

  // Each iteration fills a column in the Jacobian.
  // The Jacobian takes this form:
  //
  //   dF0/dx0   dF0/dx1
  //   dF1/dx0   dF1/dx1
  //
  for (int j = 0; j < NUMDIMENSIONS; j++)
  {
    // Store the old element value, then perturb the current value.
    oldGuessValue = myCurrentGuess[j];
    myCurrentGuess[j] += std::complex<double>(0.0, PROBEDISTANCE);

    // Evaluate the functions for the perturbed guess.
    myCalculateDependentVariables(myOffsets, myCurrentGuess, myTargetsCalculated);

    // The column of the Jacobian that goes with the independent variable we
    // perturbed can be determined using the complex-step finite-difference
    // formula; the arma::Col type allows this to be expressed as a single
    // vector operation.
    myJacobian.col(j) = arma::imag(myTargetsCalculated);
    myJacobian.col(j) *= pow(PROBEDISTANCE, -1.0);

    myCurrentGuess[j] = oldGuessValue;
  }

  // Reset to the unperturbed targets, so we don't waste a function evaluation.
  myTargetsCalculated = unperturbedTargetsCalculated;
}

Author: utsa-idl, Project: NewtonRaphsonExamples, Lines: 48, Source: complex_step.cpp
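This routine implements complex-step differentiation: column j of the Jacobian is Im(F(x + i*h*e_j)) / h, with h = PROBEDISTANCE. A hypothetical callback matching the myCalculateDependentVariables parameter, for a two-dimensional system (NUMDIMENSIONS == 2), might look like this; the system itself is made up for illustration.

// F0(x) = x0^2 - x1,  F1(x) = x0 + x1^2, evaluated with complex arithmetic so
// that the imaginary perturbation propagates through to the targets.
void exampleSystem(const arma::Mat<std::complex<double> >& /*offsets*/,
                   const arma::Col<std::complex<double> >& guess,
                   arma::Col<std::complex<double> >& targets)
{
  targets[0] = guess[0] * guess[0] - guess[1];
  targets[1] = guess[0] + guess[1] * guess[1];
}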
Example 12: compute_column_means

arma::Col<double> compute_column_means(const arma::Mat<double>& data) {
  const long n_cols = data.n_cols;
  arma::Col<double> means(n_cols);
  for (long i = 0; i < n_cols; ++i)
    means(i) = arma::mean(data.col(i));
  return means;
}

Author: Pfern, Project: sailfish, Lines: 7, Source: PCAUtils.cpp
Example 13: Arma_mat_to_cv_mat

void Arma_mat_to_cv_mat(const arma::Mat<T>& arma_mat_in, cv::Mat_<T>& cv_mat_out)
{
  // Armadillo stores data column-major while OpenCV is row-major, so wrapping
  // the raw buffer yields the transpose; transposing once more restores the
  // original orientation.
  cv::transpose(cv::Mat_<T>(static_cast<int>(arma_mat_in.n_cols),
                            static_cast<int>(arma_mat_in.n_rows),
                            const_cast<T*>(arma_mat_in.memptr())),
                cv_mat_out);
}

Author: hnanhtuan, Project: urban-track_LPR, Lines: 7, Source: LetterClassifier.cpp
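A brief usage sketch of the converter above; the matrix values are invented for illustration.

arma::mat a = { { 1.0, 2.0, 3.0 },
                { 4.0, 5.0, 6.0 } };

cv::Mat_<double> m;
Arma_mat_to_cv_mat(a, m);     // m is 2x3 and holds the same values as a

std::cout << m << std::endl;  // prints the rows [1, 2, 3; 4, 5, 6]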
Example 14: invalid_argument

void FastMKS<KernelType, TreeType>::Search(TreeType* queryTree,
                                           const size_t k,
                                           arma::Mat<size_t>& indices,
                                           arma::mat& kernels)
{
  // If either naive mode or single mode is specified, this must fail.
  if (naive || singleMode)
  {
    throw std::invalid_argument("can't call Search() with a query tree when "
        "single mode or naive search is enabled");
  }

  // No remapping will be necessary because we are using the cover tree.
  indices.set_size(k, queryTree->Dataset().n_cols);
  kernels.set_size(k, queryTree->Dataset().n_cols);
  kernels.fill(-DBL_MAX);

  Timer::Start("computing_products");
  typedef FastMKSRules<KernelType, TreeType> RuleType;
  RuleType rules(referenceSet, queryTree->Dataset(), indices, kernels,
      metric.Kernel());

  typename TreeType::template DualTreeTraverser<RuleType> traverser(rules);
  traverser.Traverse(*queryTree, *referenceTree);

  Log::Info << rules.BaseCases() << " base cases." << std::endl;
  Log::Info << rules.Scores() << " scores." << std::endl;

  Timer::Stop("computing_products");
}

Author: 0x0all, Project: mlpack, Lines: 31, Source: fastmks_impl.hpp
Example 15: Backward

void Backward(const arma::Cube<eT>& input,
              const arma::Mat<eT>& gy,
              arma::Cube<eT>& g)
{
  // Generate a cube using the backpropagated error matrix.
  arma::Cube<eT> mappedError = arma::zeros<arma::cube>(input.n_rows,
      input.n_cols, input.n_slices);

  for (size_t s = 0, j = 0; s < mappedError.n_slices; s += gy.n_cols, j++)
  {
    for (size_t i = 0; i < gy.n_cols; i++)
    {
      arma::Col<eT> temp = gy.col(i).subvec(
          j * input.n_rows * input.n_cols,
          (j + 1) * input.n_rows * input.n_cols - 1);

      mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
          input.n_rows, input.n_cols);
    }
  }

  arma::Cube<eT> derivative;
  Deriv(input, derivative);
  g = mappedError % derivative;
}

Author: DeepCV, Project: mlpack, Lines: 25, Source: hard_tanh_layer.hpp
Example 16: InsertNeighbor

inline force_inline
double LSHSearch<SortPolicy>::BaseCase(arma::mat& distances,
                                       arma::Mat<size_t>& neighbors,
                                       const size_t queryIndex,
                                       const size_t referenceIndex)
{
  // If the datasets are the same, then this search is only using one dataset
  // and we should not return identical points.
  if ((&querySet == &referenceSet) && (queryIndex == referenceIndex))
    return 0.0;

  const double distance = metric::EuclideanDistance::Evaluate(
      querySet.unsafe_col(queryIndex), referenceSet.unsafe_col(referenceIndex));

  // If this distance is better than any of the current candidates, the
  // SortDistance() function will give us the position to insert it into.
  arma::vec queryDist = distances.unsafe_col(queryIndex);
  arma::Col<size_t> queryIndices = neighbors.unsafe_col(queryIndex);
  size_t insertPosition = SortPolicy::SortDistance(queryDist, queryIndices,
      distance);

  // SortDistance() returns (size_t() - 1) if we shouldn't add it.
  if (insertPosition != (size_t() - 1))
    InsertNeighbor(distances, neighbors, queryIndex, insertPosition,
        referenceIndex, distance);

  return distance;
}

Author: 0x0all, Project: mlpack, Lines: 28, Source: lsh_search_impl.hpp
Example 17: FeedBackward

void FeedBackward(const arma::Cube<eT>& inputActivation,
                  const arma::Mat<eT>& error,
                  arma::Cube<eT>& delta)
{
  // Generate a cube from the error matrix.
  arma::Cube<eT> mappedError = arma::zeros<arma::cube>(inputActivation.n_rows,
      inputActivation.n_cols, inputActivation.n_slices);

  for (size_t s = 0, j = 0; s < mappedError.n_slices; s += error.n_cols, j++)
  {
    for (size_t i = 0; i < error.n_cols; i++)
    {
      arma::Col<eT> temp = error.col(i).subvec(
          j * inputActivation.n_rows * inputActivation.n_cols,
          (j + 1) * inputActivation.n_rows * inputActivation.n_cols - 1);

      mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
          inputActivation.n_rows, inputActivation.n_cols);
    }
  }

  // Apply the dropout mask and scaling factor to the backpropagated error.
  delta = mappedError % mask * scale;
}

Author: suspy, Project: mlpack, Lines: 25, Source: dropout_layer.hpp
Example 18: to_raw

inline Raw_Matrix to_raw(arma::Mat<float>& mat) {
  Raw_Matrix matrix;
  matrix.n_rows = mat.n_rows;
  matrix.n_cols = mat.n_cols;
  matrix.data = mat.memptr();  // shares the matrix's buffer; no copy is made
  return matrix;
}

Author: Spottybadrabbit, Project: Neural-Net-Experiments, Lines: 7, Source: net_raw_utils.hpp
Example 19: Convolution

static typename std::enable_if<
    std::is_same<Border, FullConvolution>::value, void>::type
Convolution(const arma::Mat<eT>& input,
            const arma::Mat<eT>& filter,
            arma::Mat<eT>& output)
{
  // In the case of a full convolution, outputRows and outputCols don't
  // represent the true output size when the padLastDim parameter is set;
  // instead they are the working size.
  const size_t outputRows = input.n_rows + 2 * (filter.n_rows - 1);
  size_t outputCols = input.n_cols + 2 * (filter.n_cols - 1);

  if (padLastDim)
    outputCols++;

  // Pad the filter and input to the working output shape.
  arma::Mat<eT> inputPadded = arma::zeros<arma::Mat<eT> >(outputRows,
      outputCols);
  inputPadded.submat(filter.n_rows - 1, filter.n_cols - 1,
      filter.n_rows - 1 + input.n_rows - 1,
      filter.n_cols - 1 + input.n_cols - 1) = input;

  arma::Mat<eT> filterPadded = filter;
  filterPadded.resize(outputRows, outputCols);

  // Perform the FFT and inverse FFT.
  output = arma::real(ifft2(arma::fft2(inputPadded) % arma::fft2(
      filterPadded)));

  // Extract the region of interest. We don't need to handle the padLastDim
  // parameter in a special way; we just cut it out of the output matrix.
  output = output.submat(filter.n_rows - 1, filter.n_cols - 1,
      2 * (filter.n_rows - 1) + input.n_rows - 1,
      2 * (filter.n_cols - 1) + input.n_cols - 1);
}

Author: Andrew-He, Project: mlpack, Lines: 35, Source: fft_convolution.hpp
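A usage sketch mirroring Example 2, this time for the FFT-based full-convolution rule. It assumes mlpack's FFTConvolution class template from fft_convolution.hpp, of which the overload above is a static member; the full convolution of a 5x5 input with a 3x3 filter is (5+3-1) x (5+3-1) = 7x7.

#include <mlpack/core.hpp>
#include <mlpack/methods/ann/convolution_rules/fft_convolution.hpp>

int main()
{
  arma::mat input(5, 5, arma::fill::randu);
  arma::mat filter(3, 3, arma::fill::randu);
  arma::mat output;

  mlpack::ann::FFTConvolution<mlpack::ann::FullConvolution>::Convolution(
      input, filter, output);

  output.print("7x7 full convolution result:");
  return 0;
}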
Example 20: Forward

void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
{
  arma::mat maxInput = arma::repmat(arma::max(input), input.n_rows, 1);
  output = (maxInput - input);

  // Fast polynomial approximation of exp(-x) for positive x; the accuracy is
  // within about 0.00001 of the exact value. Credits go to Leon Bottou.
  output.transform([](double x)
  {
    //! Fast approximation of exp(-x) for positive x.
    static constexpr double A0 = 1.0;
    static constexpr double A1 = 0.125;
    static constexpr double A2 = 0.0078125;
    static constexpr double A3 = 0.00032552083;
    static constexpr double A4 = 1.0172526e-5;

    if (x < 13.0)
    {
      double y = A0 + x * (A1 + x * (A2 + x * (A3 + x * A4)));
      y *= y;
      y *= y;
      y *= y;
      y = 1 / y;
      return y;
    }

    return 0.0;
  });

  output = input - (maxInput + std::log(arma::accu(output)));
}

Author: AmesianX, Project: mlpack, Lines: 32, Source: log_softmax_layer.hpp
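A minimal usage sketch for the log-softmax forward pass above, assuming the overload is callable on a single column (for a one-column input, the arma::accu over the whole matrix reduces to the usual per-column normalisation):

arma::mat input = { { 1.0 }, { 2.0 }, { 3.0 } };   // one 3-element column
arma::mat output;

Forward(input, output);

// output(i) is approximately input(i) - log(exp(1) + exp(2) + exp(3)), so
// arma::accu(arma::exp(output)) is approximately 1.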
Note: The arma::Mat class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.