本文整理汇总了C++中SGMatrix类的典型用法代码示例。如果您正苦于以下问题:C++ SGMatrix类的具体用法?C++ SGMatrix怎么用?C++ SGMatrix使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了SGMatrix类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: update_delta_cache
void CECOCIHDDecoder::update_delta_cache(const SGMatrix<int32_t> codebook)
{
if (codebook.matrix == m_codebook.matrix)
return; // memory address the same
if (codebook.num_cols == m_codebook.num_cols && codebook.num_rows == m_codebook.num_rows)
{
bool the_same = true;
for (int32_t i=0; i < codebook.num_rows && the_same; ++i)
for (int32_t j=0; j < codebook.num_cols && the_same; ++j)
if (codebook(i,j) != m_codebook(i,j))
the_same = false;
if (the_same)
return; // no need to update delta
}
m_codebook = codebook; // operator=
m_delta = SGMatrix<float64_t>(codebook.num_cols, codebook.num_cols);
m_delta.zero();
for (int32_t i=0; i < codebook.num_cols; ++i)
{
for (int32_t j=i+1; j < codebook.num_cols; ++j)
{
m_delta(i, j) = m_delta(j, i) =
CECOCUtil::hamming_distance(codebook.get_column_vector(i), codebook.get_column_vector(j), codebook.num_rows);
}
}
// compute inverse of delta
SGVector<int32_t> IPIV(m_delta.num_cols);
clapack_dgetrf(CblasColMajor, m_delta.num_cols, m_delta.num_cols, m_delta.matrix, m_delta.num_cols, IPIV.vector);
clapack_dgetri(CblasColMajor, m_delta.num_cols, m_delta.matrix, m_delta.num_cols, IPIV.vector);
}
开发者ID:AlexBinder,项目名称:shogun,代码行数:33,代码来源:ECOCIHDDecoder.cpp
示例2: SGReferencedData
/** Construct a vector view over a matrix: the matrix's num_rows*num_cols
 *  elements are exposed as one flat vector without copying. The underlying
 *  buffer is shared; lifetime is managed through SGReferencedData's
 *  reference counting. Only CPU-resident matrices are accepted.
 */
SGVector<T>::SGVector(SGMatrix<T> matrix)
    : SGReferencedData(matrix), vlen(matrix.num_cols * matrix.num_rows),
    gpu_ptr(NULL)
{
    ASSERT(!matrix.on_gpu())
    vector = matrix.data(); // alias the matrix storage, no copy
    m_on_gpu.store(false, std::memory_order_release);
}
开发者ID:DEVESHTARASIA,项目名称:shogun,代码行数:8,代码来源:SGVector.cpp
示例3: build_factor_graph
/** Build one factor graph per sample (unary factor per class, pairwise
 *  factor per edge of the class graph) and the matching ground-truth
 *  observations with uniform loss weights. */
void build_factor_graph(MultilabelParameter param, SGMatrix<float64_t> feats, SGMatrix<int32_t> labels,
    CFactorGraphFeatures * fg_feats, CFactorGraphLabels * fg_labels,
    const DynArray<CTableFactorType *>& v_ftp_u,
    const DynArray<CTableFactorType *>& v_ftp_t)
{
    const int32_t num_samples = labels.num_cols;
    const int32_t num_classes = labels.num_rows;
    const int32_t feat_dim = feats.num_rows;

    SGMatrix<int32_t> edges = get_edge_list(param.graph_type, num_classes);
    const int32_t num_edges = edges.num_rows;

    for (int32_t s = 0; s < num_samples; s++)
    {
        // every variable has NUM_STATUS states
        SGVector<int32_t> cards(num_classes);
        SGVector<int32_t>::fill_vector(cards.vector, cards.vlen, NUM_STATUS);
        CFactorGraph * graph = new CFactorGraph(cards);

        // copy the s-th feature column into its own vector
        SGVector<float64_t> x(feat_dim);
        memcpy(x.vector, feats.get_column_vector(s), feat_dim * sizeof(float64_t));

        // unary factors, one per class
        for (int32_t c = 0; c < num_classes; c++)
        {
            SGVector<int32_t> vars(1);
            vars[0] = c;
            graph->add_factor(new CFactor(v_ftp_u[c], vars, x));
        }

        // pairwise factors, one per edge
        for (int32_t e = 0; e < num_edges; e++)
        {
            SGVector<float64_t> w(1);
            w[0] = 1.0;
            SGVector<int32_t> vars = edges.get_row_vector(e);
            graph->add_factor(new CFactor(v_ftp_t[e], vars, w));
        }

        // register the graph instance
        fg_feats->add_sample(graph);

        // ground-truth states plus uniform per-class loss weights
        SGVector<int32_t> truth(num_classes);
        memcpy(truth.vector, labels.get_column_vector(s), num_classes * sizeof(int32_t));
        SGVector<float64_t> loss_w(num_classes);
        SGVector<float64_t>::fill_vector(loss_w.vector, loss_w.vlen, 1.0/num_classes);
        fg_labels->add_label(new CFactorGraphObservation(truth, loss_w));
    }
}
开发者ID:DrahmA,项目名称:shogun,代码行数:56,代码来源:so_fg_multilabel.cpp
示例4: cblas_ddot
/** Extend the Cholesky factor R of (X_active' * X_active) after adding the
 *  column i_max_corr to the active set, without refactorizing from scratch. */
SGMatrix<float64_t> CLeastAngleRegression::cholesky_insert(
    SGMatrix<float64_t> X, SGMatrix<float64_t> R, int32_t i_max_corr)
{
    // squared norm of the newly activated column: X[:,k]' * X[:,k]
    float64_t diag_k = cblas_ddot(X.num_rows, X.get_column_vector(i_max_corr), 1,
        X.get_column_vector(i_max_corr), 1);

    // first active variable: the factor is a 1x1 matrix
    if (m_num_active == 0)
    {
        SGMatrix<float64_t> R_new(1, 1);
        R_new(0, 0) = CMath::sqrt(diag_k);
        return R_new;
    }

    // col_k[i] = X[:,k]' * X[:,active_i], the new column of (X'X)
    vector<float64_t> col_k(m_num_active);
    for (int32_t idx = 0; idx < m_num_active; ++idx)
        col_k[idx] = cblas_ddot(X.num_rows, X.get_column_vector(i_max_corr), 1,
            X.get_column_vector(m_active_set[idx]), 1);

    // triangular solve R' * R_k = col_k for R_k
    vector<float64_t> R_k(col_k);
    cblas_dtrsm(CblasColMajor, CblasLeft, CblasUpper, CblasTrans, CblasNonUnit, m_num_active, 1,
        1, R.matrix, m_num_active, &R_k[0], m_num_active);

    // diagonal entry closing the new factor
    float64_t R_kk = CMath::sqrt(diag_k -
        cblas_ddot(m_num_active, &R_k[0], 1, &R_k[0], 1));

    // assemble new_R = [R R_k; zeros(1,.) R_kk]
    SGMatrix<float64_t> R_new(m_num_active+1, m_num_active+1);
    for (int32_t r = 0; r < m_num_active; ++r)
        for (int32_t c = 0; c < m_num_active; ++c)
            R_new(r, c) = R(r, c);
    for (int32_t r = 0; r < m_num_active; ++r)
    {
        R_new(r, m_num_active) = R_k[r];
        R_new(m_num_active, r) = 0;
    }
    R_new(m_num_active, m_num_active) = R_kk;
    return R_new;
}
开发者ID:behollis,项目名称:muViewBranch,代码行数:48,代码来源:LeastAngleRegression.cpp
示例5: plane_rot
/** Compute the 2x2 Givens rotation G that annihilates the second component:
 *  G * [x0; x1] = [r; 0]. The rotated pair is returned in y0 (= r) and y1 (= 0);
 *  when x1 is already zero, G is the identity and the inputs pass through. */
static void plane_rot(float64_t x0, float64_t x1,
    float64_t &y0, float64_t &y1, SGMatrix<float64_t> &G)
{
    G.zero();
    if (x1 != 0)
    {
        float64_t r = CMath::sqrt(x0*x0 + x1*x1);
        float64_t c = x0 / r; // cosine of the rotation angle
        float64_t s = x1 / r; // sine of the rotation angle
        G(0,0) = c;
        G(0,1) = s;
        G(1,0) = -s;
        G(1,1) = c;
        y0 = r;
        y1 = 0;
    }
    else
    {
        // nothing to eliminate: identity rotation
        G(0,0) = G(1,1) = 1;
        y0 = x0;
        y1 = x1;
    }
}
开发者ID:behollis,项目名称:muViewBranch,代码行数:25,代码来源:LeastAngleRegression.cpp
示例6: gen_rand_data
/** Fill lab/feat with a toy two-class problem: the first half of the samples
 *  gets label -1 and features shifted by +dist, the second half gets label +1
 *  and features shifted by -dist. Both are printed for inspection. */
void gen_rand_data(SGVector<float64_t> lab, SGMatrix<float64_t> feat,
    float64_t dist)
{
    const index_t dims = feat.num_rows;
    const index_t num = lab.vlen;

    for (int32_t i = 0; i < num; i++)
    {
        const bool negative_class = (i < num/2);
        lab[i] = negative_class ? -1.0 : 1.0;
        // separate the classes by shifting uniform noise in opposite directions
        const float64_t offset = negative_class ? dist : -dist;
        for (int32_t j = 0; j < dims; j++)
            feat(j, i) = CMath::random(0.0, 1.0) + offset;
    }

    lab.display_vector("lab");
    feat.display_matrix("feat");
}
开发者ID:fuxiang90,项目名称:code-fuxiang90,代码行数:26,代码来源:1.cpp
示例7:
/** Replace the stored feature matrix with a deep copy of src and refresh the
 *  feature/vector counts and cache. Disallowed while a subset is active,
 *  since the copy would bypass the subset view.
 */
template<class ST> void CDenseFeatures<ST>::copy_feature_matrix(SGMatrix<ST> src)
{
    if (m_subset_stack->has_subsets())
        SG_ERROR("A subset is set, cannot call copy_feature_matrix\n")

    free_feature_matrix();
    feature_matrix = src.clone(); // deep copy, independent of the caller's matrix
    num_features = src.num_rows;  // rows = feature dimension
    num_vectors = src.num_cols;   // columns = examples
    initialize_cache();
}
开发者ID:cwidmer,项目名称:shogun,代码行数:11,代码来源:DenseFeatures.cpp
示例8: mv
/** Combine an ensemble-result matrix row by row: each row holds the
 *  per-learner outputs for one sample and is reduced via the vector
 *  overload of combine(). Requires one weight per ensemble member. */
SGVector<float64_t> CWeightedMajorityVote::combine(const SGMatrix<float64_t>& ensemble_result) const
{
    REQUIRE(m_weights.vlen == ensemble_result.num_cols, "The number of results and weights does not match!");

    SGVector<float64_t> combined(ensemble_result.num_rows);
    for (index_t row = 0; row < ensemble_result.num_rows; ++row)
    {
        SGVector<float64_t> row_outputs = ensemble_result.get_row_vector(row);
        combined[row] = combine(row_outputs);
    }
    return combined;
}
开发者ID:42MachineLearning,项目名称:shogun,代码行数:12,代码来源:WeightedMajorityVote.cpp
示例9: read_data
void read_data(const char * fname, SGMatrix<int32_t>& labels, SGMatrix<float64_t>& feats)
{
// sparse data from matrix
CLibSVMFile * svmfile = new CLibSVMFile(fname);
SGSparseVector<float64_t>* spv_feats;
SGVector<float64_t>* pv_labels;
int32_t dim_feat;
int32_t num_samples;
int32_t num_classes;
svmfile->get_sparse_matrix(spv_feats, dim_feat, num_samples, pv_labels, num_classes);
SG_SPRINT("Number of the samples: %d\n", num_samples);
SG_SPRINT("Dimention of the feature: %d\n", dim_feat+1);
SG_SPRINT("Number of classes: %d\n", num_classes);
feats = SGMatrix<float64_t>(dim_feat+1, num_samples);
labels = SGMatrix<int32_t>(num_classes, num_samples);
feats.zero();
labels.zero();
for (int32_t i = 0; i < num_samples; i++)
{
SGVector<float64_t> v_feat = spv_feats[i].get_dense();
SGVector<float64_t> v_labels = pv_labels[i];
for (int32_t f = 0; f < v_feat.size(); f++)
feats(f, i) = v_feat[f];
feats(dim_feat, i) = 1.0; // bias
for (int32_t l = 0; l < v_labels.size(); l++)
labels((int32_t)v_labels[l], i) = 1;
}
SG_UNREF(svmfile);
SG_FREE(spv_feats);
SG_FREE(pv_labels);
}
开发者ID:DrahmA,项目名称:shogun,代码行数:40,代码来源:so_fg_multilabel.cpp
示例10: decide_label
/** Inverse-Hamming-distance decoding: compute the Hamming distances from the
 *  binarized classifier outputs to every codeword, weight them by the cached
 *  inverse delta matrix, and return the best-scoring codeword index. */
int32_t CECOCIHDDecoder::decide_label(const SGVector<float64_t> outputs, const SGMatrix<int32_t> codebook)
{
    update_delta_cache(codebook);

    // Hamming distance from the binarized outputs to each codeword (column)
    SGVector<float64_t> query = binarize(outputs);
    SGVector<float64_t> dists(codebook.num_cols);
    for (int32_t k = 0; k < codebook.num_cols; ++k)
        dists[k] = CECOCUtil::hamming_distance(query.vector, codebook.get_column_vector(k), query.vlen);

    // scores = m_delta * dists (dense matrix-vector product)
    SGVector<float64_t> scores(codebook.num_cols);
    scores.zero();
    cblas_dgemv(CblasColMajor, CblasNoTrans, m_delta.num_cols, m_delta.num_cols,
        1, m_delta.matrix, m_delta.num_cols, dists.vector, 1, 1, scores.vector, 1);

    return SGVector<float64_t>::arg_max(scores.vector, 1, scores.vlen);
}
开发者ID:AlexBinder,项目名称:shogun,代码行数:16,代码来源:ECOCIHDDecoder.cpp
示例11: cos
/** Joint approximate diagonalization of the stack of m-by-m matrices in C via
 *  Jacobi (Givens) sweeps. Starts from V0 when it is m-by-m, otherwise from
 *  the identity, and iterates until a full sweep applies no rotation with
 *  |theta| > eps. Returns the accumulated orthogonal transform V.
 *  NOTE(review): itermax is accepted but not consulted by the loop — the
 *  stopping criterion is eps alone; confirm against the caller's expectation. */
SGMatrix<float64_t> CJADiagOrth::diagonalize(SGNDArray<float64_t> C, SGMatrix<float64_t> V0,
    double eps, int itermax)
{
    int m = C.dims[0];
    int L = C.dims[2];

    // initial transform: supplied V0 if correctly shaped, else identity
    SGMatrix<float64_t> V;
    if (V0.num_rows == m && V0.num_cols == m)
        V = V0.clone();
    else
        V = SGMatrix<float64_t>::create_identity_matrix(m, 1);

    int rots = 0;
    bool rotated = true;
    while (rotated)
    {
        rotated = false;
        for (int p = 0; p < m; p++)
        {
            for (int q = p+1; q < m; q++)
            {
                // Givens angle for the (p,q) plane over the whole stack
                float64_t theta = givens_stack(C.array, m, L, p, q);
                if (fabs(theta) <= eps)
                    continue;

                // apply the rotation to the stack and accumulate it into V
                float64_t c = cos(theta);
                float64_t s = sin(theta);
                left_rot_stack (C.array, m, m, L, p, q, c, s);
                right_rot_stack(C.array, m, m, L, p, q, c, s);
                left_rot_simple(V.matrix, m, m, p, q, c, s);
                rots++;
                rotated = true;
            }
        }
    }
    return V;
}
开发者ID:DrahmA,项目名称:shogun,代码行数:43,代码来源:JADiagOrth.cpp
示例12: image
/** Accumulate the gradient of the loss w.r.t. the convolution kernel weights
 *  over all samples (columns of `inputs`/`local_gradients`).
 *
 *  Each column holds one sample; the row offsets select which map inside the
 *  stacked representation to view as a 2D image. The output image is sampled
 *  on a stride grid, and for every output position the kernel window around
 *  (y, x) is walked, adding local_gradient * input contributions into
 *  weight_gradients. NOTE(review): in the NLAP_NONE branch LG_image is
 *  indexed with the strided coordinates (y/stride, x/stride) while the
 *  autoencoder branch uses (y, x) directly — presumably because the
 *  autoencoder output is not downsampled; confirm against the forward pass.
 */
void CConvolutionalFeatureMap::compute_weight_gradients(
    SGMatrix< float64_t > inputs,
    SGMatrix< float64_t > local_gradients,
    SGMatrix< float64_t > weight_gradients,
    int32_t inputs_row_offset,
    int32_t local_gradients_row_offset)
{
    weight_gradients.zero();
    for (int32_t i=0; i<local_gradients.num_cols; i++)
    {
        // non-owning 2D views into column i of the stacked buffers
        SGMatrix<float64_t> image(
            inputs.matrix+i*inputs.num_rows + inputs_row_offset,
            m_input_height, m_input_width, false);
        SGMatrix<float64_t> LG_image(
            local_gradients.matrix+i*local_gradients.num_rows
            + local_gradients_row_offset, m_output_height, m_output_width, false);

        // walk the input plane on the stride grid
        for (int32_t x=0; x<m_input_width; x+=m_stride_x)
        {
            for (int32_t y=0; y<m_input_height; y+=m_stride_y)
            {
                // kernel window centered at (y, x)
                for (int32_t x1=x-m_radius_x; x1<=x+m_radius_x; x1++)
                {
                    for (int32_t y1=y-m_radius_y; y1<=y+m_radius_y; y1++)
                    {
                        // skip window positions falling outside the image
                        if (x1>=0 && y1>=0 && x1<image.num_cols && y1<image.num_rows)
                        {
                            if (m_autoencoder_position == NLAP_NONE)
                                weight_gradients(m_radius_y-y1+y,m_radius_x-x1+x) +=
                                    LG_image(y/m_stride_y,x/m_stride_x)*image(y1,x1);
                            else
                                weight_gradients(m_radius_y-y1+y,m_radius_x-x1+x) +=
                                    LG_image(y,x)*image(y1,x1);
                        }
                    }
                }
            }
        }
    }
}
示例13: reshape_transmission_params
/** Arrange the flat transition parameters w into the 4x4 transmission weight
 *  matrix, with disallowed transitions left at -infinity.
 *
 *  State indices:
 *   0 -> start state
 *   1 -> stop state
 *   2 -> negative state (label == 0)
 *   3 -> positive state (label == 1)
 */
void CTwoStateModel::reshape_transmission_params(
    SGMatrix< float64_t >& transmission_weights, SGVector< float64_t > w)
{
    transmission_weights.set_const(-CMath::INFTY);

    // From start
    transmission_weights(0,2) = 0; // to negative
    transmission_weights(0,3) = 0; // to positive
    // From negative
    transmission_weights(2,1) = 0;    // to stop
    transmission_weights(2,2) = w[0]; // to negative
    transmission_weights(2,3) = w[1]; // to positive
    // From positive
    transmission_weights(3,1) = 0;    // to stop
    transmission_weights(3,2) = w[3]; // to negative (column 2 is the negative state)
    transmission_weights(3,3) = w[2]; // to positive (column 3 is the positive state)
}
开发者ID:vladislav-horbatiuk,项目名称:shogun,代码行数:23,代码来源:TwoStateModel.cpp
示例14: main
int main(int argc, char **argv)
{
init_shogun_with_defaults();
/* create some data and labels */
SGMatrix<float64_t> matrix =
SGMatrix<float64_t>(dim_vectors, num_vectors);
SGMatrix<float64_t> matrix2 =
SGMatrix<float64_t>(dim_vectors, num_vectors);
CRegressionLabels* labels=new CRegressionLabels(num_vectors);
build_matrices(matrix2, matrix, labels);
/* create training features */
CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t> ();
features->set_feature_matrix(matrix);
/* create testing features */
CDenseFeatures<float64_t>* features2=new CDenseFeatures<float64_t> ();
features2->set_feature_matrix(matrix2);
SG_REF(features);
SG_REF(features2);
SG_REF(labels);
/*Allocate our Kernel*/
CGaussianKernel* test_kernel = new CGaussianKernel(10, 2);
test_kernel->init(features, features);
/*Allocate our mean function*/
CZeroMean* mean = new CZeroMean();
/*Allocate our likelihood function*/
CGaussianLikelihood* lik = new CGaussianLikelihood();
/*Allocate our inference method*/
CExactInferenceMethod* inf =
new CExactInferenceMethod(test_kernel,
features, mean, labels, lik);
SG_REF(inf);
/*Finally use these to allocate the Gaussian Process Object*/
CGaussianProcessRegression* gp =
new CGaussianProcessRegression(inf, features, labels);
SG_REF(gp);
/*Build the parameter tree for model selection*/
CModelSelectionParameters* root = build_tree(inf, lik, test_kernel);
/*Criterion for gradient search*/
CGradientCriterion* crit = new CGradientCriterion();
/*This will evaluate our inference method for its derivatives*/
CGradientEvaluation* grad=new CGradientEvaluation(gp, features, labels,
crit);
grad->set_function(inf);
gp->print_modsel_params();
root->print_tree();
/* handles all of the above structures in memory */
CGradientModelSelection* grad_search=new CGradientModelSelection(
root, grad);
/* set autolocking to false to get rid of warnings */
grad->set_autolock(false);
/*Search for best parameters*/
CParameterCombination* best_combination=grad_search->select_model(true);
/*Output all the results and information*/
if (best_combination)
{
SG_SPRINT("best parameter(s):\n");
best_combination->print_tree();
best_combination->apply_to_machine(gp);
}
CGradientResult* result=(CGradientResult*)grad->evaluate();
if(result->get_result_type() != GRADIENTEVALUATION_RESULT)
SG_SERROR("Evaluation result not a GradientEvaluationResult!");
result->print_result();
SGVector<float64_t> alpha = inf->get_alpha();
SGVector<float64_t> labe = labels->get_labels();
SGVector<float64_t> diagonal = inf->get_diagonal_vector();
SGMatrix<float64_t> cholesky = inf->get_cholesky();
gp->set_return_type(CGaussianProcessRegression::GP_RETURN_COV);
//.........这里部分代码省略.........
开发者ID:coodoing,项目名称:shogun,代码行数:101,代码来源:regression_gaussian_process_gaussian.cpp
示例15: main
/** ShareBoost multiclass example: stream a dense ASCII training file into a
 *  fixed-capacity matrix, train CShareBoost, and report the selected feature
 *  subset and training accuracy. */
int main(int argc, char** argv)
{
    // Capacity of the pre-allocated buffers below. The original code wrote
    // column num_vectors of a 1000-column matrix (and labvec[num_vectors])
    // without any bound check, overflowing on files with more than 1000
    // examples; the guard inside the loop fixes that.
    const int32_t max_vectors = 1000;

    int32_t num_vectors = 0;
    int32_t num_feats = 0;

    init_shogun_with_defaults();

    const char*fname_train = "../data/7class_example4_train.dense";
    CStreamingAsciiFile *train_file = new CStreamingAsciiFile(fname_train);
    SG_REF(train_file);
    CStreamingDenseFeatures<float64_t> *stream_features = new CStreamingDenseFeatures<float64_t>(train_file, true, 1024);
    SG_REF(stream_features);

    SGMatrix<float64_t> mat;
    SGVector<float64_t> labvec(max_vectors);

    // Read streamed examples into a dense matrix, one column per example.
    stream_features->start_parser();
    SGVector< float64_t > vec;
    while (stream_features->get_next_example())
    {
        vec = stream_features->get_vector();
        if (num_feats == 0)
        {
            // the first example fixes the feature dimensionality
            num_feats = vec.vlen;
            mat = SGMatrix<float64_t>(num_feats, max_vectors);
        }

        // BUGFIX: stop before overrunning the fixed-capacity buffers
        if (num_vectors >= max_vectors)
        {
            stream_features->release_example();
            break;
        }

        std::copy(vec.vector, vec.vector+vec.vlen, mat.get_column_vector(num_vectors));
        labvec[num_vectors] = stream_features->get_label();
        num_vectors++;
        stream_features->release_example();
    }
    stream_features->end_parser();

    // shrink the views to the number of examples actually read
    mat.num_cols = num_vectors;
    labvec.vlen = num_vectors;

    CMulticlassLabels* labels = new CMulticlassLabels(labvec);
    SG_REF(labels);

    // Create features with the useful values from mat
    CDenseFeatures< float64_t >* features = new CDenseFeatures<float64_t>(mat);
    SG_REF(features);

    SG_SPRINT("Performing ShareBoost on a %d-class problem\n", labels->get_num_classes());

    // Create ShareBoost Machine
    CShareBoost *machine = new CShareBoost(features, labels, 10);
    SG_REF(machine);

    machine->train();

    SGVector<int32_t> activeset = machine->get_activeset();
    SG_SPRINT("%d out of %d features are selected:\n", activeset.vlen, mat.num_rows);
    for (int32_t i=0; i < activeset.vlen; ++i)
        SG_SPRINT("activeset[%02d] = %d\n", i, activeset[i]);

    // Evaluate on the selected feature subset
    CDenseSubsetFeatures<float64_t> *subset_fea = new CDenseSubsetFeatures<float64_t>(features, machine->get_activeset());
    SG_REF(subset_fea);
    CMulticlassLabels* output = CMulticlassLabels::obtain_from_generic(machine->apply(subset_fea));

    int32_t correct = 0;
    for (int32_t i=0; i < output->get_num_labels(); ++i)
        if (output->get_int_label(i) == labels->get_int_label(i))
            correct++;
    SG_SPRINT("Accuracy = %.4f\n", float64_t(correct)/labels->get_num_labels());

    // Free resources
    SG_UNREF(machine);
    SG_UNREF(output);
    SG_UNREF(subset_fea);
    SG_UNREF(features);
    SG_UNREF(labels);
    SG_UNREF(train_file);
    SG_UNREF(stream_features);
    exit_shogun();

    return 0;
}
开发者ID:AlexBinder,项目名称:shogun,代码行数:78,代码来源:classifier_multiclass_shareboost.cpp
示例16: main
int main(int argc, char **argv)
{
init_shogun(&print_message, &print_message, &print_message);
int32_t num_vectors=4;
int32_t dim_vectors=3;
/* create some data and labels */
SGMatrix<float64_t> matrix =
SGMatrix<float64_t>(dim_vectors, num_vectors);
matrix[0] = -1;
matrix[1] = -1;
matrix[2] = -1;
matrix[3] = 1;
matrix[4] = 1;
matrix[5] = 1;
matrix[6] = -10;
matrix[7] = -10;
matrix[8] = -10;
matrix[9] = 3;
matrix[10] = 2;
matrix[11] = 1;
SGMatrix<float64_t> matrix2 =
SGMatrix<float64_t>(dim_vectors, num_vectors);
for (int32_t i=0; i<num_vectors*dim_vectors; i++)
matrix2[i]=i*sin(i)*.96;
/* create training features */
CDenseFeatures<float64_t>* features=new CDenseFeatures<float64_t> ();
features->set_feature_matrix(matrix);
/* create testing features */
CDenseFeatures<float64_t>* features2=new CDenseFeatures<float64_t> ();
features2->set_feature_matrix(matrix2);
SG_REF(features);
SG_REF(features2);
CRegressionLabels* labels=new CRegressionLabels(num_vectors);
/* create labels, two classes */
for (index_t i=0; i<num_vectors; ++i)
{
if(i%2 == 0) labels->set_label(i, 1);
else labels->set_label(i, -1);
}
SG_REF(labels);
CGaussianKernel* test_kernel = new CGaussianKernel(10, 2);
test_kernel->init(features, features);
CZeroMean* mean = new CZeroMean();
CGaussianLikelihood* lik = new CGaussianLikelihood();
lik->set_sigma(0.01);
CExactInferenceMethod* inf =
new CExactInferenceMethod(test_kernel, features, mean, labels, lik);
SG_REF(inf);
CGaussianProcessRegression* gp =
new CGaussianProcessRegression(inf, features, labels);
CModelSelectionParameters* root=new CModelSelectionParameters();
CModelSelectionParameters* c1 =
new CModelSelectionParameters("inference_method", inf);
root->append_child(c1);
CModelSelectionParameters* c2 = new CModelSelectionParameters("scale");
c1 ->append_child(c2);
c2->build_values(0.01, 4.0, R_LINEAR);
CModelSelectionParameters* c3 =
new CModelSelectionParameters("likelihood_model", lik);
c1->append_child(c3);
CModelSelectionParameters* c4=new CModelSelectionParameters("sigma");
c3->append_child(c4);
c4->build_values(0.001, 4.0, R_LINEAR);
CModelSelectionParameters* c5 =
new CModelSelectionParameters("kernel", test_kernel);
c1->append_child(c5);
CModelSelectionParameters* c6 =
new CModelSelectionParameters("width");
c5->append_child(c6);
c6->build_values(0.001, 4.0, R_LINEAR);
/* cross validation class for evaluation in model selection */
SG_REF(gp);
CGradientCriterion* crit = new CGradientCriterion();
//.........这里部分代码省略.........
开发者ID:jimloco,项目名称:shogun,代码行数:101,代码来源:regression_gaussian_process_gaussian.cpp
示例17: SG_ERROR
SGMatrix<float64_t> CIsomap::isomap_distance(SGMatrix<float64_t> D_matrix)
{
int32_t N,t,i,j;
float64_t tmp;
N = D_matrix.num_cols;
if (D_matrix.num_cols!=D_matrix.num_rows)
{
D_matrix.destroy_matrix();
SG_ERROR("Given distance matrix is not square.\n");
}
if (m_k>=N)
{
D_matrix.destroy_matrix();
SG_ERROR("K parameter should be less than number of given vectors (k=%d, N=%d)\n", m_k, N);
}
// cut by k-nearest neighbors
int32_t* edges_idx_matrix = SG_MALLOC(int32_t, N*m_k);
float64_t* edges_matrix = SG_MALLOC(float64_t, N*m_k);
// query neighbors and edges to neighbors
CFibonacciHeap* heap = new CFibonacciHeap(N);
for (i=0; i<N; i++)
{
// insert distances to heap
for (j=0; j<N; j++)
heap->insert(j,D_matrix[i*N+j]);
// extract nearest neighbor: the jth object itself
heap->extract_min(tmp);
// extract m_k neighbors and distances
for (j=0; j<m_k; j++)
{
edges_idx_matrix[i*m_k+j] = heap->extract_min(tmp);
edges_matrix[i*m_k+j] = tmp;
}
// clear heap
heap->clear();
}
// cleanup
delete heap;
#ifdef HAVE_PTHREAD
// Parallel Dijkstra with Fibonacci Heap
int32_t num_threads = parallel->get_num_threads();
ASSERT(num_threads>0);
// allocate threads and thread parameters
pthread_t* threads = SG_MALLOC(pthread_t, num_threads);
DIJKSTRA_THREAD_PARAM* parameters = SG_MALLOC(DIJKSTRA_THREAD_PARAM, num_threads);
// allocate heaps
CFibonacciHeap** heaps = SG_MALLOC(CFibonacciHeap*, num_threads);
for (t=0; t<num_threads; t++)
heaps[t] = new CFibonacciHeap(N);
#else
int32_t num_threads = 1;
#endif
// allocate (s)olution
bool* s = SG_MALLOC(bool,N*num_threads);
// allocate (f)rontier
bool* f = SG_MALLOC(bool,N*num_threads);
// init matrix to store shortest distances
float64_t* shortest_D = D_matrix.matrix;
#ifdef HAVE_PTHREAD
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
for (t=0; t<num_threads; t++)
{
parameters[t].i_start = t;
parameters[t].i_stop = N;
parameters[t].i_step = num_threads;
parameters[t].heap = heaps[t];
parameters[t].edges_matrix = edges_matrix;
parameters[t].edges_idx_matrix = edges_idx_matrix;
parameters[t].s = s+t*N;
parameters[t].f = f+t*N;
parameters[t].m_k = m_k;
parameters[t].shortest_D = shortest_D;
pthread_create(&threads[t], &attr, CIsomap::run_dijkstra_thread, (void*)¶meters[t]);
}
for (t=0; t<num_threads; t++)
pthread_join(threads[t], NULL);
pthread_attr_destroy(&attr);
for (t=0; t<num_threads; t++)
delete heaps[t];
SG_FREE(heaps);
SG_FREE(parameters);
SG_FREE(threads);
#else
D_THREAD_PARAM single_thread_param;
single_thread_param.i_start = 0;
single_thread_param.i_stop = N;
single_thread_param.i_step = 1;
single_thread_param.m_k = m_k;
//.........这里部分代码省略.........
开发者ID:axitkhurana,项目名称:shogun,代码行数:101,代码来源:Isomap.cpp
示例18: C0
SGMatrix<float64_t> CUWedge::diagonalize(SGNDArray<float64_t> C, SGMatrix<float64_t> V0,
double eps, int itermax)
{
int d = C.dims[0];
int L = C.dims[2];
SGMatrix<float64_t> V;
if (V0.num_rows == d && V0.num_cols == d)
{
V = V0.clone();
}
else
{
Map<MatrixXd> C0(C.get_matrix(0),d,d);
EigenSolver<MatrixXd> eig;
eig.compute(C0);
// sort eigenvectors
MatrixXd eigenvectors = eig.pseudoEigenvectors();
MatrixXd eigenvalues = eig.pseudoEigenvalueMatrix();
bool swap = false;
do
{
swap = false;
for (int j = 1; j < d; j++)
{
if ( eigenvalues(j,j) > eigenvalues(j-1,j-1) )
{
std::swap(eigenvalues(j,j),eigenvalues(j-1,j-1));
eigenvectors.col(j).swap(eigenvectors.col(j-1));
swap = true;
}
}
} while(swap);
V = SGMatrix<float64_t>::create_identity_matrix(d,1);
Map<MatrixXd> EV(V.matrix, d,d);
EV = eigenvalues.cwiseAbs().cwiseSqrt().inverse() * eigenvectors.transpose();
}
Map<MatrixXd> EV(V.matrix, d,d);
index_t * Cs_dims = SG_MALLOC(index_t, 3);
Cs_dims[0] = d;
Cs_dims[1] = d;
Cs_dims[2] = L;
SGNDArray<float64_t> Cs(Cs_dims,3);
sg_memcpy(Cs.array, C.array, Cs.dims[0]*Cs.dims[1]*Cs.dims[2]*sizeof(float64_t));
MatrixXd Rs(d,L);
std::vector<float64_t> crit;
crit.push_back(0.0);
for (int l = 0; l < L; l++)
{
Map<MatrixXd> Ci(C.get_matrix(l),d,d);
Map<MatrixXd> Csi(Cs.get_matrix(l),d,d);
Ci = 0.5 * (Ci + Ci.transpose());
Csi = EV * Ci * EV.transpose();
Rs.col(l) = Csi.diagonal();
crit.back() += Csi.cwiseAbs2().sum() - Rs.col(l).cwiseAbs2().sum();
}
float64_t iter = 0;
float64_t improve = 10;
while (improve > eps && iter < itermax)
{
MatrixXd B = Rs * Rs.transpose();
MatrixXd C1 = MatrixXd::Zero(d,d);
for (int id = 0; id < d; id++)
{
// rowSums
for (int l = 0; l < L; l++)
{
Map<MatrixXd> Csi(Cs.get_matrix(l),d,d);
C1.row(id) += Csi.row(id) * Rs(id,l);
}
}
MatrixXd D0 = B.cwiseProduct(B.transpose()) - B.diagonal() * B.diagonal().transpose();
MatrixXd A0 = MatrixXd::Identity(d,d) + (C1.cwiseProduct(B) - B.diagonal().asDiagonal() * C1.transpose()).cwiseQuotient(D0+MatrixXd::Identity(d,d));
EV = A0.inverse() * EV;
Map<MatrixXd> C0(C.get_matrix(0),d,d);
MatrixXd Raux = EV * C0 * EV.transpose();
MatrixXd aux = Raux.diagonal().cwiseAbs().cwiseSqrt().asDiagonal().inverse();
EV = aux * EV;
crit.push_back(0.0);
for (int l = 0; l < L; l++)
{
Map<MatrixXd> Ci(C.get_matrix(l),d,d);
Map<MatrixXd> Csi(Cs.get_matrix(l),d,d);
Csi = EV * Ci * EV.transpose();
Rs.col(l) = Csi.diagonal();
crit.back() += Csi.cwiseAbs2().sum() - Rs.col(l).cwiseAbs2().sum();
}
improve = CMath::abs(crit.back() - crit[iter]);
//.........这里部分代码省略.........
开发者ID:minxuancao,项目名称:shogun,代码行数:101,代码来源:UWedge.cpp
示例19: SG_DEBUG
void CLinearTimeMMD::compute_statistic_and_Q(
SGVector<float64_t>& statistic, SGMatrix<float64_t>& Q)
{
SG_DEBUG("entering %s::compute_statistic_and_Q()\n", get_name())
REQUIRE(m_streaming_p, "%s::compute_statistic_and_Q: streaming "
"features p required!\n", get_name());
REQUIRE(m_streaming_q, "%s::compute_statistic_and_Q: streaming "
"features q required!\n", get_name());
REQUIRE(m_kernel, "%s::compute_statistic_and_Q: kernel needed!\n",
get_name());
/* make sure multiple_kernels flag is used only with a combined kernel */
REQUIRE(m_kernel->get_kernel_type()==K_COMBINED,
"%s::compute_statistic_and_Q: underlying kernel is not of "
"type K_COMBINED\n", get_name());
/* cast combined kernel */
CCombinedKernel* combined=(CCombinedKernel*)m_kernel;
/* m is number of samples from each distribution, m_4 is quarter of it */
REQUIRE(m_m>=4, "%s::compute_statistic_and_Q: Need at least m>=4\n",
get_name());
index_t m_4=m_m/4;
SG_DEBUG("m_m=%d\n", m_m)
/* find out whether single or multiple kernels (cast is safe, check above) */
index_t num_kernels=combined->get_num_subkernels();
REQUIRE(num_kernels>0, "%s::compute_statistic_and_Q: At least one kernel "
"is needed\n", get_name());
/* allocate memory for results if vectors are empty */
if (!statistic.vector)
statistic=SGVector<float64_t>(num_kernels);
if (!Q.matrix)
Q=SGMatrix<float64_t>(num_kernels, num_kernels);
/* ensure right dimensions */
REQUIRE(statistic.vlen==num_kernels, "%s::compute_statistic_and_variance: "
"statistic vector size (%d) does not match number of kernels (%d)\n",
get_name(), statistic.vlen, num_kernels);
REQUIRE(Q.num_rows==num_kernels, "%s::compute_statistic_and_variance: "
"Q number of rows does (%d) not match number of kernels (%d)\n",
get_name(), Q.num_rows, num_kernels);
REQUIRE(Q.num_cols==num_kernels, "%s::compute_statistic_and_variance: "
"Q number of columns (%d) does not match number of kernels (%d)\n",
get_name(), Q.num_cols, num_kernels);
/* initialise statistic and variance since they are cumulative */
statistic.zero();
Q.zero();
/* produce two kernel lists to iterate doubly nested */
CList* list_i=new CList();
CList* list_j=new CList();
for (index_t k_idx=0; k_idx<combined->get_num_kernels(); k_idx++)
{
CKernel* kernel = combined->get_kernel(k_idx);
list_i->append_element(kernel);
list_j->append_element(kernel);
SG_UNREF(kernel);
}
/* needed for online mean and variance */
SGVector<index_t> term_counters_statistic(num_kernels);
SGMatrix<index_t> term_counters_Q(num_kernels, num_kernels);
term_counters_statistic.set_const(1);
term_counters_Q.set_const(1);
index_t num_examples_processed=0;
while (num_examples_processed<m_4)
{
/* number of example to look at in this iteration */
index_t num_this_run=CMath::min(m_blocksize,
CMath::max(0, m_4-num_examples_processed));
SG_DEBUG("processing %d more examples. %d so far processed. Blocksize "
"is %d\n", num_this_run, num_examples_processed, m_blocksize);
/* stream data from both distributions */
CFeatures* p1a=m_streaming_p->get_streamed_features(num_this_run);
CFeatures* p1b=m_streaming_p->get_streamed_features(num_this_run);
CFeatures* p2a=m_streaming_p->get_streamed_features(num_this_run);
CFeatures* p2b=m_streaming_p->get_streamed_features(num_this_run);
CFeatures* q1a=m_streaming_q->get_streamed_features(num_this_run);
CFeatures* q1b=m_streaming_q->get_streamed_features(num_this_run);
CFeatures* q2a=m_streaming_q->get_streamed_features(num_this_run);
CFeatures* q2b=m_streaming_q->get_streamed_features(num_this_run);
/* check whether h0 should be simulated and permute if so */
if (m_simulate_h0)
{
/* create merged copy of all feature instances to permute */
CList* list=new CList();
list->append_element(p1b);
//.........这里部分代码省略.........
开发者ID:hushell,项目名称:shogun,代码行数:101,代码来源:LinearTimeMMD.cpp
示例20: SG_ERROR
/** Train the mixture with expectation-maximization.
 *
 *  @param min_cov   lower bound on covariance entries (regularization passed
 *                   through to max_likelihood)
 *  @param max_iter  maximum number of EM iterations
 *  @param min_change convergence threshold on the log-likelihood improvement
 *  @return the final data log-likelihood
 *
 *  If the components have no means yet, k-means provides the initialization
 *  and the responsibilities come from alpha_init; otherwise alpha is only
 *  allocated here and filled in the E-step below.
 */
float64_t CGMM::train_em(float64_t min_cov, int32_t max_iter, float64_t min_change)
{
    if (!features)
        SG_ERROR("No features to train on.\n");

    CDotFeatures* dotdata=(CDotFeatures *) features;
    int32_t num_vectors=dotdata->get_num_vectors();

    // alpha(i, j): responsibility of component j for sample i
    SGMatrix<float64_t> alpha;

    if (m_components.vector[0]->get_mean().vector==NULL)
    {
        // no means yet: seed the components from k-means cluster centers
        CKMeans* init_k_means=new CKMeans(m_components.vlen, new CEuclidianDistance());
        init_k_means->train(dotdata);
        SGMatrix<float64_t> init_means=init_k_means->get_cluster_centers();

        alpha=alpha_init(init_means);

        SG_UNREF(init_k_means);

        max_likelihood(alpha, min_cov);
    }
    else
    {
        // components already initialized: just allocate the responsibilities
        alpha.matrix=SG_MALLOC(float64_t, num_vectors*m_components.vlen);
        alpha.num_rows=num_vectors;
        alpha.num_cols=m_components.vlen;
    }

    int32_t iter=0;
    float64_t log_likelihood_prev=0;
    float64_t log_likelihood_cur=0;
    // logPxy: per-sample, per-component joint log density; logPx: per-sample marginal
    float64_t* logPxy=SG_MALLOC(float64_t, num_vectors*m_components.vlen);
    float64_t* logPx=SG_MALLOC(float64_t, num_vectors);
    //float64_t* logPost=SG_MALLOC(float64_t, num_vectors*m_components.vlen);

    while (iter<max_iter)
    {
        log_likelihood_prev=log_likelihood_cur;
        log_likelihood_cur=0;

        // E-step: compute responsibilities and accumulate the log-likelihood
        for (int32_t i=0; i<num_vectors; i++)
        {
            logPx[i]=0;
            SGVector<float64_t> v=dotdata->get_computed_dot_feature_vector(i);
            for (int32_t j=0; j<m_components.vlen; j++)
            {
                // log( P(x_i | j) * coefficient_j )
                logPxy[i*m_components.vlen+j]=m_components.vector[j]->compute_log_PDF(v)+CMath::log(m_coefficients.vector[j]);
                // marginal accumulated in linear space, converted back below
                logPx[i]+=CMath::exp(logPxy[i*m_components.vlen+j]);
            }

            logPx[i]=CMath::log(logPx[i]);
            log_likelihood_cur+=logPx[i];
            v.free_vector();

            for (int32_t j=0; j<m_components.vlen; j++)
            {
                //logPost[i*m_components.vlen+j]=logPxy[i*m_components.vlen+j]-logPx[i];
                // responsibility = P(j | x_i)
                alpha.matrix[i*m_components.vlen+j]=CMath::exp(logPxy[i*m_components.vlen+j]-logPx[i]);
            }
        }

        // converged: improvement below threshold (skipped on the first pass)
        if (iter>0 && log_likelihood_cur-log_likelihood_prev<min_change)
            break;

        // M-step: re-estimate component parameters from the responsibilities
        max_likelihood(alpha, min_cov);
        iter++;
    }

    SG_FREE(logPxy);
    SG_FREE(logPx);
    //SG_FREE(logPost);
    alpha.free_matrix();

    return log_likelihood_cur;
}
开发者ID:HiroyukiMikita,项目名称:usc-clmc-ros-pkg,代码行数:77,代码来源:GMM.cpp
注:本文中的SGMatrix类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论