This article collects typical usage examples of the C++ VectorFloat class. If you have been wondering what the C++ VectorFloat class is for, how to use it, or where to find real examples of it, the hand-picked class examples here may help.
A total of 20 VectorFloat code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
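Before diving into the examples, here is a minimal usage sketch of VectorFloat itself. This is a sketch only: it assumes the GRT library headers are installed and that VectorFloat follows the Vector<Float> interface (operator[], getSize, resize) used throughout the examples below.

#include <GRT/GRT.h>
#include <iostream>
using namespace GRT;

int main(){
    VectorFloat v(3);                    // create a 3-dimensional float vector
    v[0] = 1.0; v[1] = 2.0; v[2] = 3.0;

    v.resize(4);                         // grow the vector to 4 dimensions
    v[3] = 4.0;

    for(UINT i=0; i<v.getSize(); i++){   // print the contents
        std::cout << v[i] << " ";
    }
    std::cout << std::endl;

    return 0;
}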
Example 1: b
VectorFloat MatrixFloat::multiple(const VectorFloat &b) const{

    const unsigned int M = rows;
    const unsigned int N = cols;
    const unsigned int K = (unsigned int)b.size();

    if( N != K ){
        warningLog << "multiple(vector b) - The size of b (" << b.size() << ") does not match the number of columns in this matrix (" << N << ")" << std::endl;
        return VectorFloat();
    }

    VectorFloat c(M);
    const Float *pb = &b[0];
    Float *pc = &c[0];

    unsigned int i,j = 0;
    for(i=0; i<rows; i++){
        pc[i] = 0;
        for(j=0; j<cols; j++){
            pc[i] += dataPtr[i*cols+j]*pb[j];
        }
    }

    return c;
}
Developer: pscholl, Project: grt, Lines: 25, Source: MatrixFloat.cpp
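A possible way to call this matrix-vector product (a sketch, assuming GRT headers are included and that the MatrixFloat constructor and row accessor behave as used elsewhere in the library):

MatrixFloat A(2,3);
A[0][0] = 1; A[0][1] = 0; A[0][2] = 2;
A[1][0] = 0; A[1][1] = 3; A[1][2] = 1;

VectorFloat b(3);
b[0] = 1; b[1] = 2; b[2] = 3;

VectorFloat c = A.multiple( b );   // expected result: c[0] == 7, c[1] == 9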
Example 2: ComputePP
VectorFloat AttitudeLoop::ComputePP(Quaternion qM, VectorFloat omegaM) {
    Quaternion qErr;
    VectorFloat axisErr;

    Serial.print("Printing qRef ");
    fQRef.print();
    Serial.print("Printing qM ");
    qM.print();

    qErr = fQRef.conjugate() * qM;
    Serial.print("Printing qErr ");
    qErr.print();

    if(qErr.w < 0) axisErr = VectorFloat(qErr);
    else axisErr = -VectorFloat(qErr);

    Serial.print("Printing axisErr ");
    axisErr.print();
    Serial.print("Printing omegaM ");
    omegaM.print();
    Serial.print("Printing torque without I ");
    (axisErr*fPQ - omegaM*fPOmega).print();

    fTorque = fI * (axisErr*fPQ - omegaM*fPOmega);
    Serial.print("Printing fTorque ");
    fTorque.print();

    return fTorque;
}
Developer: nlurkin, Project: Drone, Lines: 28, Source: AttitudeLoop.cpp
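The method above implements a proportional attitude controller: the error quaternion qErr = conj(qRef) * qM is reduced to its vector (axis) part, the sign is chosen so the controller rotates along the shorter path, and the torque is I * (Kq * axisErr - Kw * omega). The following standalone sketch shows the same law without the Serial debug output; the types Quat/Vec3 and the functions conjugate/multiply/attitudeTorque are hypothetical names for illustration, not the Drone project's API.

struct Quat { float w, x, y, z; };
struct Vec3 { float x, y, z; };

static Quat conjugate(const Quat &q){ return { q.w, -q.x, -q.y, -q.z }; }

static Quat multiply(const Quat &a, const Quat &b){   // Hamilton product
    return { a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z,
             a.w*b.x + a.x*b.w + a.y*b.z - a.z*b.y,
             a.w*b.y - a.x*b.z + a.y*b.w + a.z*b.x,
             a.w*b.z + a.x*b.y - a.y*b.x + a.z*b.w };
}

// torque = I * (Kq * axisErr - Kw * omega), with the same sign convention as the code above
static Vec3 attitudeTorque(const Quat &qRef, const Quat &qMeas, const Vec3 &omega,
                           float I, float Kq, float Kw){
    Quat qErr = multiply(conjugate(qRef), qMeas);
    float s = (qErr.w < 0.0f) ? 1.0f : -1.0f;          // pick the short rotation path
    Vec3 axisErr = { s*qErr.x, s*qErr.y, s*qErr.z };
    return { I*(Kq*axisErr.x - Kw*omega.x),
             I*(Kq*axisErr.y - Kw*omega.y),
             I*(Kq*axisErr.z - Kw*omega.z) };
}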
Example 3: generate_rows
void generate_rows (
        const protobuf::Config::Generate & config,
        CrossCat & cross_cat,
        Assignments & assignments,
        const char * rows_out,
        rng_t & rng)
{
    const size_t kind_count = cross_cat.kinds.size();
    const size_t row_count = config.row_count();
    const float density = config.density();
    LOOM_ASSERT_LE(0.0, density);
    LOOM_ASSERT_LE(density, 1.0);

    VectorFloat scores;
    std::vector<ProductModel::Value> partial_values(kind_count);
    protobuf::Row row;
    protobuf::OutFile rows(rows_out);

    for (auto & kind : cross_cat.kinds) {
        kind.model.realize(rng);
    }

    cross_cat.schema.clear(* row.mutable_diff());
    ProductValue & full_value = * row.mutable_diff()->mutable_pos();
    for (size_t id = 0; id < row_count; ++id) {
        assignments.rowids().try_push(id);

        for (size_t k = 0; k < kind_count; ++k) {
            auto & kind = cross_cat.kinds[k];
            ProductModel & model = kind.model;
            auto & mixture = kind.mixture;
            ProductValue & value = partial_values[k];
            auto & groupids = assignments.groupids(k);

            scores.resize(mixture.clustering.counts().size());
            mixture.clustering.score_value(model.clustering, scores);
            distributions::scores_to_probs(scores);
            const VectorFloat & probs = scores;

            auto & observed = * value.mutable_observed();
            ValueSchema::clear(observed);
            observed.set_sparsity(ProductModel::Value::Observed::DENSE);
            const size_t feature_count = kind.featureids.size();
            for (size_t f = 0; f < feature_count; ++f) {
                observed.add_dense(
                    distributions::sample_bernoulli(rng, density));
            }

            size_t groupid = mixture.sample_value(model, probs, value, rng);
            model.add_value(value, rng);
            mixture.add_value(model, groupid, value, rng);
            groupids.push(groupid);
        }

        row.set_id(id);
        cross_cat.splitter.join(full_value, partial_values);
        rows.write_stream(row);
    }
}
Developer: jostheim, Project: loom, Lines: 58, Source: generate.hpp
Example 4: predict_
bool Softmax::predict_(VectorFloat &inputVector){

    if( !trained ){
        errorLog << __GRT_LOG__ << " Model Not Trained!" << std::endl;
        return false;
    }

    predictedClassLabel = 0;
    maxLikelihood = -10000;

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << __GRT_LOG__ << " The size of the input vector (" << inputVector.getSize() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], ranges[n].minValue, ranges[n].maxValue, 0, 1);
        }
    }

    if( classLikelihoods.size() != numClasses ) classLikelihoods.resize(numClasses,0);
    if( classDistances.size() != numClasses ) classDistances.resize(numClasses,0);

    //Loop over each class and compute the likelihood of the input data coming from class k. Pick the class with the highest likelihood
    Float sum = 0;
    Float bestEstimate = -grt_numeric_limits< Float >::max();
    UINT bestIndex = 0;
    for(UINT k=0; k<numClasses; k++){
        Float estimate = models[k].compute( inputVector );

        if( estimate > bestEstimate ){
            bestEstimate = estimate;
            bestIndex = k;
        }

        classDistances[k] = estimate;
        classLikelihoods[k] = estimate;
        sum += estimate;
    }

    if( sum > 1.0e-5 ){
        for(UINT k=0; k<numClasses; k++){
            classLikelihoods[k] /= sum;
        }
    }else{
        //If the sum is less than the value above then none of the models found a positive class
        maxLikelihood = bestEstimate;
        predictedClassLabel = GRT_DEFAULT_NULL_CLASS_LABEL;
        return true;
    }

    maxLikelihood = classLikelihoods[bestIndex];
    predictedClassLabel = classLabels[bestIndex];

    return true;
}
Developer: nickgillian, Project: grt, Lines: 58, Source: Softmax.cpp
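In normal use the protected predict_ method is reached through the public predict call on a trained model. A rough usage sketch follows (GRT headers assumed; the file name is hypothetical; train, predict, getPredictedClassLabel and getMaximumLikelihood are the standard GRT classifier calls):

ClassificationData trainingData;
if( trainingData.load( "TrainingData.grt" ) ){       // hypothetical file
    Softmax softmax;
    if( softmax.train( trainingData ) ){
        VectorFloat sample = trainingData[0].getSample();
        if( softmax.predict( sample ) ){
            UINT label = softmax.getPredictedClassLabel();
            Float likelihood = softmax.getMaximumLikelihood();
            cout << "label: " << label << " likelihood: " << likelihood << endl;
        }
    }
}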
Example 5: input
bool FFT::update(const VectorFloat &x){

    if( !initialized ){
        errorLog << "update(const VectorFloat &x) - Not initialized!" << std::endl;
        return false;
    }

    if( x.size() != numInputDimensions ){
        errorLog << "update(const VectorFloat &x) - The size of the input (" << x.size() << ") does not match that of the FeatureExtraction (" << numInputDimensions << ")!" << std::endl;
        return false;
    }

    //Add the current input to the data buffers
    dataBuffer.push_back( x );

    featureDataReady = false;

    if( ++hopCounter == hopSize ){
        hopCounter = 0;

        //Compute the FFT for each dimension
        for(UINT j=0; j<numInputDimensions; j++){

            //Copy the input data for this dimension into the temp buffer
            for(UINT i=0; i<dataBufferSize; i++){
                tempBuffer[i] = dataBuffer[i][j];
            }

            //Compute the FFT
            if( !fft[j].computeFFT( tempBuffer ) ){
                errorLog << "update(const VectorFloat &x) - Failed to compute FFT!" << std::endl;
                return false;
            }
        }

        //Flag that the fft was computed during this update
        featureDataReady = true;

        //Copy the FFT data to the feature vector
        UINT index = 0;
        for(UINT j=0; j<numInputDimensions; j++){
            if( computeMagnitude ){
                Float *mag = fft[j].getMagnitudeDataPtr();
                for(UINT i=0; i<fft[j].getFFTSize()/2; i++){
                    featureVector[index++] = *mag++;
                }
            }
            if( computePhase ){
                Float *phase = fft[j].getPhaseDataPtr();
                for(UINT i=0; i<fft[j].getFFTSize()/2; i++){
                    featureVector[index++] = *phase++;
                }
            }
        }
    }

    return true;
}
Developer: BryanBo-Cao, Project: grt, Lines: 57, Source: FFT.cpp
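A rough sketch of how update() is typically driven, one sample at a time. The default FFT settings are an assumption (roughly a 512-point window on a single input dimension), and getFeatureDataReady/getFeatureVector are assumed to be the standard GRT feature-extraction accessors:

FFT fft;                                 // assumed defaults: 512-point window, 1 dimension
VectorFloat sample(1);

for(UINT t=0; t<1024; t++){
    sample[0] = sin( t * 0.1 );          // a toy input signal
    fft.update( sample );

    if( fft.getFeatureDataReady() ){
        VectorFloat spectrum = fft.getFeatureVector();
        // ... use the magnitude (and optionally phase) values in spectrum ...
    }
}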
Example 6: getAngle
////////////
// Camera //
////////////
static float getAngle(VectorFloat vec1,VectorFloat vec2)
{
    float cosPhi = (vec1*vec2)/(vec1.length()*vec2.length());

    if (vec1.y>=vec2.y)
        return (float)acos(cosPhi);
    else
        return -(float)acos(cosPhi);
}
Developer: hakan64, Project: sdl, Lines: 12, Source: Camera.cpp
Example 7: TEST
// Tests the VectorFloat type
TEST(DynamicType, VectorFloatTest) {
    DynamicType type;
    VectorFloat a(3);
    a[0] = 1.1; a[1] = 1.2; a[2] = 1.3;
    EXPECT_TRUE( type.set( a ) );
    VectorFloat b = type.get< VectorFloat >();
    EXPECT_EQ( a.getSize(), b.getSize() );
    for(unsigned int i=0; i<a.getSize(); i++){
        EXPECT_EQ( a[i], b[i] );
    }
}
Developer: sgrignard, Project: grt, Lines: 12, Source: DynamicTypeTest.cpp
Example 8: newPoint
void Calibrator::newPoint(int motor, float p, VectorFloat omega, VectorFloat alpha, VectorFloat acceleration, Quaternion q) {
    cout << p;
    omega.print();
    alpha.print();
    acceleration.print();
    q.print();

    fP[motor].push_back(p);
    fOmega[motor].push_back(omega);
    fAlpha[motor].push_back(alpha);
    fA[motor].push_back(acceleration);
    fQ[motor].push_back(q);
}
Developer: nlurkin, Project: Drone, Lines: 12, Source: Calibrator.cpp
Example 9: computeDerivative
Float Derivative::computeDerivative(const Float x) {

    if( numInputDimensions != 1 ) {
        errorLog << "computeDerivative(const Float x) - The Number Of Input Dimensions is not 1! NumInputDimensions: " << numInputDimensions << std::endl;
        return 0;
    }

    VectorFloat y = computeDerivative( VectorFloat(1,x) );

    if( y.size() == 0 ) return 0;

    return y[0];
}
Developer: codeflakes0, Project: grt, Lines: 13, Source: Derivative.cpp
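A small sketch of the one-dimensional case (the default constructor is assumed to give a first-order derivative over a single input dimension; GRT headers assumed):

Derivative deriv;                              // assumed defaults: first derivative, 1 dimension
for(UINT t=0; t<100; t++){
    Float x = t * 0.5;                         // a ramp input
    Float dx = deriv.computeDerivative( x );   // should settle to the ramp slope (0.5)
}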
Example 10: filter
Float DoubleMovingAverageFilter::filter(const Float x){

    //If the filter has not been initialised then return 0, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(const Float x) - The filter has not been initialized!" << std::endl;
        return 0;
    }

    VectorFloat y = filter(VectorFloat(1,x));

    if( y.getSize() == 0 ) return 0;

    return y[0];
}
Developer: sboettcher, Project: grt, Lines: 13, Source: DoubleMovingAverageFilter.cpp
Example 11: filter
Float SavitzkyGolayFilter::filter(const Float x){

    //If the filter has not been initialised then return 0, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(Float x) - The filter has not been initialized!" << std::endl;
        return 0;
    }

    VectorFloat y = filter(VectorFloat(1,x));

    if( y.size() > 0 ) return y[0];

    return 0;
}
Developer: sboettcher, Project: grt, Lines: 13, Source: SavitzkyGolayFilter.cpp
Example 12: sample
bool ClassificationData::addSample(const UINT classLabel,const VectorFloat &sample){

    if( sample.getSize() != numDimensions ){
        if( totalNumSamples == 0 ){
            warningLog << "addSample(const UINT classLabel, VectorFloat &sample) - the size of the new sample (" << sample.getSize() << ") does not match the number of dimensions of the dataset (" << numDimensions << "), setting dimensionality to: " << sample.getSize() << std::endl;
            numDimensions = sample.getSize();
        }else{
            errorLog << "addSample(const UINT classLabel, VectorFloat &sample) - the size of the new sample (" << sample.getSize() << ") does not match the number of dimensions of the dataset (" << numDimensions << ")" << std::endl;
            return false;
        }
    }

    //The class label must be greater than zero (as zero is used for the null rejection class label)
    if( classLabel == GRT_DEFAULT_NULL_CLASS_LABEL && !allowNullGestureClass ){
        errorLog << "addSample(const UINT classLabel, VectorFloat &sample) - the class label can not be 0!" << std::endl;
        return false;
    }

    //The dataset has changed so flag that any previous cross validation setup will now not work
    crossValidationSetup = false;
    crossValidationIndexs.clear();

    ClassificationSample newSample(classLabel,sample);
    data.push_back( newSample );
    totalNumSamples++;

    if( classTracker.getSize() == 0 ){
        ClassTracker tracker(classLabel,1);
        classTracker.push_back(tracker);
    }else{
        bool labelFound = false;
        for(UINT i=0; i<classTracker.getSize(); i++){
            if( classLabel == classTracker[i].classLabel ){
                classTracker[i].counter++;
                labelFound = true;
                break;
            }
        }
        if( !labelFound ){
            ClassTracker tracker(classLabel,1);
            classTracker.push_back(tracker);
        }
    }

    //Update the class labels
    sortClassLabels();

    return true;
}
Developer: sgrignard, Project: grt, Lines: 49, Source: ClassificationData.cpp
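A short sketch of adding labelled samples to a dataset (GRT headers assumed; setNumDimensions and addSample are used as in the GRT examples on this page):

ClassificationData data;
data.setNumDimensions( 3 );

VectorFloat sample(3);
sample[0] = 0.1; sample[1] = 0.2; sample[2] = 0.3;

if( !data.addSample( 1, sample ) ){      // class label 1, 3-dimensional sample
    cout << "Failed to add sample!" << endl;
}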
Example 13: main
int main (int argc, const char * argv[])
{
    //Load the example data
    ClassificationData data;

    if( !data.load("WiiAccShakeData.grt") ){
        cout << "ERROR: Failed to load data from file!\n";
        return EXIT_FAILURE;
    }

    //The variables used to initialize the MovementIndex feature extraction
    UINT windowSize = 10;
    UINT numDimensions = data.getNumDimensions();

    //Create a new instance of the MovementIndex feature extraction
    MovementIndex movementIndex(windowSize,numDimensions);

    //Loop over the accelerometer data, at each time sample (i) compute the features using the new sample and then write the results to a file
    for(UINT i=0; i<data.getNumSamples(); i++){

        //Compute the features using this new sample
        movementIndex.computeFeatures( data[i].getSample() );

        //Write the data
        cout << "InputVector: ";
        for(UINT j=0; j<data.getNumDimensions(); j++){
            cout << data[i].getSample()[j] << "\t";
        }

        //Get the latest feature vector
        VectorFloat featureVector = movementIndex.getFeatureVector();

        //Write the features
        cout << "FeatureVector: ";
        for(UINT j=0; j<featureVector.size(); j++){
            cout << featureVector[j];
            if( j != featureVector.size()-1 ) cout << "\t";
        }
        cout << endl;
    }

    //Save the MovementIndex settings to a file
    movementIndex.save("MovementIndexSettings.grt");

    //You can then load the settings again if you need them
    movementIndex.load("MovementIndexSettings.grt");

    return EXIT_SUCCESS;
}
Developer: BryanBo-Cao, Project: grt, Lines: 49, Source: MovementIndexExample.cpp
Example 14: Vector
bool LinearRegression::predict_(VectorFloat &inputVector){

    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }

    if( inputVector.size() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << int( inputVector.size() ) << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], inputVectorRanges[n].minValue, inputVectorRanges[n].maxValue, 0, 1);
        }
    }

    regressionData[0] = w0;
    for(UINT j=0; j<numInputDimensions; j++){
        regressionData[0] += inputVector[j] * w[j];
    }

    if( useScaling ){
        for(UINT n=0; n<numOutputDimensions; n++){
            regressionData[n] = scale(regressionData[n], 0, 1, targetVectorRanges[n].minValue, targetVectorRanges[n].maxValue);
        }
    }

    return true;
}
Developer: BryanBo-Cao, Project: grt, Lines: 33, Source: LinearRegression.cpp
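A rough end-to-end sketch of how this predictor is normally used (GRT headers assumed; the file name is hypothetical; RegressionData, train, predict and getRegressionData are assumed to follow the usual GRT regression workflow):

RegressionData trainingData;
if( trainingData.load( "RegressionData.grt" ) ){     // hypothetical file
    LinearRegression regression;
    if( regression.train( trainingData ) ){
        VectorFloat input = trainingData[0].getInputVector();
        if( regression.predict( input ) ){
            VectorFloat output = regression.getRegressionData();
        }
    }
}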
Example 15: Vector
bool RegressionTree::predict_(VectorFloat &inputVector){

    if( !trained ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }

    if( tree == NULL ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - Tree pointer is null!" << std::endl;
        return false;
    }

    if( inputVector.size() != numInputDimensions ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << inputVector.size() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = scale(inputVector[n], inputVectorRanges[n].minValue, inputVectorRanges[n].maxValue, 0, 1);
        }
    }

    if( !tree->predict( inputVector, regressionData ) ){
        Regressifier::errorLog << "predict_(VectorFloat &inputVector) - Failed to predict!" << std::endl;
        return false;
    }

    return true;
}
Developer: sboettcher, Project: grt, Lines: 30, Source: RegressionTree.cpp
Example 16: inputVector
UINT KMeansQuantizer::quantize(const VectorFloat &inputVector){

    if( !trained ){
        errorLog << "quantize(const VectorFloat &inputVector) - The quantizer has not been trained!" << std::endl;
        return 0;
    }

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "quantize(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return 0;
    }

    //Find the minimum cluster
    Float minDist = grt_numeric_limits< Float >::max();
    UINT quantizedValue = 0;

    for(UINT k=0; k<numClusters; k++){
        //Compute the squared Euclidean distance
        quantizationDistances[k] = 0;
        for(UINT i=0; i<numInputDimensions; i++){
            quantizationDistances[k] += grt_sqr( inputVector[i]-clusters[k][i] );
        }
        if( quantizationDistances[k] < minDist ){
            minDist = quantizationDistances[k];
            quantizedValue = k;
        }
    }

    featureVector[0] = quantizedValue;
    featureDataReady = true;

    return quantizedValue;
}
Developer: pscholl, Project: grt, Lines: 33, Source: KMeansQuantizer.cpp
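A possible usage sketch (GRT headers assumed; the number of clusters and the constructor signature are assumptions; train and quantize are used as in the code above):

ClassificationData rawData;
// ... fill rawData with training samples ...

KMeansQuantizer quantizer( 8 );                      // assumed: 8 clusters
if( quantizer.train( rawData ) ){
    VectorFloat sample = rawData[0].getSample();
    UINT symbol = quantizer.quantize( sample );      // index of the nearest cluster
}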
Example 17: VectorFloat
VectorFloat Derivative::computeDerivative(const VectorFloat &x) {

    if( !initialized ) {
        errorLog << "computeDerivative(const VectorFloat &x) - Not Initialized!" << std::endl;
        return VectorFloat();
    }

    if( x.size() != numInputDimensions ) {
        errorLog << "computeDerivative(const VectorFloat &x) - The Number Of Input Dimensions (" << numInputDimensions << ") does not match the size of the input vector (" << x.size() << ")!" << std::endl;
        return VectorFloat();
    }

    VectorFloat y;
    if( filterData ) {
        y = filter.filter( x );
    } else y = x;

    for(UINT n=0; n<numInputDimensions; n++) {
        processedData[n] = (y[n]-yy[n])/delta;
        yy[n] = y[n];
    }

    if( derivativeOrder == SECOND_DERIVATIVE ) {
        Float tmp = 0;
        for(UINT n=0; n<numInputDimensions; n++) {
            tmp = processedData[n];
            processedData[n] = (processedData[n]-yyy[n])/delta;
            yyy[n] = tmp;
        }
    }

    return processedData;
}
Developer: codeflakes0, Project: grt, Lines: 33, Source: Derivative.cpp
Example 18: inputVector
UINT RBMQuantizer::quantize(const VectorFloat &inputVector){

    if( !trained ){
        errorLog << "quantize(const VectorFloat &inputVector) - The quantizer model has not been trained!" << std::endl;
        return 0;
    }

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "quantize(const VectorFloat &inputVector) - The size of the inputVector (" << inputVector.getSize() << ") does not match that of the filter (" << numInputDimensions << ")!" << std::endl;
        return 0;
    }

    if( !rbm.predict( inputVector ) ){
        errorLog << "quantize(const VectorFloat &inputVector) - Failed to quantize input!" << std::endl;
        return 0;
    }

    quantizationDistances = rbm.getOutputData();

    //Search for the neuron with the maximum output
    UINT quantizedValue = 0;
    Float maxValue = 0;
    for(UINT k=0; k<numClusters; k++){
        if( quantizationDistances[k] > maxValue ){
            maxValue = quantizationDistances[k];
            quantizedValue = k;
        }
    }

    featureVector[0] = quantizedValue;
    featureDataReady = true;

    return quantizedValue;
}
Developer: sgrignard, Project: grt, Lines: 34, Source: RBMQuantizer.cpp
Example 19: Vector
bool MultidimensionalRegression::predict_(VectorFloat &inputVector){

    if( !trained ){
        errorLog << "predict_(VectorFloat &inputVector) - Model Not Trained!" << std::endl;
        return false;
    }

    if( inputVector.getSize() != numInputDimensions ){
        errorLog << "predict_(VectorFloat &inputVector) - The size of the input Vector (" << inputVector.getSize() << ") does not match the num features in the model (" << numInputDimensions << ")" << std::endl;
        return false;
    }

    if( useScaling ){
        for(UINT n=0; n<numInputDimensions; n++){
            inputVector[n] = grt_scale(inputVector[n], inputVectorRanges[n].minValue, inputVectorRanges[n].maxValue, 0.0, 1.0);
        }
    }

    for(UINT n=0; n<numOutputDimensions; n++){
        if( !regressionModules[ n ]->predict( inputVector ) ){
            errorLog << "predict_(VectorFloat &inputVector) - Failed to predict for regression module " << n << std::endl;
        }
        regressionData[ n ] = regressionModules[ n ]->getRegressionData()[0];
    }

    if( useScaling ){
        for(UINT n=0; n<numOutputDimensions; n++){
            regressionData[n] = grt_scale(regressionData[n], 0.0, 1.0, targetVectorRanges[n].minValue, targetVectorRanges[n].maxValue);
        }
    }

    return true;
}
Developer: BryanBo-Cao, Project: grt, Lines: 35, Source: MultidimensionalRegression.cpp
Example 20: VectorFloat
VectorFloat MovingAverageFilter::filter(const VectorFloat &x){

    //If the filter has not been initialised then return 0, otherwise filter x and return y
    if( !initialized ){
        errorLog << "filter(const VectorFloat &x) - The filter has not been initialized!" << std::endl;
        return VectorFloat();
    }

    if( x.size() != numInputDimensions ){
        errorLog << "filter(const VectorFloat &x) - The size of the input vector (" << x.getSize() << ") does not match that of the number of dimensions of the filter (" << numInputDimensions << ")!" << std::endl;
        return VectorFloat();
    }

    if( ++inputSampleCounter > filterSize ) inputSampleCounter = filterSize;

    //Add the new value to the buffer
    dataBuffer.push_back( x );

    for(unsigned int j=0; j<numInputDimensions; j++){
        processedData[j] = 0;
        for(unsigned int i=0; i<inputSampleCounter; i++) {
            processedData[j] += dataBuffer[i][j];
        }
        processedData[j] /= Float(inputSampleCounter);
    }

    return processedData;
}
Developer: CV-IP, Project: grt, Lines: 28, Source: MovingAverageFilter.cpp
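A short smoothing sketch (GRT headers assumed; the constructor argument order — window size, then number of dimensions — is an assumption, and the scalar filter(Float) overload is assumed to exist alongside the vector version above):

MovingAverageFilter maf( 5, 1 );         // assumed: window size 5, 1 input dimension
for(UINT t=0; t<50; t++){
    Float noisy = sin( t * 0.3 ) + 0.1 * ((rand() % 100) / 100.0 - 0.5);
    Float smoothed = maf.filter( noisy );
}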
Note: The VectorFloat class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; the copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.