This article collects typical usage examples of the C++ TheMatrix class. If you are wondering how to use TheMatrix in C++, or what concrete TheMatrix examples look like, the curated class code examples below may help.
The following presents 18 code examples of the TheMatrix class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: test6

double test6() {
    string t0[] = {"101",
                   "011",
                   "101",
                   "010"};
    vector<string> p0(t0, t0 + sizeof(t0) / sizeof(string));
    TheMatrix *obj = new TheMatrix();
    clock_t start = clock();
    int my_answer = obj->MaxArea(p0);
    clock_t end = clock();
    delete obj;
    cout << "Time: " << (double)(end - start) / CLOCKS_PER_SEC << " seconds" << endl;
    int p1 = 8;
    cout << "Desired answer: " << endl;
    cout << "\t" << p1 << endl;
    cout << "Your answer: " << endl;
    cout << "\t" << my_answer << endl;
    if (p1 != my_answer) {
        cout << "DOESN'T MATCH!!!!" << endl << endl;
        return -1;
    }
    else {
        cout << "Match :-)" << endl << endl;
        return (double)(end - start) / CLOCKS_PER_SEC;
    }
}

Author: yuzhou627, Project: TopCoder, Lines: 26, Source: TheMatrix.cpp
Example 2:

/** The subgradient is chosen as sgn(w).
 */
void CL1N1::ComputeRegAndGradient(CModel& model, double& reg, TheMatrix& grad)
{
    reg = 0;
    TheMatrix &w = model.GetW();
    w.Norm1(reg);
    grad.Zero();
    for(int i = 0; i < w.Length(); i++)
    {
        double val = 0;
        w.Get(i, val);
        grad.Set(i, SML::sgn(val));
    }
}

Author: funkey, Project: bmrm, Lines: 15, Source: l1n1.cpp
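Here Norm1() accumulates reg = ||w||_1 and each gradient entry is the componentwise subgradient sgn(w_i), with sgn(0) = 0. A minimal standalone sketch of the same computation on a plain std::vector (TheMatrix and SML::sgn are replaced here purely for illustration):

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    std::vector<double> w = {0.5, -2.0, 0.0, 3.0};
    double reg = 0.0;
    std::vector<double> grad(w.size());
    for (size_t i = 0; i < w.size(); ++i) {
        reg += std::fabs(w[i]);            // accumulate ||w||_1
        grad[i] = (w[i] > 0) - (w[i] < 0); // sgn(w[i]), 0 at 0
    }
    std::printf("reg = %g\n", reg);        // prints reg = 5.5; grad = {1, -1, 0, 1}
    return 0;
}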
Example 3: LossAndGrad

/**
 * Compute loss and partial derivative of logistic loss w.r.t. f
 *
 * @param loss [write] loss value computed.
 * @param f    [r/w]   = X*w
 * @param l    [write] partial derivative of loss w.r.t. f
 */
void CLogisticLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    l.Zero();  // for gradient computation, i.e. grad := l'*X
    f.ElementWiseMult(_data->labels());
    double* f_array = f.Data();  // pointer to memory location of f (faster element access)
    int len = f.Length();
    double exp_yf = 0.0;
    for(int i = 0; i < len; i++)
    {
        if(fabs(f_array[i]) == 0.0)
        {
            loss += LN2;
            l.Set(i, -0.5);
        }
        else if (f_array[i] > 0.0)
        {
            exp_yf = exp(-f_array[i]);
            loss += log(1 + exp_yf);
            l.Set(i, -exp_yf / (1 + exp_yf));
        }
        else
        {
            exp_yf = exp(f_array[i]);
            loss += log(1 + exp_yf) - f_array[i];
            l.Set(i, -1.0 / (1 + exp_yf));
        }
    }
    l.ElementWiseMult(_data->labels());
}

Author: funkey, Project: bmrm, Lines: 37, Source: logisticloss.cpp
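The three-way branch above is a numerical-stability device, not part of the math: log(1 + exp(-yf)) is evaluated directly only when yf > 0; for yf < 0 it is rewritten as log(1 + exp(yf)) - yf so that exp() never overflows; and yf = 0 short-circuits to LN2 = log 2. A minimal self-contained sketch of the same trick (logistic_loss is a hypothetical helper written for this page, not part of bmrm):

#include <cmath>
#include <cstdio>

// Numerically stable log(1 + exp(-yf)).
double logistic_loss(double yf) {
    if (yf > 0.0)
        return std::log(1.0 + std::exp(-yf));   // exp(-yf) <= 1, safe
    return std::log(1.0 + std::exp(yf)) - yf;   // exp(yf) <= 1, safe
}

int main() {
    std::printf("%g\n", logistic_loss(0.0));     // ln 2 ~ 0.693147
    std::printf("%g\n", logistic_loss(1000.0));  // ~0, no overflow
    std::printf("%g\n", logistic_loss(-1000.0)); // ~1000, no overflow
    return 0;
}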
Example 4: LossAndGrad

/**
 * Compute loss and gradient of Least Absolute Deviation loss w.r.t. f
 *
 * @param loss [write] loss value computed.
 * @param f    [r/w]   = X*w
 * @param l    [write] partial derivative of loss w.r.t. f
 */
void CLeastAbsDevLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    loss = 0;
    l.Zero();
    double *Y_array = _data->labels().Data();
    double *f_array = f.Data();
    int len = f.Length();
    for(int i = 0; i < len; i++)
    {
        double f_minus_y = f_array[i] - Y_array[i];
        loss += fabs(f_minus_y);
        l.Set(i, SML::sgn(f_minus_y));
    }
}

Author: funkey, Project: bmrm, Lines: 21, Source: leastabsdevloss.cpp
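In formulas, the loop above evaluates the least-absolute-deviation loss and its subgradient (with sgn(0) = 0, as in SML::sgn):

$$\mathrm{loss} = \sum_i \lvert f_i - y_i \rvert, \qquad l_i = \operatorname{sgn}(f_i - y_i).$$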
Example 5: LossAndGrad

/**
 * Compute loss and gradient of novelty detection loss.
 * CAUTION: f is passed by reference and is changed within this
 * function. This is done for efficiency reasons; otherwise we would
 * have had to create a new copy of f.
 *
 * @param loss [write] loss value computed.
 * @param f    [read/write] prediction vector.
 * @param l    [write] partial derivative of loss function w.r.t. f
 */
void CNoveltyLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    double* f_array = f.Data();  // pointer to memory location of f (faster element access)
    int len = f.Length();
    l.Zero();  // grad := l'*X
    for(int i = 0; i < len; i++)
    {
        if(rho > f_array[i])
        {
            loss += rho - f_array[i];
            l.Set(i, -1.0);
        }
    }
}

Author: funkey, Project: bmrm, Lines: 25, Source: noveltyloss.cpp
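The loop realizes a one-sided hinge at margin rho: only predictions that fall below rho contribute, each with constant slope -1:

$$\mathrm{loss} = \sum_i \max(0,\ \rho - f_i), \qquad l_i = \begin{cases}-1, & f_i < \rho,\\ 0, & \text{otherwise.}\end{cases}$$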
Example 6: Loss

/**
 * Compute NDCGRank loss. CAUTION: f is passed by reference and is
 * changed within this function. This is done for efficiency reasons;
 * otherwise we would have had to create a new copy of f.
 *
 * @param loss [write] loss value computed.
 * @param f    [read/write] prediction vector.
 */
void CNDCGRankLoss::Loss(Scalar& loss, TheMatrix& f)
{
    // chteo: here we make use of the subset information
    loss = 0.0;
    Scalar* f_array = f.Data();
    for(int q = 0; q < _data->NumOfSubset(); q++)
    {
        int offset = _data->subset[q].startIndex;
        int subsetsize = _data->subset[q].size;
        current_ideal_pi = sort_vectors[q];
        vector<double> b = bs[q];
        //compute_coefficients(offset, subsetsize, y_array, current_ideal_pi, a, b);
        /* find the best permutation */
        find_permutation(subsetsize, offset, a, b, c, f_array, pi);
        /* compute the loss */
        double value;
        delta(subsetsize, a, b, pi, value);
        loss += value;
        for (int i = 0; i < subsetsize; i++){
            loss = loss + c[i] * (get(f_array, offset, pi[i]) - get(f_array, offset, i));
        }
        //free(c);
        //free(a);
        //free(b);
        //free(pi);
    }
}

Author: kingang1986, Project: shapematching, Lines: 43, Source: ndcgrankloss.cpp
Example 7: Loss

/**
 * Compute logistic loss. CAUTION: f is passed by reference and is
 * changed within this function. This is done for efficiency reasons;
 * otherwise we would have had to create a new copy of f.
 *
 * @param loss [write] loss value computed.
 * @param f    [read/write] prediction vector.
 */
void CLogisticLoss::Loss(double& loss, TheMatrix& f)
{
    loss = 0;
    f.ElementWiseMult(_data->labels());  // f = y*f
    double* f_array = f.Data();  // pointer to memory location of f (faster element access)
    int len = f.Length();
    for(int i = 0; i < len; i++)
    {
        if(fabs(f_array[i]) == 0.0)
            loss += LN2;
        else if (f_array[i] > 0.0)
            loss += log(1 + exp(-f_array[i]));
        else
            loss += log(1 + exp(f_array[i])) - f_array[i];
    }
}

Author: funkey, Project: bmrm, Lines: 24, Source: logisticloss.cpp
Example 8:

void CL2N2::ComputeRegAndGradient(CModel& model, double& reg, TheMatrix& grad)
{
    reg = 0;
    TheMatrix &w = model.GetW();
    w.Norm2(reg);
    reg = 0.5*reg*reg;
    grad.Assign(w);
}

Author: funkey, Project: bmrm, Lines: 8, Source: l2n2.cpp
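In other words, this is the standard quadratic regularizer: Norm2() returns ||w||_2, which is then squared and halved, and the gradient is w itself:

$$\Omega(w) = \tfrac{1}{2}\lVert w \rVert_2^2, \qquad \nabla_w \Omega(w) = w.$$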
Example 9: LossAndGrad

/**
 * Compute loss and partial derivative of NDCGRank loss w.r.t. f
 *
 * @param loss [write] loss value computed.
 * @param f    [r/w]   = X*w
 * @param l    [write] partial derivative of loss w.r.t. f
 */
void CNDCGRankLoss::LossAndGrad(Scalar& loss, TheMatrix& f, TheMatrix& l)
{
    // chteo: here we make use of the subset information
    loss = 0.0;
    l.Zero();
    Scalar* f_array = f.Data();
    for(int q = 0; q < _data->NumOfSubset(); q++)
    {
        //cout << "q = " << q << endl;
        int offset = _data->subset[q].startIndex;
        int subsetsize = _data->subset[q].size;
        current_ideal_pi = sort_vectors[q];
        vector<double> b = bs[q];
        //compute_coefficients(offset, subsetsize, y_array, current_ideal_pi, a, b);
        //cout << "before finding permutation\n";
        /* find the best permutation */
        find_permutation(subsetsize, offset, a, b, c, f_array, pi);
        //cout << "after finding permutation\n";
        //cout << "before finding delta\n";
        /* compute the loss */
        double value;
        delta(subsetsize, a, b, pi, value);
        //cout << "before finding delta\n";
        loss += value;
        for (int i = 0; i < subsetsize; i++){
            loss = loss + c[i] * (get(f_array, offset, pi[i]) - get(f_array, offset, i));
        }
        for (int i = 0; i < subsetsize; i++){
            //add(l, offset, i, c[pi[i]] - c[i]);
            add(l, offset, i, -c[i]);
            add(l, offset, pi[i], c[i]);
        }
    }
}

Author: kingang1986, Project: shapematching, Lines: 50, Source: ndcgrankloss.cpp
Example 10: ComputeLoss

/** Flag = 0: margin loss, no label loss (the label loss will always be zero).
 *        1: margin loss and label loss.
 */
void CSMMMulticlassLoss::ComputeLoss(vector<unsigned int> y, vector<unsigned int> ylabel, vector<unsigned int> ybar, vector<unsigned int> ybarlabel, const CSeqMulticlassFeature::seqfeature_struct &x, const TheMatrix &w, double &marginloss, double &labelloss, int flag)
{
    unsigned int i;
    double w_dot_phi1 = 0;
    double w_dot_phi2 = 0;
    marginloss = 0;
    unsigned int start;
    if(is_first_phi1_used)
        start = 0;
    else
        start = 1;
    for(i = start; i < ybar.size(); i++)
    {
        _data->TensorPhi1(x.phi_1[ybar[i]], ybarlabel[i], 0, tphi_1);
        //tphi_1->Print();
        w.Dot(*(tphi_1), w_dot_phi1);
        marginloss += w_dot_phi1;
        //printf("%d(%d):%2.4f\t", ybar[i], ybarlabel[i], marginloss);
    }
    for(i = 1; i < ybar.size(); i++)
    {
        int vb = 0;
        _data->TensorPhi2(x.phi_2[ybar[i-1]][ybar[i]-ybar[i-1]-1], ybarlabel[i-1], ybarlabel[i], 0, vb, tphi_2);
        w.Dot(*(tphi_2), w_dot_phi2);
        marginloss += w_dot_phi2;
    }
    if(ybar.size() > 0)
    {
        //grad.Add(*(X[i].phi_2[ybar[ybar.size()-1]][X[i].len-1 - ybar[ybar.size()-1]-1]));
        _data->TensorPhi2(x.phi_2[ybar[ybar.size()-1]][x.len - ybar[ybar.size()-1]-1], ybarlabel[ybar.size()-1], 0, 0, 0, tphi_2);
        w.Dot(*(tphi_2), w_dot_phi2);
        marginloss += w_dot_phi2;
    }
    //vector<unsigned int> yss = Boundry2StatSequence(y, ylabel, x.len);
    //vector<unsigned int> ybarss = Boundry2StatSequence(ybar, ybarlabel, x.len);
    //labelloss = Labelloss(yss, ybarss);
    labelloss = AllDelta(ybar, y, ybarlabel, ylabel, x.len);
}

Author: funkey, Project: bmrm, Lines: 45, Source: smmmulticlassloss.cpp
Example 11: KawigiEdit_RunTest

// BEGIN KAWIGIEDIT TESTING
// Generated by KawigiEdit 2.1.4 (beta) modified by pivanof
bool KawigiEdit_RunTest(int testNum, vector<string> p0, bool hasAnswer, int p1) {
    cout << "Test " << testNum << ": [" << "{";
    for (int i = 0; int(p0.size()) > i; ++i) {
        if (i > 0) {
            cout << ",";
        }
        cout << "\"" << p0[i] << "\"";
    }
    cout << "}";
    cout << "]" << endl;
    TheMatrix *obj;
    int answer;
    obj = new TheMatrix();
    clock_t startTime = clock();
    answer = obj->MaxArea(p0);
    clock_t endTime = clock();
    delete obj;
    bool res;
    res = true;
    cout << "Time: " << double(endTime - startTime) / CLOCKS_PER_SEC << " seconds" << endl;
    if (hasAnswer) {
        cout << "Desired answer:" << endl;
        cout << "\t" << p1 << endl;
    }
    cout << "Your answer:" << endl;
    cout << "\t" << answer << endl;
    if (hasAnswer) {
        res = answer == p1;
    }
    if (!res) {
        cout << "DOESN'T MATCH!!!!" << endl;
    } else if (double(endTime - startTime) / CLOCKS_PER_SEC >= 2) {
        cout << "FAIL the timeout" << endl;
        res = false;
    } else if (hasAnswer) {
        cout << "Match :-)" << endl;
    } else {
        cout << "OK, but is it right?" << endl;
    }
    cout << "" << endl;
    return res;
}

Author: ArtieTheOnes, Project: algorithm, Lines: 44, Source: div1_250.cpp
Example 12: DisplayAfterTrainingInfo

void CBMRM::DisplayAfterTrainingInfo(unsigned int iter, double finalExactObjVal,
                                     double approxObjVal, double loss,
                                     TheMatrix& w_best, CTimer& lossAndGradientTime,
                                     CTimer& innerSolverTime, CTimer& totalTime)
{
    // legends
    if(verbosity >= 1)
    {
        printf("\n[Legends]\n");
        if(verbosity > 1)
            printf("pobj: primal objective function value"
                   "\naobj: approximate objective function value\n");
        printf("gam: gamma (approximation error) "
               "\neps: lower bound on gam "
               "\nloss: loss function value "
               "\nreg: regularizer value\n");
    }
    double norm1 = 0, norm2 = 0, norminf = 0;
    w_best.Norm1(norm1);
    w_best.Norm2(norm2);
    w_best.NormInf(norminf);
    printf("\nNote: the final w is the w_t where J(w_t) is the smallest.\n");
    printf("No. of iterations: %d\n", iter);
    printf("Primal obj. val.:  %.6e\n", finalExactObjVal);
    printf("Approx obj. val.:  %.6e\n", approxObjVal);
    printf("Primal - Approx.:  %.6e\n", finalExactObjVal - approxObjVal);
    printf("Loss:              %.6e\n", loss);
    printf("|w|_1:             %.6e\n", norm1);
    printf("|w|_2:             %.6e\n", norm2);
    printf("|w|_oo:            %.6e\n", norminf);
    // display timing profile
    printf("\nCPU seconds in:\n");
    printf("1. loss and gradient: %8.2f\n", lossAndGradientTime.CPUTotal());
    printf("2. solver:            %8.2f\n", innerSolverTime.CPUTotal());
    printf("   Total:             %8.2f\n", totalTime.CPUTotal());
    printf("Wall-clock total:     %8.2f\n", totalTime.WallclockTotal());
}

Author: funkey, Project: bmrm, Lines: 42, Source: bmrm.cpp
Example 13: LossAndGrad

/**
 * Compute loss and gradient of Huber hinge loss.
 * CAUTION: f is passed by reference and is changed within this
 * function. This is done for efficiency reasons; otherwise we would
 * have had to create a new copy of f.
 *
 * @param loss [write] loss value computed.
 * @param f    [read/write] prediction vector.
 * @param l    [write] partial derivative of loss function w.r.t. f
 */
void CHuberHingeLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    f.ElementWiseMult(_data->labels());
    double* yf = f.Data();
    double* Y = _data->labels().Data();
    int len = f.Length();
    loss = 0.0;
    l.Zero();
    for(int i = 0; i < len; i++)
    {
        double v = 1 - yf[i];
        if(h < v)
        {
            loss += v;
            l.Set(i, -Y[i]);
        }
        else if(-h > v) {}
        else
        {
            loss += (v+h)*(v+h)/4/h;
            l.Set(i, -Y[i]*(v+h)/2/h);
        }
    }
}

Author: funkey, Project: bmrm, Lines: 35, Source: huberhingeloss.cpp
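For reference, with v = 1 - y_i f_i and smoothing width h, the loop above implements this piecewise definition; the derivative w.r.t. f_i follows by the chain rule through v (dv/df_i = -y_i):

$$\ell(v) = \begin{cases} v, & v > h,\\ \dfrac{(v+h)^2}{4h}, & -h \le v \le h,\\ 0, & v < -h, \end{cases} \qquad \frac{\partial \ell}{\partial f_i} = \begin{cases} -y_i, & v > h,\\ -y_i\,\dfrac{v+h}{2h}, & -h \le v \le h,\\ 0, & v < -h. \end{cases}$$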
Example 14: ComputeLossAndGradient

void CGenericLoss::ComputeLossAndGradient(double& loss, TheMatrix& grad)
{
    loss = 0;
    grad.Zero();
    TheMatrix &w = _model->GetW();
    double* dat = w.Data();
    double* raw_g = grad.Data();
    {
        double* resy;
        double* resybar;
        map<int,int> ybar;
        resy = new double[data->dim()];
        resybar = new double[data->dim()];
        minimize(data->nodeFeatures, &(data->nodeLabels), data->edgeFeatures, dat, dat + data->nNodeFeatures, ybar,
                 data->nNodeFeatures, data->nEdgeFeatures, data->lossPositive, data->lossNegative, data->indexEdge,
                 NULL, 1, data->firstOrderResponses);
        Phi(data->nodeFeatures, &(data->nodeLabels), data->edgeFeatures, data->nNodeFeatures, data->nEdgeFeatures,
            resy, resy + data->nNodeFeatures, data->indexEdge);
        Phi(data->nodeFeatures, &ybar, data->edgeFeatures, data->nNodeFeatures, data->nEdgeFeatures,
            resybar, resybar + data->nNodeFeatures, data->indexEdge);
        loss += LabelLoss(data->nodeLabels, ybar, data->lossPositive, data->lossNegative, LOSS);
        for (int j = 0; j < (int) data->dim(); j++)
        {
            loss += dat[j] * (resybar[j] - resy[j]);
            raw_g[j] += (1.0 / data->N) * (resybar[j] - resy[j]);
        }
        delete [] resy;
        delete [] resybar;
    }
    loss = loss / data->N;
}

Author: interxuxing, Project: bmrm_demo, Lines: 36, Source: genericloss.cpp
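Reading off the code: ybar is the loss-augmented prediction produced by minimize(), Phi() evaluates the joint feature map into resy/resybar, and each example contributes the margin-rescaled structured hinge (before the 1/N averaging):

$$\ell(w) = \Delta(y, \bar y) + \langle w,\ \Phi(x,\bar y) - \Phi(x,y) \rangle, \qquad \nabla_w \ell = \Phi(x,\bar y) - \Phi(x,y).$$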
示例15: g
/** Compute loss and gradient
*/
void CSMMMulticlassLoss::ComputeLossAndGradient(double& loss, TheMatrix& grad)
{
iterNum ++;
TheMatrix &w = _model->GetW();
loss = 0;
grad.Zero();
TheMatrix g(grad, SML::DENSE);
const vector<CSeqMulticlassLabel::seqlabel_struct> &Y = _data->labels();
const vector<CSeqMulticlassFeature::seqfeature_struct> &X = _data->features();
unsigned int trainExNum = 0;
vector <int > cvmark = _data->Getcvmark();
for(unsigned int i=0; i < m; i++)
{
if(cvmark.size()!=0)
{
if(cvmark[i]!=SMM::TRAIN_DATA)
continue;
}
trainExNum ++;
//if(cvmark)
vector<unsigned int> ybar(X[i].len,0);
vector<unsigned int> ybarlabel(X[i].len,0);
double labelloss = 0;
double marginloss = 0;
double w_dot_g = 0.0;;
// find best label y' and return the score wrt to y'
if(verbosity>=2)
{
cout <<"ex:"<< i<< endl;fflush(stdout);
}
if(is_single_action_persequence)
find_best_label_grammer(Y[i].pos,Y[i].type, X[i], w, ybar, ybarlabel, marginloss, labelloss, 0, _data->getNumOfClass());
else
find_best_label(Y[i].pos,Y[i].type, X[i], w, ybar, ybarlabel, marginloss, labelloss, 0, _data->getNumOfClass());
double labelloss_y = 0;
double marginloss_y = 0;
double labelloss_ybar = 0;
double marginloss_ybar = 0;
ComputeLoss(Y[i].pos,Y[i].type,ybar,ybarlabel,X[i],w,marginloss_ybar,labelloss_ybar,1);
if(lossw[0]!=0)
labelloss+=lossw[0];
if(lastDuration>0)
{
marginloss = marginloss_ybar;
labelloss = labelloss_ybar;
}
if(verbosity>=3)
{
ComputeLoss(Y[i].pos,Y[i].type,Y[i].pos,Y[i].type,X[i],w,marginloss_y,labelloss_y,1);
printf("dp------marginloss:%2.4f---labelloss:%2.4f------\n",marginloss,labelloss);
printf("ybar----marginloss:%2.4f---labelloss:%2.4f------\n",marginloss_ybar,labelloss_ybar);
printf("y-------marginloss:%2.4f---labelloss:%2.4f------\n",marginloss_y,labelloss_y);
if(abs(labelloss_ybar-labelloss)>1e-5)
{
printf("labelloss doesn't match!\n");
//exit(0);
}
if(abs(marginloss_ybar-marginloss)>1e-5)
{
printf("marginloss_ybar_dp:%2.4f != marginloss_ybar_computeLoss:%2.4f\n",marginloss,marginloss_ybar);
printf("marginloss doesn't match!\n");
}
}
// construct the gradient vector for the part of true y
const vector<unsigned int> &y = Y[i].pos;
const vector<unsigned int> &ylabel = Y[i].type;
g.Zero();
for(unsigned int j=0; j < y.size(); j++)
{
//g.Add(*(X[i].phi_1[y[j]]));
//g.Add(*(X[i].phi_2[y[j-1]][y[j]-y[j-1]-1]));
_data->TensorPhi1(X[i].phi_1[y[j]],ylabel[j],0,tphi_1);
g.Add(*tphi_1);
if(j > 0)
{
_data->TensorPhi2(X[i].phi_2[y[j-1]][y[j]-y[j-1]-1], ylabel[j-1], ylabel[j], 0,0,tphi_2);
g.Add(*tphi_2);
}
}
if(y.size() > 0)
{
//g.Add(*(X[i].phi_2[y[y.size()-1]][X[i].len-1 - y[y.size()-1]-1]));////
_data->TensorPhi2(X[i].phi_2[y[y.size()-1]][X[i].len - y[y.size()-1]-1 ], ylabel[y.size()-1], 0,0,0,tphi_2);
g.Add(*tphi_2);
}
// for predicted y'
//.........这里部分代码省略.........
开发者ID:funkey,项目名称:bmrm,代码行数:101,代码来源:smmmulticlassloss.cpp
Example 16: add

void CNDCGRankLoss::add(TheMatrix &l, int offset, int i, double value){
    Scalar temp;
    l.Get(offset + current_ideal_pi[i], temp);
    l.Set(offset + current_ideal_pi[i], temp + value);
}

Author: kingang1986, Project: shapematching, Lines: 5, Source: ndcgrankloss.cpp
Example 17: find_best_label_grammer

/** Find the best label with a grammar (with label loss): g(w) := max_y' <w,\phi(x,y')> + Delta(y', y)
 *
 * @param x          [read]  sequence
 * @param y          [read]  actual label for x
 * @param w          [read]  weight vector
 * @param ybar       [write] found best label
 * @param marginloss [write] margin loss <w,\Phi(x,y')> w.r.t. the best y'
 * @param labelloss  [write] label loss \Delta(y',y) w.r.t. the best y'
 */
void CSMMMulticlassLoss::find_best_label_grammer(const vector<unsigned int> &y, const vector<unsigned int> &ylabel, const CSeqMulticlassFeature::seqfeature_struct &x, const TheMatrix &w, vector<unsigned int> &ybar, vector<unsigned int> &ybarlabel, double &marginloss, double &labelloss, unsigned int personid, unsigned int classNum)
{
    // reset return values
    marginloss = 0;
    labelloss = 0;
    ybar.clear();
    ybarlabel.clear();
    /** The margin value vector used in dynamic programming */
    vector< vector<double> > M(x.len+1, vector<double>(classNum, 0));
    /** The label loss value vector used in dynamic programming */
    vector< vector<double> > L(x.len+1, vector<double>(classNum, 0));
    /** The back-pointer vectors used in dynamic programming to retrieve the optimal path */
    // The positions
    vector< vector<int> > A(x.len+1, vector<int>(classNum, -1));
    // The class labels
    vector< vector<int> > C(x.len+1, vector<int>(classNum, 0));
    double maxval = -SML::INFTY;
    double w_dot_phi1 = 0;
    double w_dot_phi2 = 0;
    double marginval = 0;
    double labelval = 0;
    unsigned int right = 0;
    unsigned int left = 0;
    unsigned int start = 0;
    unsigned int end = 0;
    unsigned int classID = 0;
    unsigned int classIDPrev = 0;
    double sum = 0;
    // compute DP statistics for positions 1 to len-1
    // L[0] += y.size()-2;
    // A[1] = 0;
    for(classID = 0; classID < classNum; classID++)
    {
        A[1][classID] = 0;
        //C[1][classID] = 0;
    }
    //debug
    //printf("x.len:%d", x.len);
    if(is_first_phi1_used)
    {
        right = 0;
        for(classID = 0; classID < classNum; classID++)
        {
            maxval = -SML::INFTY;
            w_dot_phi1 = 0.0;
            _data->TensorPhi1(x.phi_1[right], classID, 0, tphi_1);
            //tphi_1->Print();
            w.Dot(*(tphi_1), w_dot_phi1);
            marginval = w_dot_phi1;
            sum = marginval;
            if(sum > maxval)
            {
                M[right][classID] = marginval;
                maxval = sum;
            }
        }
    }
    for(right = 1; right < x.len+1; right++)
    {
        for(classID = 0; classID < classNum; classID++)
        {
            // \Phi = (phi1, phi2[left,right])
            // <w, \Phi> = <w,phi1> + <w,phi[left,right]>
            maxval = -SML::INFTY;
            w_dot_phi1 = 0.0;
            //w.Dot(*(x.phi_1[right]), w_dot_phi1);
            //printf("pos:%d,classid:%d ", right, classID); fflush(stdout);
            //x.phi_1[right]->Print();
            if(right < x.len)
            {
                _data->TensorPhi1(x.phi_1[right], classID, 0, tphi_1);
                //tphi_1->Print();
                w.Dot(*(tphi_1), w_dot_phi1);
                // ... (the rest of this function is omitted in the original listing)

Author: funkey, Project: bmrm, Lines: 101, Source: smmmulticlassloss.cpp
Example 18: main

int main(int argc, char* argv[])
{
    if (argc < 4) {
        printf("usage: %s foundkey bitpos framecount (framecount2 burst2)\n", argv[0]);
        return -1;
    }
    unsigned framecount = 0;
    uint64_t stop;
    sscanf(argv[1], "%" SCNx64, &stop);  // found key, parsed as hex (needs <cinttypes>)
    int pos;
    sscanf(argv[2], "%i", &pos);
    Bidirectional back;
    TheMatrix tm;
    back.doPrintCand(false);
    sscanf(argv[3], "%u", &framecount);
    uint64_t stop_val = Bidirectional::ReverseBits(stop);
    printf("#### Found potential key (bits: %i)####\n", pos);
    stop_val = back.Forwards(stop_val, 100, NULL);
    back.ClockBack(stop_val, 101 + pos);
    uint64_t tst;
    unsigned char bytes[16];
    char out[115];
    out[114] = '\0';
    int x = 0;
    printf("Framecount is %u\n", framecount);
    int framecount2 = -1;  // -1 marks "no second burst supplied"; must be signed for the >=0 check below
    if (argc >= 6) {
        if (strlen(argv[5]) != 114) {
            fprintf(stderr, "burst2 must be a 114 digit bitstring\n");
            exit(1);
        }
        sscanf(argv[4], "%i", &framecount2);
    }
    while (back.PopCandidate(tst)) {
        uint64_t orig = tm.CountUnmix(tst, framecount);
        orig = tm.KeyUnmix(orig);
        printf("KC(%i): ", x);
        for(int i = 7; i >= 0; i--) {
            printf("%02x ", (unsigned)(orig >> (8*i)) & 0xff);
        }
        x++;
        if (framecount2 >= 0) {
            uint64_t mix = tm.KeyMix(orig);
            mix = tm.CountMix(mix, framecount2);
            mix = back.Forwards(mix, 101, NULL);
            back.Forwards(mix, 114, bytes);
            int ok = 0;
            for (int bit = 0; bit < 114; bit++) {
                int byte = bit / 8;
                int b = bit & 0x7;
                int v = bytes[byte] & (1 << (7-b));
                char check = v ? '1' : '0';
                if (check == argv[5][bit]) ok++;
            }
            if (ok > 104) {
                printf(" *** MATCHED ***");
            } else {
                printf(" mismatch");
            }
        }
        printf("\n");
#if 0
        uint64_t mixed = back.Forwards(tst, 101, NULL);
        back.Forwards(mixed, 114, bytes);
        for (int bit = 0; bit < 114; bit++) {
            int byte = bit / 8;
            int b = bit & 0x7;
            int v = bytes[byte] & (1 << (7-b));
            out[bit] = v ? '1' : '0';
        }
        printf("cipher %s\n", out);
#endif
    }
}

Author: 0x0d, Project: kraken, Lines: 82, Source: find_kc.cpp
Note: The TheMatrix class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Consult each project's License before redistributing or reusing the code; do not repost without permission.