本文整理汇总了C++中create_example函数的典型用法代码示例。如果您正苦于以下问题:C++ create_example函数的具体用法?C++ create_example怎么用?C++ create_example使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了create_example函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: write_struct_model
void write_struct_model(char *file, STRUCTMODEL *sm,
STRUCT_LEARN_PARM *sparm)
{
/* Writes structural model sm to file file by delegating to a Python
   callback.  If sm->w is present and lin_reduce is set, the model's
   support vectors are first collapsed into a single SV that carries
   the dense weight vector w, which makes the written model smaller. */
PyObject *pFunc, *pValue;
// Reduce the support vectors if appropriate (linear model only).
if (sm->w && sm->lin_reduce) {
MODEL *m = sm->svm_model;
// Get rid of the old support vectors.  Slot 0 is unused by
// convention in SVM-light models, so freeing starts at index 1.
int i;
for (i=1; i<m->sv_num; ++i) {
free_example(m->supvec[i], 1);
}
free(m->supvec);
free(m->alpha);
// Create the new model SV structures: slot 0 stays NULL/0.0,
// slot 1 will hold the single compacted support vector, alpha=1.
m->supvec = (DOC**)my_malloc(sizeof(DOC*)*2);
m->supvec[0] = NULL;
m->alpha = (double*)my_malloc(sizeof(double)*2);
m->alpha[0]=0.0; m->alpha[1]=1.0;
m->sv_num = 2;
// Create the new support vector from the dense weight vector w.
SVECTOR *sv = create_svector_n(sm->w, sm->sizePsi,"",1.0);
m->supvec[1] = create_example(0,0,1,1.0,sv);
}
// Call the relevant Python function to do the actual serialization.
// Format "sNN": the two wrapper objects are passed with ownership
// transferred to the call, so no explicit DECREF is needed for them.
pFunc = getFunction(PYTHON_WRITE_MODEL);
pValue = PyObject_CallFunction
(pFunc,"sNN",file,StructModel_FromStructModel(sm),Sparm_FromSparm(sparm));
PY_RUNCHECK; // presumably bails out on a Python error -- TODO confirm macro
Py_DECREF(pValue);
}
开发者ID:We-can-apply-GPU,项目名称:aMeLiDoSu-HW2,代码行数:33,代码来源:svm_struct_api.c
示例2: add_weight_vector_to_linear_model
MODEL *compact_linear_model(MODEL *model)
/* Returns a shallow copy of model in which the whole set of support
   vectors has been replaced by one support vector carrying the dense
   linear weight vector. */
/* NOTE: It adds the linear weight vector also to newmodel->lin_weights */
/* WARNING: This is correct only for linear models! */
{
  MODEL *compact;
  SVECTOR *wvec;

  /* start from a field-by-field copy of the source model */
  compact = (MODEL *)my_malloc(sizeof(MODEL));
  *compact = *model;

  /* materialize the dense weight vector in compact->lin_weights */
  add_weight_vector_to_linear_model(compact);

  /* slot 0 is unused by SVM-light convention; slot 1 holds w, alpha=1 */
  compact->supvec = (DOC **)my_malloc(sizeof(DOC *)*2);
  compact->alpha  = (double *)my_malloc(sizeof(double)*2);
  compact->index  = NULL;          /* index is not copied */
  compact->supvec[0] = NULL;
  compact->alpha[0]  = 0.0;
  wvec = create_svector_n(compact->lin_weights,
                          compact->totwords,
                          NULL, 1.0);
  compact->supvec[1] = create_example(-1, 0, 0, 0, wvec);
  compact->alpha[1]  = 1.0;
  compact->sv_num    = 2;

  return compact;
}
开发者ID:a061105,项目名称:ConvexLatentSVM,代码行数:25,代码来源:svm_common.c
示例3: create_svector
double SVMModule::classify(std::vector<float>& instance)
{
    // No trained model loaded: signal failure with -1.
    if(m_pTrainModel == NULL)
        return -1;

    WORD *words;
    int vecLen = instance.size();
    // Build a dense, 1-based, zero-terminated word array in the
    // SVM-light sparse-vector format (wnum==0 marks the end).
    words = (WORD*)my_malloc(sizeof(WORD)*(vecLen + 1));
    for(int i = 0; i < vecLen; i++) {
        words[i].wnum = (i+1);
        words[i].weight = instance[i];
    }
    words[vecLen].wnum = 0;   // terminator
    // make svector; create_svector makes its own copy of the words,
    // so the temporary buffer must be released here.
    SVECTOR* svec = create_svector(words, "", 1.0);
    free(words);              // fix: buffer was previously leaked on every call
    DOC* doc = create_example(-1, 0, 0, 0.0, svec);
    // Decision value of the example under the loaded model.
    double prob = classify_example(m_pTrainModel, doc);
    free_example(doc, 1);     // deep free: also releases svec
    return prob;
}
开发者ID:rudhirg,项目名称:ObjectDetection,代码行数:25,代码来源:ObjectDetection.cpp
示例4: save_constraints
/* save constraints in structmodel for training the full model in the future */
void save_constraints(STRUCT_LEARN_PARM *sparm, STRUCTMODEL *sm)
{
  int idx, added;
  CONSTSET fresh;

  /* read the constraints from the constraint file */
  fresh = read_constraints(sparm->confile, sm);

  /* append every constraint to sparm->cset, growing both arrays as we go;
     the lhs feature vector is deep-copied via create_svector */
  added = 0;
  for(idx = 0; idx < fresh.m; idx++)
  {
    int dst = sparm->cset.m;
    sparm->cset.lhs = (DOC**)realloc(sparm->cset.lhs, sizeof(DOC*)*(dst+1));
    sparm->cset.lhs[dst] = create_example(dst, 0, 1000000+dst, 1, create_svector(fresh.lhs[idx]->fvec->words, "", 1.0));
    sparm->cset.rhs = (double*)realloc(sparm->cset.rhs, sizeof(double)*(dst+1));
    sparm->cset.rhs[dst] = fresh.rhs[idx];
    sparm->cset.m = dst + 1;
    added++;
  }

  /* clean up the temporary constraint set read from disk */
  free(fresh.rhs);
  for(idx = 0; idx < fresh.m; idx++)
    free_example(fresh.lhs[idx], 1);
  free(fresh.lhs);

  printf("%d constraints added, totally %d constraints\n", added, sparm->cset.m);
}
开发者ID:caomw,项目名称:ObjectInteraction,代码行数:27,代码来源:svm_struct_main.c
示例5: mexToDOC
/**
 * mexToDOC() - convert the MATLAB/MEX array mxData and mxLabels into
 * the SVMLite formatted docs array and label array. Note that
 * MATLAB uses column major ordering, and SVMLite (and most programs)
 * use row major ordering. This method unravels that complication.
 */
void mexToDOC(mxArray *mxData, mxArray *mxLabels, DOC ***docs, double **label,
long int *totwords, long int *totdoc)
{
  int i;
  int rows, cols;
  double *yvals;
  WORD *words;

  if (mxData == NULL) {
    printf("WARNING: mexToDoc : mxData is NULL");
    return;
  }

  /* retrieve the rows and columns */
  rows = mxGetM(mxData);
  cols = mxGetN(mxData);

  /* allocate memory for the DOC rows */
  (*docs) = (DOC **)my_malloc(sizeof(DOC *) * rows);

  /* allocate memory for the labels */
  if (mxLabels != NULL)
    (*label) = (double *)my_malloc(sizeof(double)* rows);

  /* allocate a single reusable buffer for one row of words (cols entries);
     assumes parse_mxEntry terminates the list within cols slots -- TODO confirm */
  words = (WORD *)my_malloc(sizeof(WORD)*cols);

  /* store the number of words and docs */
  if ((totwords != NULL) && (totdoc != NULL)) {
    (*totwords) = cols;
    (*totdoc) = rows;
  }

  /* load the yvals from the mxLabels array */
  if (mxLabels != NULL)
    yvals = mxGetPr(mxLabels);

  /* for each row, create a corresponding *DOC and store it */
  for (i = 0; i < rows; i++) {
    SVECTOR *fvec = NULL;

    /* parse and copy the given mxData row into the WORD array words */
    parse_mxEntry(i, mxData, mxLabels, words);

    /* create the intermediate structure (svector in svm_common.c);
       create_svector copies words, so the buffer can be reused */
    fvec = create_svector(words,"",1.0);

    /* bug fix: the original ran this in a j=0..1 loop, calling
       create_example twice with the same fvec and leaking the first DOC */
    (*docs)[i] = create_example(i, 0, 0, 1.0, fvec);
    if (mxLabels != NULL)
      (*label)[i] = yvals[i];
  }

  /* bug fix: release the temporary word buffer (previously leaked) */
  free(words);
}
开发者ID:LinaW,项目名称:sentence-training,代码行数:64,代码来源:mexcommon.c
示例6: find_most_violated_joint_constraint_in_cache_old
double find_most_violated_joint_constraint_in_cache_old(int n, int cache_size, SVECTOR ***fydelta_cache, double **loss_cache, MODEL *svmModel, SVECTOR **lhs, double *margin)
/* For each of the n examples, evaluates the cache_size cached
   constraints under svmModel and selects the most violated one per
   example.  The selected fydelta vectors are chained into one joint
   constraint returned via *lhs; the matching losses are summed into
   *margin.  Returns the accumulated violation over all examples. */
{
int i,j;
double progress,progress_old;
double maxviol=0,sumviol,viol,lossval;
double dist_ydelta;
SVECTOR *fydelta;
DOC *doc_fydelta;
(*lhs)=NULL;
(*margin)=0;
sumviol=0;
/* progress/progress_old drive the "+" console feedback only */
progress=0;
progress_old=progress;
for(i=0; i<n; i++) { /*** example loop ***/
progress+=10.0/n;
if((struct_verbosity==1) && (((int)progress_old) != ((int)progress)))
{printf("+");fflush(stdout); progress_old=progress;}
if(struct_verbosity>=2)
{printf("+"); fflush(stdout);}
/* NOTE(review): maxviol is never reset inside the example loop, so
   after the first example a cached constraint only wins if it beats
   the running global maximum, and sumviol accumulates that global
   running max.  This looks unintended and may be why this is the
   "_old" variant -- confirm before reusing. */
fydelta=NULL;
lossval=0;
for(j=0;j<cache_size;j++) {
/* violation of cached constraint j: loss - w*fydelta */
doc_fydelta=create_example(1,0,1,1,fydelta_cache[j][i]);
dist_ydelta=classify_example(svmModel,doc_fydelta);
free_example(doc_fydelta,0); /* shallow free: cache keeps fydelta */
viol=loss_cache[j][i]-dist_ydelta;
if((viol > maxviol) || (!fydelta)) {
fydelta=fydelta_cache[j][i];
lossval=loss_cache[j][i];
maxviol=viol;
}
}
/**** add current fydelta to joint constraint ****/
fydelta=copy_svector(fydelta); /* copy, since the cache owns its vector */
append_svector_list(fydelta,(*lhs)); /* add fydelta to lhs */
(*lhs)=fydelta;
(*margin)+=lossval; /* add loss to rhs */
sumviol+=maxviol;
}
return(sumviol);
}
开发者ID:jaimeguzman,项目名称:iaudp2014,代码行数:49,代码来源:svm_struct_learn.c
示例7: update_constraint_cache_for_model
void update_constraint_cache_for_model(CCACHE *ccache, MODEL *svmModel)
/* update the violation scores according to svmModel and find the
most violated constraints for each example */
{
int i;
double progress=0,progress_old=0;
double maxviol=0;
double dist_ydelta;
DOC *doc_fydelta;
CCACHEELEM *celem,*prev,*maxviol_celem,*maxviol_prev;
for(i=0; i<ccache->n; i++) { /*** example loop ***/
/* console feedback: print "+" roughly every 10% of examples */
progress+=10.0/ccache->n;
if((struct_verbosity==1) && (((int)progress_old) != ((int)progress)))
{printf("+");fflush(stdout); progress_old=progress;}
if(struct_verbosity>=2)
{printf("+"); fflush(stdout);}
maxviol=0;
prev=NULL;
maxviol_celem=NULL;
maxviol_prev=NULL;
/* recompute viol = rhs - w*fydelta for every cached constraint of
   example i and remember the most violated one (and its predecessor,
   so it can be unlinked below) */
for(celem=ccache->constlist[i];celem;celem=celem->next) {
/* wrap the cached vector in a throwaway DOC just to score it */
doc_fydelta=create_example(1,0,1,1,celem->fydelta);
dist_ydelta=classify_example(svmModel,doc_fydelta);
free_example(doc_fydelta,0); /* shallow free: cache keeps fydelta */
celem->viol=celem->rhs-dist_ydelta;
if((celem->viol > maxviol) || (!maxviol_celem)) {
maxviol=celem->viol;
maxviol_celem=celem;
maxviol_prev=prev;
}
prev=celem;
}
if(maxviol_prev) { /* move max violated constraint to the top of list */
maxviol_prev->next=maxviol_celem->next;
maxviol_celem->next=ccache->constlist[i];
ccache->constlist[i]=maxviol_celem;
}
}
}
开发者ID:jaimeguzman,项目名称:iaudp2014,代码行数:42,代码来源:svm_struct_learn.c
示例8: update_constraint_cache_for_model
void update_constraint_cache_for_model(CCACHE *ccache, MODEL *svmModel)
/* update the violation scores according to svmModel and find the
most violated constraints for each example */
{
int i;
long progress=0;
double maxviol=0;
double dist_ydelta;
DOC *doc_fydelta;
CCACHEELEM *celem,*prev,*maxviol_celem,*maxviol_prev;
/* allocate one reusable DOC shell; its fvec pointer is swapped in the
   inner loop, avoiding a create/free pair per cached constraint */
doc_fydelta=create_example(1,0,1,1,NULL);
for(i=0; i<ccache->n; i++) { /*** example loop ***/
if(struct_verbosity>=3)
print_percent_progress(&progress,ccache->n,10,"+");
maxviol=0;
prev=NULL;
maxviol_celem=NULL;
maxviol_prev=NULL;
/* recompute viol = rhs - w*fydelta for every cached constraint of
   example i and remember the most violated one (plus predecessor,
   so it can be unlinked below) */
for(celem=ccache->constlist[i];celem;celem=celem->next) {
doc_fydelta->fvec=celem->fydelta;
dist_ydelta=classify_example(svmModel,doc_fydelta);
celem->viol=celem->rhs-dist_ydelta;
if((celem->viol > maxviol) || (!maxviol_celem)) {
maxviol=celem->viol;
maxviol_celem=celem;
maxviol_prev=prev;
}
prev=celem;
}
/* changed[i] records whether the list head moved this round */
ccache->changed[i]=0;
if(maxviol_prev) { /* move max violated constraint to the top of list */
maxviol_prev->next=maxviol_celem->next;
maxviol_celem->next=ccache->constlist[i];
ccache->constlist[i]=maxviol_celem;
ccache->changed[i]=1;
}
}
/* shallow free of the shell only; the fvecs belong to the cache */
free_example(doc_fydelta,0);
}
开发者ID:aa755,项目名称:cfg3d,代码行数:42,代码来源:svm_struct_learn.cpp
示例9: add_constraint_to_constraint_cache
void add_constraint_to_constraint_cache(CCACHE *ccache, MODEL *svmModel, int exnum, SVECTOR *fydelta, double rhs, int maxconst)
/* add new constraint fydelta*w>rhs for example exnum to cache,
if it is more violated than the currently most violated
constraint in cache. if this grows the number of constraint
for this example beyond maxconst, then the most unused
constraint is deleted. the function assumes that
update_constraint_cache_for_model has been run.
NOTE: takes ownership of fydelta -- it is either stored in the
cache or freed before returning. */
{
double viol;
double dist_ydelta;
DOC *doc_fydelta;
CCACHEELEM *celem;
int cnum;
/* score the candidate: viol = rhs - w*fydelta */
doc_fydelta=create_example(1,0,1,1,fydelta);
dist_ydelta=classify_example(svmModel,doc_fydelta);
free_example(doc_fydelta,0); /* shallow free: fydelta still needed */
viol=rhs-dist_ydelta;
/* small epsilon guards against re-adding a numerically equal constraint */
if((viol-0.000000000001) > ccache->constlist[exnum]->viol) {
/* prepend a new cache element holding the candidate */
celem=ccache->constlist[exnum];
ccache->constlist[exnum]=(CCACHEELEM *)malloc(sizeof(CCACHEELEM));
ccache->constlist[exnum]->next=celem;
ccache->constlist[exnum]->fydelta=fydelta;
ccache->constlist[exnum]->rhs=rhs;
ccache->constlist[exnum]->viol=viol;
/* remove last constraint in list, if list is longer than maxconst */
cnum=2;
for(celem=ccache->constlist[exnum];celem && celem->next && celem->next->next;celem=celem->next)
cnum++;
/* celem now points at the second-to-last element */
if(cnum>maxconst) {
free_svector(celem->next->fydelta);
free(celem->next);
celem->next=NULL;
}
}
else {
/* not violated enough: discard the candidate we own */
free_svector(fydelta);
}
}
开发者ID:jaimeguzman,项目名称:iaudp2014,代码行数:41,代码来源:svm_struct_learn.c
示例10: init_struct_constraints
CONSTSET init_struct_constraints(SAMPLE sample, STRUCTMODEL *sm,
STRUCT_LEARN_PARM *sparm)
{
  /* Initializes the optimization problem. Typically, you do not need
     to change this function, since you want to start with an empty
     set of constraints. However, if for example you have constraints
     that certain weights need to be positive, you might put that in
     here. The constraints are represented as lhs[i]*w >= rhs[i]. lhs
     is an array of feature vectors, rhs is an array of doubles. m is
     the number of constraints. The function returns the initial
     set of constraints. */
  CONSTSET cset;
  long sizePsi=sm->sizePsi;
  long i;
  WORD words[2];

  if(1) { /* normal case: start with empty set of constraints */
    cset.m   = 0;
    cset.lhs = NULL;
    cset.rhs = NULL;
  }
  else { /* disabled branch: add one constraint per weight so that all
            learned weights are positive. WARNING: Currently, they are
            positive only up to precision epsilon set by -e. */
    cset.lhs=my_malloc(sizeof(DOC *)*sizePsi);
    cset.rhs=my_malloc(sizeof(double)*sizePsi);
    for(i=0; i<sizePsi; i++) {
      words[0].wnum   = i+1;
      words[0].weight = 1.0;
      words[1].wnum   = 0;   /* terminator of the sparse vector */
      /* the following slackid is a hack. we will run into problems,
         if we have more than 1000000 slack sets (ie examples) */
      cset.lhs[i]=create_example(i,0,1000000+i,1,create_svector(words,"",1.0));
      cset.rhs[i]=0.0;
    }
  }
  return(cset);
}
开发者ID:gdoggg2032,项目名称:DL,代码行数:38,代码来源:svm_struct_api.c
示例11: LOG
//.........这里部分代码省略.........
fscanf(modelfl,"%ld%*[^\n]\n", &model->totwords);
fscanf(modelfl,"%ld%*[^\n]\n", &model->totdoc);
fscanf(modelfl,"%ld%*[^\n]\n", &model->sv_num);
fscanf(modelfl,"%lf%*[^\n]\n", &model->b);
} else { // use_gmumr
max_words = config.getDataDim();
words = (WORD *)my_malloc(sizeof(WORD)*(max_words+10));
LOG(
config.log,
LogLevel::DEBUG_LEVEL,
__debug_prefix__ + ".libraryReadModel() Converting config to model..."
);
/* 0=linear, 1=poly, 2=rbf, 3=sigmoid, 4=custom -- same as GMUM.R! */
model->kernel_parm.kernel_type = static_cast<long int>(config.kernel_type);
// -d int -> parameter d in polynomial kernel
model->kernel_parm.poly_degree = config.degree;
// -g float -> parameter gamma in rbf kernel
model->kernel_parm.rbf_gamma = config.gamma;
// -s float -> parameter s in sigmoid/poly kernel
model->kernel_parm.coef_lin = config.gamma;
// -r float -> parameter c in sigmoid/poly kernel
model->kernel_parm.coef_const = config.coef0;
// -u string -> parameter of user defined kernel
char kernel_parm_custom[50] = "empty";
char * model_kernel_parm_custom = model->kernel_parm.custom;
model_kernel_parm_custom = kernel_parm_custom;
// highest feature index
model->totwords = config.getDataDim();
// number of training documents
model->totdoc = config.target.n_rows;
// number of support vectors plus 1 (!)
model->sv_num = config.l + 1;
/* Threshold b (has opposite sign than SVMClient::predict())
* In svm_common.c:57 in double classify_example_linear():
* return(sum-model->b);
*/
model->b = - config.b;
LOG(
config.log,
LogLevel::DEBUG_LEVEL,
__debug_prefix__ + ".libraryReadModel() Converting config done."
);
}
// GMUM.R changes }
model->supvec = (DOC **)my_malloc(sizeof(DOC *)*model->sv_num);
model->alpha = (double *)my_malloc(sizeof(double)*model->sv_num);
model->index=NULL;
model->lin_weights=NULL;
// GMUM.R changes {
if (!use_gmumr) {
for(i=1;i<model->sv_num;i++) {
fgets(line,(int)ll,modelfl);
if(!parse_document(line,words,&(model->alpha[i]),&queryid,&slackid,
&costfactor,&wpos,max_words,&comment)) {
C_PRINTF("\nParsing error while reading model file in SV %ld!\n%s",
i,line);
EXIT(1);
}
model->supvec[i] = create_example(-1,
0,0,
0.0,
create_svector(words,comment,1.0));
}
fclose(modelfl);
free(line);
} else {
for(i = 1; i < model->sv_num; ++i) {
line = SVMConfigurationToSVMLightModelSVLine(config, i-1);
if(!parse_document(line,words,&(model->alpha[i]),&queryid,&slackid,
&costfactor,&wpos,max_words,&comment)) {
C_PRINTF("\nParsing error while reading model file in SV %ld!\n%s",
i,line);
EXIT(1);
}
model->supvec[i] = create_example(-1,
0,0,
0.0,
create_svector(words,comment,1.0));
free(line);
}
}
// GMUM.R changes }
free(words);
if(verbosity>=1) {
C_FPRINTF(stdout, "OK. (%d support vectors read)\n",(int)(model->sv_num-1));
}
LOG(
config.log,
LogLevel::DEBUG_LEVEL,
__debug_prefix__ + ".libraryReadModel() Done."
);
return(model);
}
开发者ID:gmum,项目名称:gmum.r,代码行数:101,代码来源:svmlight_runner.cpp
示例12: ipProcessTargetClassifierInternal
//.........这里部分代码省略.........
words[1].wnum = 2;
words[1].weight = (float)astClassFeature->_D;
words[2].wnum = 3;
words[2].weight = (float)astClassFeature->_Delta;
words[3].wnum = 4;
words[3].weight = (float)astClassFeature->_Hu[0];
words[4].wnum = 5;
words[4].weight = (float)astClassFeature->_Hu[1];
words[5].wnum = 6;
words[5].weight = (float)astClassFeature->_Hu[2];
words[6].wnum = 7;
words[6].weight = (float)astClassFeature->_Hu[3];
words[7].wnum = 8;
words[7].weight = (float)astClassFeature->_Hu[4];
words[8].wnum = 9;
words[8].weight = (float)astClassFeature->_Hu[5];
words[9].wnum = 10;
words[9].weight = (float)astClassFeature->_Hu[6];
words[10].wnum = 11;
words[10].weight = (float)astClassFeature->_I;
words[11].wnum = 12;
words[11].weight = (float)astClassFeature->_P;
words[12].wnum = 13;
words[12].weight = (float)astClassFeature->_P13;
words[13].wnum = 14;
words[13].weight = (float)astClassFeature->_P23;
words[14].wnum=0;
//c= (IMP_S32)svm_predict(pstParams->m_model, x);
doc = create_example(-1,0,0,0.0,create_svector(words,"",1.0));
type=(float)classify_example(pstParams->pstModel,doc);
if (type>0 && type <2)
{
c=IMP_TGT_TYPE_HUMAN;
pstTarget->stTargetInfo.s32HumanLikehood++;
pstTarget->stTargetInfo.s32VehicleLikehood--;
}
else if (type>2 && type <4)
{
c=IMP_TGT_TYPE_VEHICLE;
pstTarget->stTargetInfo.s32VehicleLikehood++;
pstTarget->stTargetInfo.s32HumanLikehood--;
}
free_example(doc,1);
}
}
}
}
if (pstTarget->stTargetInfo.s32VehicleLikehood>100)
{
pstTarget->stTargetInfo.s32VehicleLikehood=100;
}
else if(pstTarget->stTargetInfo.s32VehicleLikehood<-1)
{
pstTarget->stTargetInfo.s32VehicleLikehood=-1;
}
开发者ID:119,项目名称:myimpsrc2,代码行数:66,代码来源:imp_pea_classify.c
示例13: svm_learn_struct_joint
//.........这里部分代码省略.........
/**** add current fy-fybar to constraint and margin ****/
if(kparm->kernel_type == LINEAR) {
add_list_n_ns(diff_n,fybar,1.0); /* add fy-fybar to sum */
free_svector(fybar);
}
else {
append_svector_list(fybar,lhs); /* add fy-fybar to vector list */
lhs=fybar;
}
margin+=lossval/n; /* add loss to rhs */
rt_total+=MAX(get_runtime()-rt1,0);
} /* end of example loop */
rt1=get_runtime();
/* create sparse vector from dense sum */
if(kparm->kernel_type == LINEAR) {
diff=create_svector_n(diff_n,sm->sizePsi,"",1.0);
free_nvector(diff_n);
}
else {
diff=lhs;
}
rt_total+=MAX(get_runtime()-rt1,0);
} /* end of finding most violated joint constraint */
rt1=get_runtime();
/**** if `error', then add constraint and recompute QP ****/
doc=create_example(cset.m,0,1,1,diff);
dist=classify_example(svmModel,doc);
ceps=MAX(0,margin-dist-slack);
if(slack > (margin-dist+0.000001)) {
printf("\nWARNING: Slack of most violated constraint is smaller than slack of working\n");
printf(" set! There is probably a bug in 'find_most_violated_constraint_*'.\n");
printf("slack=%f, newslack=%f\n",slack,margin-dist);
/* exit(1); */
}
if(ceps > sparm->epsilon) {
/**** resize constraint matrix and add new constraint ****/
cset.lhs=(DOC **)realloc(cset.lhs,sizeof(DOC *)*(cset.m+1));
if(sparm->slack_norm == 1)
cset.lhs[cset.m]=create_example(cset.m,0,1,1,diff);
else if(sparm->slack_norm == 2)
exit(1);
cset.rhs=(double *)realloc(cset.rhs,sizeof(double)*(cset.m+1));
cset.rhs[cset.m]=margin;
alpha=(double *)realloc(alpha,sizeof(double)*(cset.m+1));
alpha[cset.m]=0;
alphahist=(long *)realloc(alphahist,sizeof(long)*(cset.m+1));
alphahist[cset.m]=optcount;
cset.m++;
totconstraints++;
if((alg_type == DUAL_ALG) || (alg_type == DUAL_CACHE_ALG)) {
if(struct_verbosity>=1) {
printf(":");fflush(stdout);
}
rt2=get_runtime();
kparm->gram_matrix=update_kernel_matrix(kparm->gram_matrix,cset.m-1,
&cset,kparm);
rt_kernel+=MAX(get_runtime()-rt2,0);
}
开发者ID:jaimeguzman,项目名称:iaudp2014,代码行数:67,代码来源:svm_struct_learn.c
示例14: svm_learn_struct_joint
//.........这里部分代码省略.........
progress=0;
rt_total+=MAX(get_runtime()-rt1,0);
for(i=0; i<n; i++) {
rt1=get_runtime();
if(struct_verbosity>=1)
print_percent_progress(&progress,n,10,".");
/* compute most violating fydelta=fy-fybar and rhs for example i */
find_most_violated_constraint(&fydelta,&rhs_i,&ex[i],fycache[i],n,
sm,sparm,&rt_viol,&rt_psi,&argmax_count);
/* add current fy-fybar to lhs of constraint */
if(kparm->kernel_type == LINEAR_KERNEL) {
add_list_n_ns(lhs_n,fydelta,1.0); /* add fy-fybar to sum */
free_svector(fydelta);
}
else {
append_svector_list(fydelta,lhs); /* add fy-fybar to vector list */
lhs=fydelta;
}
rhs+=rhs_i; /* add loss to rhs */
rt_total+=MAX(get_runtime()-rt1,0);
} /* end of example loop */
rt1=get_runtime();
/* create sparse vector from dense sum */
if(kparm->kernel_type == LINEAR_KERNEL)
lhs=create_svector_n_r(lhs_n,sm->sizePsi,NULL,1.0,
COMPACT_ROUNDING_THRESH);
doc=create_example(cset.m,0,1,1,lhs);
lhsXw=classify_example(svmModel,doc);
free_example(doc,0);
viol=rhs-lhsXw;
rt_total+=MAX(get_runtime()-rt1,0);
} /* end of finding most violated joint constraint */
rt1=get_runtime();
/**** if `error', then add constraint and recompute QP ****/
if(slack > (rhs-lhsXw+0.000001)) {
printf("\nWARNING: Slack of most violated constraint is smaller than slack of working\n");
printf(" set! There is probably a bug in 'find_most_violated_constraint_*'.\n");
printf("slack=%f, newslack=%f\n",slack,rhs-lhsXw);
/* exit(1); */
}
ceps=MAX(0,rhs-lhsXw-slack);
if((ceps > sparm->epsilon) || cached_constraint) {
/**** resize constraint matrix and add new constraint ****/
cset.lhs=(DOC **)realloc(cset.lhs,sizeof(DOC *)*(cset.m+1));
cset.lhs[cset.m]=create_example(cset.m,0,1,1,lhs);
cset.rhs=(double *)realloc(cset.rhs,sizeof(double)*(cset.m+1));
cset.rhs[cset.m]=rhs;
alpha=(double *)realloc(alpha,sizeof(double)*(cset.m+1));
alpha[cset.m]=0;
alphahist=(long *)realloc(alphahist,sizeof(long)*(cset.m+1));
alphahist[cset.m]=optcount;
cset.m++;
totconstraints++;
if((alg_type == ONESLACK_DUAL_ALG)
|| (alg_type == ONESLACK_DUAL_CACHE_ALG)) {
开发者ID:aa755,项目名称:cfg3d,代码行数:67,代码来源:svm_struct_learn.cpp
示例15: svm_learn_struct
//.........这里部分代码省略.........
activenum--;
opti[i]=opti_round;
}
if(struct_verbosity>=2)
printf("no-incorrect-found(%i) ",i);
continue;
}
/**** get psi(y)-psi(ybar) ****/
rt2=get_runtime();
if(fycache)
fy=copy_svector(fycache[i]);
else
fy=psi(ex[i].x,ex[i].y,sm,sparm);
fybar=psi(ex[i].x,ybar,sm,sparm);
rt_psi+=MAX(get_runtime()-rt2,0);
/**** scale feature vector and margin by loss ****/
lossval=loss(ex[i].y,ybar,sparm);
if(sparm->slack_norm == 2)
lossval=sqrt(lossval);
if(sparm->loss_type == SLACK_RESCALING)
factor=lossval;
else /* do not rescale vector for */
factor=1.0; /* margin rescaling loss type */
for(f=fy;f;f=f->next)
f->factor*=factor;
for(f=fybar;f;f=f->next)
f->factor*=-factor;
margin=lossval;
/**** create constraint for current ybar ****/
append_svector_list(fy,fybar);/* append the two vector lists */
doc=create_example(cset.m,0,i+1,1,fy);
/**** compute slack for this example ****/
slack=0;
for(j=0;j<cset.m;j++)
if(cset.lhs[j]->slackid == i+1) {
if(sparm->slack_norm == 2) /* works only for linear kernel */
slack=MAX(slack,cset.rhs[j]
-(classify_example(svmModel,cset.lhs[j])
-sm->w[sizePsi+i]/(sqrt(2*svmCnorm))));
else
slack=MAX(slack,
cset.rhs[j]-classify_example(svmModel,cset.lhs[j]));
}
/**** if `error' add constraint and recompute ****/
dist=classify_example(svmModel,doc);
ceps=MAX(ceps,margin-dist-slack);
if(slack > (margin-dist+0.0001)) {
printf("\nWARNING: Slack of most violated constraint is smaller than slack of working\n");
printf(" set! There is probably a bug in 'find_most_violated_constraint_*'.\n");
printf("Ex %d: slack=%f, newslack=%f\n",i,slack,margin-dist);
/* exit(1); */
}
if((dist+slack)<(margin-epsilon)) {
if(struct_verbosity>=2)
{printf("(%i,eps=%.2f) ",i,margin-dist-slack); fflush(stdout);}
if(struct_verbosity==1)
{printf("."); fflush(stdout);}
/**** resize constraint matrix and add new constraint ****/
cset.m++;
cset.lhs=(DOC **)realloc(cset.lhs,sizeof(DOC *)*cset.m);
开发者ID:aa755,项目名称:cfg3d,代码行数:67,代码来源:svm_struct_learn.cpp
示例16: read_struct_model
STRUCTMODEL read_struct_model(char *file, STRUCT_LEARN_PARM *sparm)
{
/* Reads structural model sm from file file. This function is used
only in the prediction module, not in the learning module.
The file layout matches write_struct_model: a version line, the
sparm/kernel scalars, then one support vector per line. */
FILE *modelfl;
STRUCTMODEL sm;
long i,queryid,slackid;
double costfactor;
long max_sv,max_words,ll,wpos;
char *line,*comment;
TOKEN *words;
char version_buffer[100];
MODEL *model;
/* pre-scan the file to size the line and token buffers */
nol_ll(file,&max_sv,&max_words,&ll); /* scan size of model file */
max_words+=2;
ll+=2;
words = (TOKEN *)my_malloc(sizeof(TOKEN)*(max_words+10));
line = (char *)my_malloc(sizeof(char)*ll);
model = (MODEL *)my_malloc(sizeof(MODEL));
if ((modelfl = fopen (file, "r")) == NULL)
{ perror (file); exit (1); }
/* refuse files written by a different program version */
fscanf(modelfl,"SVM-multiclass Version %s\n",version_buffer);
if(strcmp(version_buffer,INST_VERSION)) {
perror ("Version of model-file does not match version of svm_struct_classify!");
exit (1);
}
/* header scalars; %*[^\n]\n skips the trailing per-line comment.
   NOTE(review): fscanf return values are unchecked, so a truncated
   file is silently accepted -- consider validating on change. */
fscanf(modelfl,"%d%*[^\n]\n", &sparm->num_classes);
fscanf(modelfl,"%d%*[^\n]\n", &sparm->num_features);
fscanf(modelfl,"%d%*[^\n]\n", &sparm->loss_function);
fscanf(modelfl,"%ld%*[^\n]\n", &model->kernel_parm.kernel_type);
fscanf(modelfl,"%ld%*[^\n]\n", &model->kernel_parm.poly_degree);
fscanf(modelfl,"%lf%*[^\n]\n", &model->kernel_parm.rbf_gamma);
fscanf(modelfl,"%lf%*[^\n]\n", &model->kernel_parm.coef_lin);
fscanf(modelfl,"%lf%*[^\n]\n", &model->kernel_parm.coef_const);
fscanf(modelfl,"%[^#]%*[^\n]\n", model->kernel_parm.custom);
fscanf(modelfl,"%ld%*[^\n]\n", &model->totwords);
fscanf(modelfl,"%ld%*[^\n]\n", &model->totdoc);
fscanf(modelfl,"%ld%*[^\n]\n", &model->sv_num);
fscanf(modelfl,"%lf%*[^\n]\n", &model->b);
model->supvec = (DOC **)my_malloc(sizeof(DOC *)*model->sv_num);
model->alpha = (double *)my_malloc(sizeof(double)*model->sv_num);
model->index=NULL;
model->lin_weights=NULL;
/* read support vectors; slot 0 is unused by SVM-light convention */
for(i=1;i<model->sv_num;i++) {
fgets(line,(int)ll,modelfl);
if(!parse_document(line,words,&(model->alpha[i]),&queryid,&slackid,
&costfactor,&wpos,max_words,&comment, true)) {
printf("\nParsing error while reading model file in SV %ld!\n%s",
i,line);
exit(1);
}
model->supvec[i] = create_example(-1,0,0,0.0,
create_svector(words,comment,1.0));
/* the queryid column is reused to tag the kernel for this SV */
model->supvec[i]->fvec->kernel_id=queryid;
}
fclose(modelfl);
free(line);
free(words);
if(verbosity>=1) {
fprintf(stdout, " (%d support vectors read) ",(int)(model->sv_num-1));
}
/* wrap the plain SVM model in the structural-model container */
sm.svm_model=model;
sm.sizePsi=model->totwords;
sm.w=NULL;
return(sm);
}
开发者ID:JackZZhang,项目名称:iPM3F,代码行数:73,代码来源:svm_struct_api.cpp
示例17: add_constraint_to_constraint_cache
double add_constraint_to_constraint_cache(CCACHE *ccache, MODEL *svmModel, int exnum, SVECTOR *fydelta, double rhs, double gainthresh, int maxconst, double *rt_cachesum)
/* add new constraint fydelta*w>rhs for example exnum to cache,
if it is more violated (by gainthresh) than the currently most
violated constraint in cache. if this grows the number of
cached constraints for this example beyond maxconst, then the
least recently used constraint is deleted. the function
assumes that update_constraint_cache_for_model has been
run.
NOTE: takes ownership of fydelta -- it is either stored (possibly
compacted) in the cache or freed before returning.
Returns the violation gain truncated at zero. */
{
double viol,viol_gain,viol_gain_trunc;
double dist_ydelta;
DOC *doc_fydelta;
SVECTOR *fydelta_new;
CCACHEELEM *celem;
int cnum;
double rt2=0;
/* compute violation of new constraint */
doc_fydelta=create_example(1,0,1,1,fydelta);
dist_ydelta=classify_example(svmModel,doc_fydelta);
free_example(doc_fydelta,0); /* shallow free: fydelta still needed */
viol=rhs-dist_ydelta;
/* gain relative to the cache head, which holds the current maximum */
viol_gain=viol-ccache->constlist[exnum]->viol;
viol_gain_trunc=viol-MAX(ccache->constlist[exnum]->viol,0);
ccache->avg_viol_gain[exnum]=viol_gain;
/* check if violation of new constraint is larger than that of the
best cache element */
if(viol_gain > gainthresh) {
fydelta_new=fydelta;
if(struct_verbosity>=2) rt2=get_runtime();
/* for linear kernels, optionally collapse the svector list into a
   compact form before caching (strategy picked at compile time) */
if(svmModel->kernel_parm.kernel_type == LINEAR_KERNEL) {
if(COMPACT_CACHED_VECTORS == 1) { /* eval sum for linear */
fydelta_new=add_list_sort_ss_r(fydelta,COMPACT_ROUNDING_THRESH);
free_svector(fydelta);
}
else if(COMPACT_CACHED_VECTORS == 2) {
fydelta_new=add_list_ss_r(fydelta,COMPACT_ROUNDING_THRESH);
free_svector(fydelta);
}
else if(COMPACT_CACHED_VECTORS == 3) {
fydelta_new=add_list_ns_r(fydelta,COMPACT_ROUNDING_THRESH);
free_svector(fydelta);
}
}
if(struct_verbosity>=2) (*rt_cachesum)+=MAX(get_runtime()-rt2,0);
/* prepend a new cache element holding the candidate */
celem=ccache->constlist[exnum];
ccache->constlist[exnum]=(CCACHEELEM *)my_malloc(sizeof(CCACHEELEM));
ccache->constlist[exnum]->next=celem;
ccache->constlist[exnum]->fydelta=fydelta_new;
ccache->constlist[exnum]->rhs=rhs;
ccache->constlist[exnum]->viol=viol;
ccache->changed[exnum]+=2;
/* remove last constraint in list, if list is longer than maxconst */
cnum=2;
for(celem=ccache->constlist[exnum];celem && celem->next && celem->next->next;celem=celem->next)
cnum++;
/* celem now points at the second-to-last element */
if(cnum>maxconst) {
free_svector(celem->next->fydelta);
free(celem->next);
celem->next=NULL;
}
}
else {
/* gain too small: discard the candidate we own */
free_svector(fydelta);
}
return(viol_gain_trunc);
}
开发者ID:aa755,项目名称:cfg3d,代码行数:69,代码来源:svm_struct_learn.cpp
示例18: main_classify
int main_classify (int argc, char* argv[])
{
DOC *doc; /* test example */
WORDSVM *words;
long max_docs,max_words_doc,lld;
long totdoc=0,queryid,slackid;
long correct=0,incorrect=0,no_accuracy=0;
long res_a=0,res_b=0,res_c=0,res_d=0,wnum,pred_format;
long j;
double t1,runtime=0;
double dist,doc_label,costfactor;
char *line,*comment;
FILE *predfl,*docfl;
MODEL *model;
read_input_parameters(argc,argv,docfile,modelfile,predictionsfile,
&verbosity,&pred_format);
nol_ll(docfile,&max_docs,&max_words_doc,&lld); /* scan size of input file */
max_words_doc+=2;
lld+=2;
line = (char *)my_malloc(sizeof(char)*lld);
words = (WORDSVM *)my_malloc(sizeof(WORDSVM)*(max_words_doc+10));
model=read_model(modelfile);
if(model->kernel_parm.kernel_type == 0) { /* linear kernel */
/* compute weight vector */
add_weight_vector_to_linear_model(model);
}
if(verbosity>=2) {
printf("Classifying test examples.."); fflush(stdout);
}
if ((docfl = fopen (docfile, "r")) == NULL)
{ perror (docfile); exit (1); }
if ((predfl = fopen (predictionsfile, "w")) == NULL)
{ perror (predictionsfile); exit (1); }
while((!feof(docfl)) && fgets(line,(int)lld,docfl)) {
if(line[0] == '#') continue; /* line contains comments */
parse_document(line,words,&doc_label,&queryid,&slackid,&costfactor,&wnum,
max_words_doc,&comment);
totdoc++;
if(model->kernel_parm.kernel_type == 0) { /* linear kernel */
for(j=0;(words[j]).wnum != 0;j++) { /* Check if feature numbers */
if((words[j]).wnum>model->totwords) /* are not larger than in */
(words[j]).wnum=0; /* model. Remove feature if */
} /* necessary. */
doc = create_example(-1,0,0,0.0,create_svector(words,comment,1.0));
t1=get_runtime();
dist=classify_example_linear(model,doc);
runtime+=(get_runtime()-t1);
free_example(doc,1);
}
else { /* non-linear kernel */
doc = create_example(-1,0,0,0.0,create_svector(words,comment,1.0));
t1=get_runtime();
dist=classify_example(model,doc);
runtime+=(get_runtime()-t1);
free_example(doc,1);
}
if(dist>0) {
if(pred_format==0) { /* old weired output format */
fprintf(predfl,"%.8g:+1 %.8g:-1\n",dist,-dist);
}
if(doc_label>0) correct++; else incorrect++;
if(doc_label>0) res_a++; else res_b++;
}
else {
if(pred_format==0) { /* old weired output format */
fprintf(predfl,"%.8g:-1 %.8g:+1\n",-dist,dist);
}
if(doc_label<0) correct++; else incorrect++;
if(doc_label>0) res_c++; else res_d++;
}
if(pred_format==1) { /* output the value of decision function */
fprintf(predfl,"%.8g\n",dist);
}
if((int)(0.01+(doc_label*doc_label)) != 1)
{ no_accuracy=1; } /* test data is not binary labeled */
if(verbosity>=2) {
if(totdoc % 100 == 0) {
printf("%ld..",totdoc); fflush(stdout);
}
}
}
free(line);
free(words);
free_model(model,1);
if(verbosity>=2) {
printf("done\n");
/* Note by Gary Boone Date: 29 April 2000 */
/* o Timing is inaccurate. The timer has 0.01 second resolution. */
/* Because classification of a single vector takes less than */
/* 0.01 secs, the timer was underflowing. */
//.........这里部分代码省略.........
开发者ID:119,项目名称:myimpsrc2,代码行数:101,代码来源:svm_classify.c
示例19: throw
//.........这里部分代码省略.........
rightEye->apply();
rightEye->getMaxLocation(rightEyeLocation, rightPSR);
rightEyeLocation.x += offset.x;
rightEyeLocation.y += offset.y;
}
}
if (roi)
cvReleaseImage(&roi);
center.x = (leftEyeLocation.x + rightEyeLocation.x) / 2;
center.y = leftEyeLocation.y + Globals::noseDrop;
fa.setNose(center);
offset.x = offset.y = 0;
roi = (roiFunction)? roiFunction(frame, fa, offset, Annotations::Nose) : 0;
// free the preprocessed image
fftw_free(preprocessedImage);
// all location extractors do identical preprocessing. Therefore, preprocess
// once using say the left eye extractor and re-use it for all three extractors
preprocessedImage = nose->getPreprocessedImage((roi)? roi : frame);
// get the location of the nose
nose->setImage(preprocessedImage);
nose->apply();
nose->getMaxLocation(noseLocation, nosePSR);
noseLocation.x += offset.x;
noseLocation.y += offset.y;
// free the preprocessed image
fftw_free(preprocessedImage);
fa.setLeftIris(leftEyeLocation);
fa.setRightIris(rightEyeLocation);
fa.setNose(noseLocation);
// we are done with the images at this point. Free roi if not zero
if (roi)
cvReleaseImage(&roi);
// cout << "Confidence (L, R, N) = (" << leftPSR << ", " <<
// rightPSR << ")" << endl;
// extract features vector
vector<double> data;
for (int i = 0; i < nFeatures; i++) {
double value = featureExtractors[i]->extract(&fa);
data.push_back(value);
}
// normalize
normalize(data);
// create SVM Light objects to classify
DOC* doc;
WORD* words = (WORD*)malloc(sizeof(WORD) * (nFeatures + 1));
for (int i = 0; i < nFeatures; i++) {
words[i].wnum = featureExtractors[i]->getId();
words[i].weight = data[i];
}
// SVM Light expects that the features vector has a zero element
// to indicate termination and hence
words[nFeatures].wnum = 0;
words[nFeatures].weight = 0.0;
// create doc
string comment = "Gaze SVM";
doc = create_example(-1, 0, 0, 0.0, create_svector(words, (char*)comment.c_str(), 1.0));
int maxIndex = 0;
confidence = -FLT_MAX;
double dists[Globals::numZones];
// classify using each zone model
#pragma omp parallel for num_threads(Globals::numZones)
for (unsigned int i = 0; i < Globals::numZones; i++) {
if (kernelType == Trainer::Linear)
dists[i] = classify_example_linear(models[i], doc);
else
dists[i] = classify_example(models[i], doc);
|
请发表评论