本文整理汇总了C++中cvMinMaxLoc函数的典型用法代码示例。如果您正苦于以下问题:C++ cvMinMaxLoc函数的具体用法?C++ cvMinMaxLoc怎么用?C++ cvMinMaxLoc使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvMinMaxLoc函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: get_hand_interval_2
/* Compute the grey-level interval occupied by the hand in `body`.
 *
 * Runs 2-means clustering on the non-zero pixel values of `body` and
 * writes:
 *   interval[0] = minimum pixel value found anywhere in `body`
 *   interval[1] = the smaller of the two cluster centres
 *
 * body     - single-channel image holding the segmented body pixels
 * interval - output array with room for at least two ints
 */
void get_hand_interval_2 (IplImage *body, int *interval)
{
	CvMat *data, *labels, *means;
	int count;
#define CLUSTERS 2
	count = cvCountNonZero(body);
	data = cvCreateMat(count, 1, CV_32FC1);
	labels = cvCreateMat(count, 1, CV_32SC1);
	means = cvCreateMat(CLUSTERS, 1, CV_32FC1);
	/* copy the non-zero pixel values of `body` into `data` */
	fill_mat(body, data);
	cvKMeans2(data, CLUSTERS, labels,
	          cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 10.0),
	          1, 0, 0, means, 0);
	double tmp;
	/* lower bound of the interval: darkest pixel in the image */
	cvMinMaxLoc(body, &tmp, NULL, NULL, NULL, NULL);
	interval[0] = tmp;
	/* upper bound: smallest cluster centre */
	cvMinMaxLoc(means, &tmp, NULL, NULL, NULL, NULL);
	interval[1] = tmp;
	cvReleaseMat(&data);
	cvReleaseMat(&labels);
	cvReleaseMat(&means); /* FIX: was leaked in the original */
#undef CLUSTERS
}
开发者ID:ChristianFrisson,项目名称:XKin,代码行数:26,代码来源:clustering.c
示例2: asef_locate_eyes
/* Locate both eyes inside the previously detected face rectangle.
 *
 * Pipeline: crop the face from the input image, resize it to the
 * filter size, apply the intensity LUT, take the forward DFT, correlate
 * with the left/right ASEF filters in the frequency domain, and pick
 * the correlation peak inside each eye region of interest.  Results are
 * written to asef->left_eye / asef->right_eye in input-image coordinates.
 *
 * NOTE(review): asef->face_image appears to be a pre-existing matrix
 * header that cvGetSubRect points at the face sub-rectangle -- confirm
 * against the AsefEyeLocator definition.
 */
void asef_locate_eyes(AsefEyeLocator *asef){
/* point the face_image header at the detected face rectangle */
asef->face_image.cols = asef->face_rect.width;
asef->face_image.rows = asef->face_rect.height;
asef->face_image.type = CV_8UC1;
asef->face_image.step = asef->face_rect.width;
cvGetSubRect(asef->input_image, &asef->face_image, asef->face_rect);
/* scale factors between the fixed filter size and the actual crop */
double xscale = ((double)asef->scaled_face_image_8uc1->cols)/((double)asef->face_image.cols);
double yscale = ((double)asef->scaled_face_image_8uc1->rows)/((double)asef->face_image.rows);
/* normalize the crop: resize, LUT to float, forward DFT */
cvResize(&asef->face_image, asef->scaled_face_image_8uc1, CV_INTER_LINEAR);
cvLUT(asef->scaled_face_image_8uc1, asef->scaled_face_image_32fc1, asef->lut);
cvDFT(asef->scaled_face_image_32fc1, asef->scaled_face_image_32fc1, CV_DXT_FORWARD, 0);
/* frequency-domain correlation with each eye's ASEF filter */
cvMulSpectrums(asef->scaled_face_image_32fc1, asef->lfilter_dft, asef->lcorr, CV_DXT_MUL_CONJ);
cvMulSpectrums(asef->scaled_face_image_32fc1, asef->rfilter_dft, asef->rcorr, CV_DXT_MUL_CONJ);
cvDFT(asef->lcorr, asef->lcorr, CV_DXT_INV_SCALE, 0);
cvDFT(asef->rcorr, asef->rcorr, CV_DXT_INV_SCALE, 0);
/* the correlation maximum inside each eye ROI is the eye position */
cvMinMaxLoc(asef->lroi, NULL, NULL, NULL, &asef->left_eye, NULL);
cvMinMaxLoc(asef->rroi, NULL, NULL, NULL, &asef->right_eye, NULL);
/* map peak coordinates back to full input-image coordinates */
asef->left_eye.x = (asef->lrect.x + asef->left_eye.x)/xscale + asef->face_rect.x;
asef->left_eye.y = (asef->lrect.y + asef->left_eye.y)/yscale + asef->face_rect.y;
asef->right_eye.x = (asef->rrect.x + asef->right_eye.x)/xscale + asef->face_rect.x;
asef->right_eye.y = (asef->rrect.y + asef->right_eye.y)/yscale + asef->face_rect.y;
}
开发者ID:rogerils,项目名称:ASEF,代码行数:30,代码来源:asef.c
示例3: asef_locate_eyes
/* Locate both eyes inside `face_rect` of `image` with the ASEF
 * correlation filters; writes the results to *leye and *reye in
 * full-image coordinates. */
void asef_locate_eyes(AsefEyeLocator *asef, IplImage *image, CvRect face_rect, CvPoint *leye, CvPoint *reye){
	/* Point the pre-allocated matrix header at the face region. */
	asef->face_img.cols = face_rect.width;
	asef->face_img.rows = face_rect.height;
	asef->face_img.type = CV_8UC1;
	asef->face_img.step = face_rect.width;
	cvGetSubRect(image, &asef->face_img, face_rect);

	/* Scale factors between the filter tile and the actual face crop. */
	double scale_x = ((double)asef->image_tile->cols)/((double)asef->face_img.cols);
	double scale_y = ((double)asef->image_tile->rows)/((double)asef->face_img.rows);

	/* Normalize the crop: resize to tile size, LUT to float, forward DFT. */
	cvResize(&asef->face_img, asef->image_tile, CV_INTER_LINEAR);
	cvLUT(asef->image_tile, asef->image, asef->lut);
	cvDFT(asef->image, asef->image, CV_DXT_FORWARD, 0);

	/* Left eye: correlate with the left filter, take the ROI peak. */
	cvMulSpectrums(asef->image, asef->lfilter_dft, asef->lcorr, CV_DXT_MUL_CONJ);
	cvDFT(asef->lcorr, asef->lcorr, CV_DXT_INV_SCALE, 0);
	cvMinMaxLoc(asef->lroi, NULL, NULL, NULL, leye, NULL);

	/* Right eye: same procedure with the right filter. */
	cvMulSpectrums(asef->image, asef->rfilter_dft, asef->rcorr, CV_DXT_MUL_CONJ);
	cvDFT(asef->rcorr, asef->rcorr, CV_DXT_INV_SCALE, 0);
	cvMinMaxLoc(asef->rroi, NULL, NULL, NULL, reye, NULL);

	/* Map the peaks back into original-image coordinates. */
	leye->x = (asef->lrect.x + leye->x)/scale_x + face_rect.x;
	leye->y = (asef->lrect.y + leye->y)/scale_y + face_rect.y;
	reye->x = (asef->rrect.x + reye->x)/scale_x + face_rect.x;
	reye->y = (asef->rrect.y + reye->y)/scale_y + face_rect.y;
}
开发者ID:o2co2,项目名称:ASEF,代码行数:30,代码来源:asef.c
示例4: cvGetImageROI
// Find the stored pose sample closest (by cvNorm, L2 by default) to the
// given patch.
//
// patch    - input image patch; its ROI defines the compared region
// pose_idx - out: index of the best-matching pose sample, or -1 if no
//            sample has the same dimensions as the patch
// distance - out: distance to that sample, or the 1e10 sentinel
void CvOneWayDescriptor::EstimatePose(IplImage* patch, int& pose_idx, float& distance) const
{
// sentinels: returned unchanged when no sample matches the patch size
distance = 1e10;
pose_idx = -1;
CvRect roi = cvGetImageROI(patch);
IplImage* patch_32f = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_32F, patch->nChannels);
// normalize the patch so its pixel values sum to 1
// NOTE(review): divides by the pixel sum; an all-zero patch would
// divide by zero -- confirm callers never pass one.
float sum = cvSum(patch).val[0];
cvConvertScale(patch, patch_32f, 1/sum);
for(int i = 0; i < m_pose_count; i++)
{
// only samples with matching dimensions can be compared
if(m_samples[i]->width != patch_32f->width || m_samples[i]->height != patch_32f->height)
{
continue;
}
// distance between the normalized patch and this pose sample
float dist = cvNorm(m_samples[i], patch_32f);
//float dist = 0.0f;
//float i1,i2;
//for (int y = 0; y<patch_32f->height; y++)
// for (int x = 0; x< patch_32f->width; x++)
// {
// i1 = ((float*)(m_samples[i]->imageData + m_samples[i]->widthStep*y))[x];
// i2 = ((float*)(patch_32f->imageData + patch_32f->widthStep*y))[x];
// dist+= (i1-i2)*(i1-i2);
// }
// keep the running best match
if(dist < distance)
{
distance = dist;
pose_idx = i;
}
// disabled debug visualisation: show the sample and patch side by side
#if 0
IplImage* img1 = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_8U, 1);
IplImage* img2 = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_8U, 1);
double maxval;
cvMinMaxLoc(m_samples[i], 0, &maxval);
cvConvertScale(m_samples[i], img1, 255.0/maxval);
cvMinMaxLoc(patch_32f, 0, &maxval);
cvConvertScale(patch_32f, img2, 255.0/maxval);
cvNamedWindow("1", 1);
cvShowImage("1", img1);
cvNamedWindow("2", 1);
cvShowImage("2", img2);
printf("Distance = %f\n", dist);
cvWaitKey(0);
#endif
}
cvReleaseImage(&patch_32f);
}
开发者ID:PR2,项目名称:pr2_plugs,代码行数:54,代码来源:one_way_descriptor.cpp
示例5: convertFloatImageToUcharImage
// Convert a 32-bit float image into a freshly allocated 8-bit image,
// linearly mapping the source's [min, max] range onto [0, 255].
// Returns 0 for a null or empty source.  The caller owns the returned
// image and must call cvReleaseImage() on it.
IplImage* convertFloatImageToUcharImage(const IplImage *srcImg)
{
	if (!srcImg || srcImg->width <= 0 || srcImg->height <= 0)
		return 0;

	// Find the dynamic range of the float data.
	double lo, hi;
	cvMinMaxLoc(srcImg, &lo, &hi);
	// The DFT can yield NaNs and extreme magnitudes; clamp them first.
	if (cvIsNaN(lo) || lo < -1e30)
		lo = -1e30;
	if (cvIsNaN(hi) || hi > 1e30)
		hi = 1e30;
	// Guard against a flat image (would divide by zero below).
	if (hi - lo == 0.0f)
		hi = lo + 0.001;

	// Allocate the destination and map [lo, hi] -> [0, 255].
	IplImage *out = cvCreateImage(cvSize(srcImg->width, srcImg->height), 8, 1);
	cvConvertScale(srcImg, out, 255.0 / (hi - lo), - lo * 255.0 / (hi - lo));
	return out;
}
开发者ID:HVisionSensing,项目名称:DesignProjectOpenCV,代码行数:27,代码来源:FaceRec.cpp
示例6: gst_motiondetect_apply
/* Return TRUE when any masked pixel differs from the reference by more
 * than the noise threshold.  Note: cvReferenceImage is reused in place
 * as scratch space for the thresholded difference image. */
static gboolean gst_motiondetect_apply (
    IplImage * cvReferenceImage, const IplImage * cvCurrentImage,
    const IplImage * cvMaskImage, float noiseThreshold)
{
  /* Map the normalized noise threshold (0..1) to an 8-bit pixel value. */
  int pixel_threshold = (int)((1 - noiseThreshold) * 255);

  /* Absolute difference and binarization, computed in place. */
  IplImage *diff = cvReferenceImage;
  cvAbsDiff( cvReferenceImage, cvCurrentImage, diff );
  cvThreshold (diff, diff, pixel_threshold, 255, CV_THRESH_BINARY);

  /* One erosion pass with a 3x3 ellipse removes isolated noise pixels. */
  IplConvKernel *ellipse = cvCreateStructuringElementEx (3, 3, 1, 1,
      CV_SHAPE_ELLIPSE, NULL);
  cvErode (diff, diff, ellipse, 1);
  cvReleaseStructuringElement(&ellipse);

  /* Any surviving pixel inside the mask counts as motion. */
  double peak = -1.0;
  cvMinMaxLoc(diff, NULL, &peak, NULL, NULL, cvMaskImage );
  if (peak > 0) {
    return TRUE;
  } else {
    return FALSE;
  }
}
开发者ID:ekelly30,项目名称:stb-tester,代码行数:25,代码来源:gstmotiondetect.c
示例7: main
// Template-matching demo (Learning OpenCV, Example 7-5).
// Usage: matchTemplate <image> <template>
// Runs cvMatchTemplate with all six comparison methods and displays
// each result map in its own window.
int main( int argc, char** argv ) {
	IplImage *src, *templ, *ftmp[6]; // ftmp will hold the six result maps
	CvPoint minloc[6], maxloc[6];
	double minval[6], maxval[6];
	int i;
	if( argc == 3){
		// Read in the source image to be searched:
		if((src=cvLoadImage(argv[1], 1))== 0) {
			// FIX: the original printed argv[i] here while `i` was still
			// uninitialized (undefined behavior).
			printf("Error on reading src image %s\n",argv[1]);
			return(-1);
		}
		// Read in the template to be used for matching:
		if((templ=cvLoadImage(argv[2], 1))== 0) {
			printf("Error on reading template %s\n",argv[2]);
			return(-1);
		}
		// Allocate one 32-bit float result image per method; matching a
		// (w x h) template over a (W x H) image yields (W-w+1) x (H-h+1).
		int iwidth = src->width - templ->width + 1;
		int iheight = src->height - templ->height + 1;
		for(i=0; i<6; ++i){
			ftmp[i] = cvCreateImage(
				cvSize(iwidth,iheight),32,1);
		}
		// Do the matching of the template with the image, once per method:
		for(i=0; i<6; ++i){
			cvMatchTemplate( src, templ, ftmp[i], i);
			//cvNormalize(ftmp[i],ftmp[i],1,0,CV_MINMAX);
			cvMinMaxLoc(ftmp[i], &minval[i], &maxval[i], &minloc[i], &maxloc[i], 0);
			std::cerr /*<< i << ":" << "minval: " << minval[i] \
				<< " maxval: " << maxval[i] */ \
				<< " minloc: " << minloc[i].x << ", " << minloc[i].y \
				<< " maxloc: " << maxloc[i].x << ", " << maxloc[i].y;
			std::cerr << "\n";
		}
		// Display the inputs and every result map:
		cvNamedWindow( "Template", 0 );
		cvShowImage( "Template", templ );
		cvNamedWindow( "Image", 0 );
		cvShowImage( "Image", src );
		cvNamedWindow( "SQDIFF", 0 );
		cvShowImage( "SQDIFF", ftmp[0] );
		cvNamedWindow( "SQDIFF_NORMED", 0 );
		cvShowImage( "SQDIFF_NORMED", ftmp[1] );
		cvNamedWindow( "CCORR", 0 );
		cvShowImage( "CCORR", ftmp[2] );
		cvNamedWindow( "CCORR_NORMED", 0 );
		cvShowImage( "CCORR_NORMED", ftmp[3] );
		cvNamedWindow( "CCOEFF", 0 );
		cvShowImage( "CCOEFF", ftmp[4] );
		cvNamedWindow( "CCOEFF_NORMED", 0 );
		cvShowImage( "CCOEFF_NORMED", ftmp[5] );
		// Let the user view the results:
		cvWaitKey(0);
	} else {
		printf("Call should be:"
			"matchTemplate image template \n");
	}
	return 0;
}
开发者ID:fragrans,项目名称:castek_git,代码行数:60,代码来源:opencv7_5.cpp
示例8: LogMinMax
// Write the minimum and maximum element of `mat` to `os`,
// tab-separated, terminated by endl.
void LogMinMax(CvArr* mat,std::ostream& os)
{
	double lo = 0.0, hi = 0.0;
	cvMinMaxLoc(mat, &lo, &hi, NULL, NULL, NULL);
	os << lo << "\t" << hi << std::endl;
}
开发者ID:AAAyag,项目名称:handinput,代码行数:7,代码来源:harrisbuffer.cpp
示例9: ObtenerMaximo
/* Return the maximum normalized distance to the background model,
 * |I(p)-u(p)| / sigma(p), evaluated over the foreground mask.
 *
 * Imagen    - current frame (single channel, matching BGModel)
 * FrameData - holds the background model, per-pixel deviation and mask
 * Roi       - unused (kept for interface compatibility)
 *
 * Returns a heap-allocated double the caller must `delete`.
 *
 * FIX: the original passed a NULL double* to cvMinMaxLoc (crash),
 * requested the minimum instead of the maximum, and leaked both
 * temporary images on every call. */
double* ObtenerMaximo(IplImage* Imagen, STFrame* FrameData, CvRect Roi) {
	if (SHOW_VALIDATION_DATA == 1)
		printf(" \n\n Busqueda del máximo umbral...");
	CvSize size = cvSize(FrameData->BGModel->width, FrameData->BGModel->height);
	// difference image abs(I(p)-u(p)) and the resulting weight image
	IplImage* IDif = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* peso = cvCreateImage(size, IPL_DEPTH_32F, 1);
	cvZero(IDif);
	cvZero(peso);
	// |I(p)-u(p)| / sigma(p)
	cvAbsDiff(Imagen, FrameData->BGModel, IDif);
	cvDiv(IDif, FrameData->IDesvf, peso);
	// Find the maximum weight inside the foreground mask.
	double* Maximo = new double(0.0);
	cvMinMaxLoc(peso, 0, Maximo, 0, 0, FrameData->FG);
	cvReleaseImage(&IDif);
	cvReleaseImage(&peso);
	return Maximo;
}
开发者ID:beetecu,项目名称:trackingdrosophila,代码行数:28,代码来源:validacion2.cpp
示例10: main
// Template-matching demo: locate `template` inside `source` with
// CV_TM_SQDIFF, draw a red rectangle around the best match and show it.
// Usage: prog <source-image> <template-image>
int main(int argc, char** argv){
	if(argc != 3){
		printf("Error 1: 2 arguments expected, %d given.\n",argc-1);
		// FIX: signal failure instead of returning success (was `return 0`).
		return 1;
	}
	IplImage* source = cvLoadImage(argv[1],CV_LOAD_IMAGE_COLOR);
	IplImage* tmpl = cvLoadImage(argv[2],CV_LOAD_IMAGE_COLOR);
	// FIX: the original never checked the loads and crashed on bad paths.
	if(!source || !tmpl){
		printf("Error 2: could not load input images.\n");
		return 1;
	}
	// Result map is (W-w+1) x (H-h+1), single-channel float.
	int ww = source->width - tmpl->width + 1;
	int hh = source->height - tmpl->height + 1;
	IplImage *result = cvCreateImage(cvSize(ww,hh),IPL_DEPTH_32F, 1);
	cvMatchTemplate(source, tmpl, result, CV_TM_SQDIFF);
	// For SQDIFF the minimum is the best match.
	CvPoint minLoc;
	CvPoint maxLoc;
	double minVal;
	double maxVal;
	cvMinMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, 0);
	cvRectangle(source, minLoc, cvPoint(minLoc.x+tmpl->width,minLoc.y+tmpl->height),cvScalar(0,0,255,1),1,8,0);
	cvNamedWindow("match",CV_WINDOW_AUTOSIZE);
	cvShowImage("match",source);
	cvWaitKey(0);
	cvReleaseImage(&source);
	cvReleaseImage(&tmpl);
	cvReleaseImage(&result);
	cvDestroyWindow("match");
	return 0;
}
开发者ID:ikx,项目名称:water-mark,代码行数:29,代码来源:bgr_match.c
示例11: cvBoundingRect
// Track contour `i` from the previous frame (oimg) into the current
// frame (img) by template matching: the contour's bounding box in the
// old frame is the template, a window grown by `winsize` around it is
// the search area.  The old contour and its feature points are shifted
// by the measured displacement.  `map` is a caller-owned result image,
// reallocated here whenever its required size changes.
void MatchTemplatePlugin::ProcessStatic
( int i, ImagePlus *img, ImagePlus *oimg,
int method, CvSize winsize, IplImage* &map){
// template region: bounding box of the contour in the previous frame
CvRect orect = cvBoundingRect(oimg->contourArray[i],1);
RestrictRectLoc(orect, cvRect(0,0,img->orig->width,img->orig->height));
cvSetImageROI(oimg->orig, orect);
// search area: template box grown by winsize on each side, clipped
CvRect rect = cvRect(MAX(0,orect.x-winsize.width), MAX(0,orect.y-winsize.height),orect.width+2*winsize.width, orect.height+2*winsize.height);
rect.width = MIN(rect.width,oimg->orig->width-rect.x);
rect.height = MIN(rect.height,oimg->orig->height-rect.y);
cvSetImageROI(img->orig, rect);
// (re)allocate the result map only when its size changed
CvSize mapsize = MyPoint(MyPoint(rect)-MyPoint(orect)+wxPoint(1,1)).ToCvSize();
if (map && MyPoint(cvGetSize(map))!=MyPoint(mapsize))
cvReleaseImage(&map);
if( !map )
map = cvCreateImage(mapsize, IPL_DEPTH_32F, 1);
cvMatchTemplate( img->orig, oimg->orig, map, method );
cvResetImageROI(img->orig);
cvResetImageROI(oimg->orig);
// best match: minimum for SQDIFF-type methods, maximum for the rest
CvPoint minloc;
CvPoint maxloc;
double minval, maxval;
cvMinMaxLoc( map, &minval, &maxval, &minloc, &maxloc);
bool minisbest = (method == CV_TM_SQDIFF || method==CV_TM_SQDIFF_NORMED);
rect.x = rect.x + (minisbest ? minloc.x : maxloc.x);
rect.y = rect.y + (minisbest ? minloc.y : maxloc.y);
// shift old contour + feature points by the measured displacement
CvPoint shift = cvPoint(rect.x - orect.x, rect.y - orect.y);
ShiftContour(oimg->contourArray[i],img->contourArray[i],shift);
ShiftFeatPoints(oimg->feats[i], img->feats[i], cvPointTo32f(shift));
}
开发者ID:p1r4nh4,项目名称:CellTrack,代码行数:32,代码来源:MatchTemplatePlugin.cpp
示例12: cvAvg
// Update the pupil/white brightness statistics (pupilAvg, whiteAvg,
// whiteMin/whiteMax and their locations) for the eye region given by
// targetRect.  The pupil average is computed twice: the second pass
// excludes pixels far above the first-pass average so that glint
// highlights do not skew the result.
// NOTE(review): `divisor` is assumed to be the downscale factor between
// the full frame and smallCurrentImg -- confirm where roi is declared.
void thresholdCalculator::calculateAverages(ofxCvGrayscaleAdvanced & smallCurrentImg, ofxCvGrayscaleAdvanced & maskImg, ofRectangle & targetRect) {
// map the target rect into the downscaled image's coordinates
roi.x = targetRect.x / divisor;
roi.y = targetRect.y / divisor;
maskImg.setROI(roi);
smallCurrentImg.setROI(roi);
// first-pass pupil average over the masked (pupil) pixels
CvScalar tempPupilAvg = cvAvg(smallCurrentImg.getCvImage(), maskImg.getCvImage());
// inverted mask selects the non-pupil (white/sclera) pixels
cvNot(maskImg.getCvImage(), notDiffImg.getCvImage());
pupilAvg = tempPupilAvg.val[0];
// get average of pupil black iteratively(get average twice) to remove the influence of glint
cvThreshold(smallCurrentImg.getCvImage(), farFromAvg, pupilAvg + 30, 255, CV_THRESH_BINARY); // 30 is the distance from average.
cvSub(maskImg.getCvImage(), farFromAvg, newMask); // make a mask to get rid of those far points.
CvScalar newPupilAvg = cvAvg(smallCurrentImg.getCvImage(), newMask); // get new average value.
// get average, min and max value of white area of an eye.
CvScalar tempWhiteAvg = cvAvg(smallCurrentImg.getCvImage(), notDiffImg.getCvImage());
for (int i = 0; i < 6; i++) notDiffImg.erode(); // this might be very useful to reduce the influence of small noise & glint
cvMinMaxLoc(smallCurrentImg.getCvImage(), &whiteMin, &whiteMax, &whiteLocMin, &whiteLocMax, notDiffImg.getCvImage());
maskImg.resetROI();
smallCurrentImg.resetROI();
pupilAvg = newPupilAvg.val[0]; // value is in the first element of CvScalar
whiteAvg = tempWhiteAvg.val[0];
}
开发者ID:BluntBlade,项目名称:eyewriter,代码行数:29,代码来源:thresholdCalculator.cpp
示例13: cvSize
// Find the best match of `tpl` inside `input` (CV_TM_SQDIFF: minimum
// wins) and return its top-left corner.  Also refreshes the 8-bit
// preview stored in ofcv_result_image and updates min_val/max_val.
ofPoint matchFinder::getPoint() {
	// result map size: (W - w + 1) x (H - h + 1)
	CvSize result_size = cvSize(input.getWidth() - tpl.getWidth() + 1,
	input.getHeight() - tpl.getHeight() + 1);
	// create the result image for the comparison
	IplImage *result_image = cvCreateImage(result_size, IPL_DEPTH_32F, 1);
	// make the comparison
	cvMatchTemplate(input.getCvImage(), tpl.getCvImage(), result_image, CV_TM_SQDIFF);
	// get the location of the best match before normalizing for display
	CvPoint min_loc;
	CvPoint max_loc;
	cvMinMaxLoc(result_image, &min_val, &max_val, &min_loc, &max_loc, 0);
	// FIX: the original assigned an *uninitialized* result_char to
	// ofcv_result_image and leaked it.  Scale the float map into the
	// 8-bit preview, then release both temporaries.
	IplImage *result_char = cvCreateImage(cvSize(result_image->width, result_image->height), 8, 1);
	cvNormalize(result_image, result_image, 255, 0, CV_MINMAX);
	cvConvert(result_image, result_char);
	ofcv_result_image.allocate(result_size.width, result_size.height);
	ofcv_result_image = result_char; // ofxCv copies the pixel data
	// clean up
	cvReleaseImage(&result_image);
	cvReleaseImage(&result_char);
	// return value
	ofPoint p = ofPoint(min_loc.x, min_loc.y);
	return p;
}
开发者ID:breakfastny,项目名称:Verbalizer,代码行数:29,代码来源:matchFinder.cpp
示例14: cvGetMat
// Return the parameter vector of the strongest accumulator cell, or an
// empty vector when the best cell holds fewer than 10 votes.
DOUBLEVECT HoughAccumulator::FindBest()
{
DOUBLEVECT v;
CvMat temp;
// view the N-dimensional accumulator as a 2-D matrix so that
// cvMinMaxLoc can scan it in one pass
CvMat* locMat = cvGetMat(acc, &temp, NULL, 1);
// int rowsize = 4 * ((acc->dims / 4) +
// ((acc->dims % 4 > 0) ? 1 : 0));
double max_val;
CvPoint max_loc;
cvMinMaxLoc(locMat, NULL, &max_val, NULL, &max_loc, NULL);
// flat offset of the peak inside the matrix
// NOTE(review): this mixes max_loc.x (an element index) with
// locMat->step (a byte stride) -- verify the units line up with
// acc->dim[].step before relying on this math.
int indraw = max_loc.x + max_loc.y * locMat->step;
uchar* pValue = cvPtr2D(locMat, max_loc.y, max_loc.x);
// reject weak peaks: fewer than 10 votes
if (*pValue < 10)
return v;
// unravel the flat offset into one index per accumulator dimension
indices[0] = indraw / acc->dim[0].step;
indices[acc->dims - 1] = indraw % acc->dim[acc->dims - 2].step;
for (int i = 1; i < acc->dims - 1; i ++)
indices[i] = (indraw % acc->dim[i - 1].step) / acc->dim[i].step;
// convert each cell index back into its parameter value
for (int j = 0; j < acc->dims; j++)
{
double d = indices[j] / (float)precision + paramRanges[j].min;
v.push_back(d);
}
return v;
}
开发者ID:Boosting,项目名称:stereo-gaze-tracker,代码行数:27,代码来源:IrisFinderHoughEllipse.cpp
示例15: CVImage
void Convert32FTo8U::execute() {
CVImage* cvimg = cvImageIn.getBuffer();
if(!cvimg) { if(debug) std::cerr << getName() << "::ERROR::cvImageIn is NULL!\n"; cvImageOut.setBuffer(NULL); cvImageOut.out(); return; }
if(cvimg->cvMatType != CV_32FC1) { if(debug) std::cerr << getName() << "::ERROR::cvImageIn has incorrect type (must be CV_32FC1)!\n"; cvImageOut.setBuffer(NULL); cvImageOut.out(); return; }
if(!mp_cvimg8u){
//mp_cvimg32f = new CVImage(cvSize(cvimg->width, cvimg->height), CV_32FC1, 0);
mp_cvimg8u = new CVImage(cvSize(cvimg->width, cvimg->height), CV_8UC1, 0);
}
IplImage* img = cvimg->ipl;
double minval, maxval;
cvMinMaxLoc(img, &minval, &maxval, NULL, NULL, NULL);
double scale, shift;
if(maxval == minval) { scale = 255.0; shift = 0.0; }
else {
scale = 255.0 / (maxval - minval);
shift = - minval * scale;
}
cvConvertScale(img, mp_cvimg8u->ipl, scale, shift);
cvImageOut.setBuffer(mp_cvimg8u);
cvImageOut.out();
}
开发者ID:gatsoulis,项目名称:cappocacciaactivevision,代码行数:28,代码来源:convert32FTo8U.cpp
示例16: template_original_match
// Score how well `template_image` matches `original_image`.
// Both images are resized to a common working size (the original kept
// 8px larger per side, so 8*8 shifted matches are evaluated -- a value
// found by repeated trials) and the best CV_TM_CCOEFF_NORMED score is
// returned (higher is better, max 1.0).
double template_original_match(IplImage* original_image,IplImage* template_image)
{
	IplImage* resized_original_image = cvCreateImage(cvSize(MATCHING_WIDTH + 8, MATCHING_HEIGHT + 8),original_image->depth,original_image-> nChannels);
	IplImage* resized_template_image = cvCreateImage(cvSize(MATCHING_WIDTH, MATCHING_HEIGHT),template_image->depth,template_image-> nChannels);
	// result map: (W-w+1) x (H-h+1), i.e. 9x9 here
	IplImage* matching_result = cvCreateImage( cvSize(resized_original_image->width - resized_template_image -> width + 1,resized_original_image->height - resized_template_image->height + 1), IPL_DEPTH_32F, 1 );
	double min_val;
	double max_val;
	CvPoint min_loc;
	CvPoint max_loc;
	cvResize(original_image,resized_original_image);
	cvResize(template_image,resized_template_image);
	//cvSmooth(resized_original_image,resized_original_image);
	//cvSmooth(resized_template_image,resized_template_image);
	// The match with max_val is the best match for CCOEFF_NORMED.
	cvMatchTemplate(resized_original_image,resized_template_image,matching_result,CV_TM_CCOEFF_NORMED);
	cvMinMaxLoc(matching_result, &min_val, &max_val, &min_loc, &max_loc, NULL);
	// FIX: all three temporaries were leaked on every call.
	cvReleaseImage(&resized_original_image);
	cvReleaseImage(&resized_template_image);
	cvReleaseImage(&matching_result);
	return max_val;
}
开发者ID:hahalaugh,项目名称:ComputerVision,代码行数:29,代码来源:road_signs.cpp
示例17: locate_eye
/**
 * Locate the user's eye with template matching.
 *
 * @param IplImage* img    the source frame
 * @param IplImage* tpl    the eye template
 * @param CvRect*   window out: the search window actually used
 * @param CvRect*   eye    in/out: previous eye location on entry,
 *                         updated location on success
 * @return int '1' if found, '0' otherwise
 */
int
locate_eye(IplImage* img, IplImage* tpl, CvRect* window, CvRect* eye)
{
	/* centre of the previous eye rectangle */
	CvPoint center = cvPoint(
		eye->x + eye->width / 2,
		eye->y + eye->height / 2
	);

	/* search window centred on the previous location; adjust the
	   predefined WIN_WIDTH / WIN_HEIGHT for your own needs */
	CvRect search = cvRect(
		center.x - WIN_WIDTH / 2,
		center.y - WIN_HEIGHT / 2,
		WIN_WIDTH,
		WIN_HEIGHT
	);

	/* clamp the search window to the frame boundaries */
	if (search.x < 0)
		search.x = 0;
	if (search.y < 0)
		search.y = 0;
	if (search.x + search.width > img->width)
		search.x = img->width - search.width;
	if (search.y + search.height > img->height)
		search.y = img->height - search.height;

	/* the result map has size (W - w + 1) x (H - h + 1) */
	int rw = search.width - tpl->width + 1;
	int rh = search.height - tpl->height + 1;
	IplImage* scores = cvCreateImage(cvSize(rw, rh), IPL_DEPTH_32F, 1);

	/* restrict matching to the search window */
	cvSetImageROI(img, search);
	cvMatchTemplate(img, tpl, scores, CV_TM_SQDIFF_NORMED);

	/* for SQDIFF_NORMED the minimum is the best match */
	CvPoint best_loc, worst_loc;
	double best_val, worst_val;
	cvMinMaxLoc(scores, &best_val, &worst_val, &best_loc, &worst_loc, 0);

	cvResetImageROI(img);
	cvReleaseImage(&scores);

	/* accept good matches only */
	if (best_val > TM_THRESHOLD)
		return 0;

	/* report the search window and the new eye location */
	*window = search;
	*eye = cvRect(
		search.x + best_loc.x,
		search.y + best_loc.y,
		TPL_WIDTH,
		TPL_HEIGHT
	);
	return 1;
}
开发者ID:favoryoung,项目名称:Head-Position-Corrector,代码行数:70,代码来源:BlinkDLL.cpp
示例18: ofLog
//--------------------------------------------------------------------------------
// Stretch the image's current [min, max] intensity range to [0, 255].
void ofxCvGrayscaleImage::contrastStretch() {
	if( bAllocated ){
		double darkest, brightest;
		cvMinMaxLoc( cvImage, &darkest, &brightest, NULL, NULL, 0 );
		rangeMap( cvImage, darkest, brightest, 0, 255 );
		flagImageChanged();
	} else {
		ofLog(OF_LOG_ERROR, "in contrastStretch, image is not allocated");
	}
}
开发者ID:3snail,项目名称:openFrameworks,代码行数:11,代码来源:ofxCvGrayscaleImage.cpp
示例19: ofLogError
//--------------------------------------------------------------------------------
// Stretch the image's current [min, max] intensity range to [0, 255].
void ofxCvGrayscaleImage::contrastStretch() {
	// nothing to do on an unallocated image
	if( !bAllocated ){
		ofLogError("ofxCvGrayscaleImage") << "contrastStretch(): image not allocated";
		return;
	}
	double lowest = 0.0;
	double highest = 0.0;
	cvMinMaxLoc( cvImage, &lowest, &highest, NULL, NULL, 0 );
	rangeMap( cvImage, lowest, highest, 0, 255 );
	flagImageChanged();
}
开发者ID:omarieclaire,项目名称:closer,代码行数:11,代码来源:ofxCvGrayscaleImage.cpp
示例20: cvCreateMat
// Render a synthesized face: warp the packed AAM texture vector
// `texture` onto the geometry given by `shape`, writing the result into
// `newImage` (3-channel, written via CV_IMAGE_ELEM).
// NOTE(review): `shape` is scaled/translated in place, and `texture` is
// rescaled to [0, 255] in place -- both arguments are modified.
void FacePredict::FaceSynthesis(AAM_Shape &shape, CvMat* texture, IplImage* newImage)
{
// normalise the shape: scale to the standard face width and move its
// bounding box to the origin
double thisfacewidth = shape.GetWidth();
shape.Scale(stdwidth / thisfacewidth);
shape.Translate(-shape.MinX(), -shape.MinY());
// build a piecewise-affine warp over the normalised shape's triangles
AAM_PAW paw;
CvMat* points = cvCreateMat (1, __shape.nPoints(), CV_32FC2);
CvMemStorage* storage = cvCreateMemStorage(0);
paw.Train(shape, points, storage, __paw.GetTri(), false); //the actual shape
__AAMRefShape.Translate(-__AAMRefShape.MinX(), -__AAMRefShape.MinY()); //refShape, central point is at (0,0);translate the min to (0,0)
// stretch the texture values onto [0, 255]
double minV, maxV;
cvMinMaxLoc(texture, &minV, &maxV);
cvConvertScale(texture, texture, 1/(maxV-minV)*255, -minV*255/(maxV-minV));
cvZero(newImage);
int x1, x2, y1, y2, idx1 = 0, idx2 = 0;
int tri_idx, v1, v2, v3;
int minx, miny, maxx, maxy;
minx = shape.MinX(); miny = shape.MinY();
maxx = shape.MaxX(); maxy = shape.MaxY();
// walk every pixel of the shape's bounding box
for(int y = miny; y < maxy; y++)
{
y1 = y-miny;
for(int x = minx; x < maxx; x++)
{
x1 = x-minx;
// idx1 >= 0 iff the pixel lies inside the warped shape
idx1 = paw.Rect(y1, x1);
if(idx1 >= 0)
{
// barycentric interpolation: map this pixel through its triangle
// into the reference shape's coordinate frame
tri_idx = paw.PixTri(idx1);
v1 = paw.Tri(tri_idx, 0);
v2 = paw.Tri(tri_idx, 1);
v3 = paw.Tri(tri_idx, 2);
x2 = paw.Alpha(idx1)*__AAMRefShape[v1].x + paw.Belta(idx1)*__AAMRefShape[v2].x +
paw.Gamma(idx1)*__AAMRefShape[v3].x;
y2 = paw.Alpha(idx1)*__AAMRefShape[v1].y + paw.Belta(idx1)*__AAMRefShape[v2].y +
paw.Gamma(idx1)*__AAMRefShape[v3].y;
// look up the texture sample at the reference location
idx2 = __paw.Rect(y2, x2);
if(idx2 < 0) continue;
// copy the 3-channel pixel from the packed texture vector
CV_IMAGE_ELEM(newImage, byte, y, 3*x) = cvmGet(texture, 0, 3*idx2);
CV_IMAGE_ELEM(newImage, byte, y, 3*x+1) = cvmGet(texture, 0, 3*idx2+1);
CV_IMAGE_ELEM(newImage, byte, y, 3*x+2) = cvmGet(texture, 0, 3*idx2+2);
}
}
}
cvReleaseMat(&points);
cvReleaseMemStorage(&storage);
}
开发者ID:timedcy,项目名称:face,代码行数:54,代码来源:FacePredict.cpp
注:本文中的cvMinMaxLoc函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论