本文整理汇总了C++中cvSetImageROI函数的典型用法代码示例。如果您正苦于以下问题:C++ cvSetImageROI函数的具体用法?C++ cvSetImageROI怎么用?C++ cvSetImageROI使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvSetImageROI函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: char_ext
/*
 * Extract characters from a plate image and classify each one with the
 * supplied basicOCR instance.  Returns a malloc'd, NUL-terminated string of
 * the recognised characters (caller owns and frees it).
 *
 * NOTE(review): smooth, threshold, open_morf, contour, contourLow and
 * img_contornos look like file-scope globals defined elsewhere -- confirm.
 * NOTE: this listing is truncated by the source page ("code omitted").
 */
char* char_ext(IplImage* imagen,basicOCR ocr )
{
//cvNamedWindow("temp");
//cvShowImage("temp",imagen);
//cvWaitKey(0);
//char* plate=NULL;
/* Result buffer: one byte per recognised glyph plus the terminator. */
char* no=(char*)malloc(20*sizeof(char));
//------------------------------------- -----------------------------------------------
//NUMBER ISOLATION
//Create needed images
smooth= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
threshold= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
open_morf= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
//Init variables for contours
contour = 0;
contourLow = 0;
//Create storage needed for contour detection
CvMemStorage* storage = cvCreateMemStorage(0);
//Smooth image
cvSmooth(imagen, smooth, CV_GAUSSIAN, 3, 0, 0, 0);
CvScalar avg;
CvScalar avgStd;
cvAvgSdv(smooth, &avg, &avgStd, NULL);
//printf("Avg: %f\nStd: %f\n", avg.val[0], avgStd.val[0]);
/* Adaptive threshold: mean + half the standard deviation, inverted binary. */
cvThreshold(smooth, threshold, (int)avg.val[0]+4*(int)(avgStd.val[0]/8), 255, CV_THRESH_BINARY_INV);
/* Morphological open (erode then dilate) to remove small noise blobs. */
cvErode(threshold, open_morf, NULL,1);
cvDilate(open_morf, open_morf, NULL,1);
//Duplicate image for contour search (cvFindContours modifies its input)
img_contornos=cvCloneImage(open_morf);
//Search contours in preprocessed image
cvFindContours( img_contornos, storage, &contour, sizeof(CvContour),
CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0) );
//Optimize contours, reduce points
contourLow=cvApproxPoly(contour, sizeof(CvContour), storage,CV_POLY_APPROX_DP,1,1);
//-----------------------------------------------------------------------------------------------------------
//-----------------------------------------------------------------------------------------------------------
//NUMBER RECOGNITION
CvRect rect;
int carea=0,area=0;
int count=0;
int match;
int w,h;
w=imagen->width;
h=imagen->height;
area=(w)*(h);
// printf("area : %d, %d %d\n",area,w,h);
//printf("\n%d\n",area/26);
char name[6];
//static int width;
/* Walk every external contour and try to classify it as a character. */
for( ; contourLow != 0; contourLow = contourLow->h_next )
{
rect=cvBoundingRect(contourLow,0);
cvSetImageROI(smooth,rect);
IplImage *temp22=cvCreateImage(cvSize(rect.width,rect.height),IPL_DEPTH_8U,1);
IplImage *temp23=cvCreateImage(cvSize(rect.width,rect.height),IPL_DEPTH_8U,1);
cvCopy(smooth,temp22,NULL);
carea=rect.width*rect.height;
/*if((rect.width>rect.height)||(carea>(area/6))||(carea<(area/25)))
{
cvReleaseImage(&temp22);
continue;
}*/
/* Keep only blobs whose area is plausible for a single character. */
if((carea<(area/4))&&(carea>(area/25)))
{
static int width = temp22->width;
sprintf(name,"char%d",count);
cvNamedWindow(name);
cvMoveWindow(name,840 - count*3*width,10);
cvThreshold(temp22, temp23, (int)avg.val[0]+4*(int)(avgStd.val[0]/8), 255, CV_THRESH_BINARY);
cvShowImage(name,temp23);
cvWaitKey(500);
match=ocr.classify(temp23,0);
/* classify() apparently returns 0-25 for letters, 26+ for digits -- confirm. */
if(match<=25)
no[count]=97+match;
else
no[count]=48+match-26;
count++;
}
cvReleaseImage(&temp22);
cvReleaseImage(&temp23);
cvResetImageROI(smooth);
}
cvWaitKey(0);
no[count]='\0';
//......... part of the code omitted here .........
开发者ID:amnosuperman,项目名称:LPRS,代码行数:101,代码来源:split.c
示例2: main
int main(int argc, char* argv[]) {
// Déclarations
CvHaarClassifierCascade *pCascadeFrontal = 0, *pCascadeProfile = 0; // le detecteur de visage
CvMemStorage *pStorage = 0; // buffer mémoire expensible
CvSeq *pFaceRectSeq; // liste des visages detectés
int i;
/*/ Capture Webcam
CvCapture *capture;
capture = cvCreateCameraCapture(CV_CAP_ANY);
pInpImg = cvQueryFrame(capture);*/
// Initialisations
//IplImage* pInpImg = (argc > 1) ? cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR) : 0;
IplImage *pInpImg = cvLoadImage("D:/测试/test6/3.jpg", CV_LOAD_IMAGE_COLOR);
pStorage = cvCreateMemStorage(0);
pCascadeFrontal = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_frontalface_default.xml",0,0,0);
//pCascadeFrontal = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_frontalface_alt_tree.xml",0,0,0);
pCascadeProfile = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_profileface.xml",0,0,0);
//pCascadeProfile = (CvHaarClassifierCascade *) cvLoad ("D:/测试/test6/FaceRecognition/resources/haarcascade/haarcascade_lowerbody.xml",0,0,0);
// On valide que tout a bien été initialisé correctement
if (!pInpImg || !pStorage || !pCascadeFrontal || !pCascadeProfile) {
printf("L'initilisation a echoue");
exit(-1);
}
// Affiche une fenêtre pour l'affichage des visages
cvNamedWindow("Fenetre de Haar", CV_WINDOW_NORMAL);
cvShowImage("Fenetre de Haar", pInpImg);
cvWaitKey(50);
// Detection de visage DE FACE dans l'image
pFaceRectSeq = cvHaarDetectObjects
(pInpImg, pCascadeFrontal, pStorage,
1.1, // augmente l'échelle de recherche de 10% à chaque passe [1.0-1.4] : plus c'est grand, plus c'est rapide
3, // met de côté les groupes plus petit que 3 détections [0-4] : plus c'est petit, plus il y aura de "hits"
/*0,*/ CV_HAAR_DO_CANNY_PRUNING, // [0] : explore tout ; [1] : abandonne les régions non candidates à contenir un visage
cvSize(0, 0)); // utilise les paramètres XML par défaut (24, 24) pour la plus petite echelle de recherche
// Dessine un rectangle autour de chaque visage detecté
for (i=0 ; i < (pFaceRectSeq ? pFaceRectSeq->total : 0) ; i++) {
CvRect* r = (CvRect*)cvGetSeqElem(pFaceRectSeq, i);
CvPoint pt1 = { r->x, r->y };
CvPoint pt2 = { r->x + r->width, r->y + r->height };
cvRectangle(pInpImg, pt1, pt2, CV_RGB(0,255,0), 3, 4, 0);
// Floutage
cvSetImageROI(pInpImg, *r);
cvSmooth(pInpImg, pInpImg, CV_GAUSSIAN, 5, 3);
cvResetImageROI(pInpImg);
}
cvShowImage("Fenetre de Haar", pInpImg);
cvWaitKey(1);
// Detection de visage DE PROFIL dans l'image
pFaceRectSeq = cvHaarDetectObjects
(pInpImg, pCascadeProfile, pStorage,
1.4, // augmente l'échelle de recherche de 10% à chaque passe [1.0-1.4] : plus c'est grand, plus c'est rapide
3, // met de côté les groupes plus petit que 3 détections [0-4] : plus c'est petit, plus il y aura de "hits"
/*0,*/ CV_HAAR_DO_CANNY_PRUNING, // abandonne les régions non candidates à contenir un visage
cvSize(0, 0)); // utilise les paramètres XML par défaut (24, 24) pour la plus petite echelle de recherche
// Dessine un rectangle autour de chaque visage detecté
for (i=0 ; i < (pFaceRectSeq ? pFaceRectSeq->total : 0) ; i++) {
CvRect* r = (CvRect*)cvGetSeqElem(pFaceRectSeq, i);
CvPoint pt1 = { r->x, r->y };
CvPoint pt2 = { r->x + r->width, r->y + r->height };
cvRectangle(pInpImg, pt1, pt2, CV_RGB(255,165,0), 3, 4, 0);
// Floutage
cvSetImageROI(pInpImg, *r);
cvSmooth(pInpImg, pInpImg, CV_GAUSSIAN, 5, 3);
cvResetImageROI(pInpImg);
}
// Affiche la détection de visage
cvShowImage("Fenetre de Haar", pInpImg);
cvWaitKey(0);
cvDestroyWindow("Fenetre de Haar");
// Libère les ressources
//cvReleaseCapture(&capture); // Capture Webcam
cvReleaseImage(&pInpImg);
if (pCascadeFrontal) cvReleaseHaarClassifierCascade(&pCascadeFrontal);
if (pCascadeProfile) cvReleaseHaarClassifierCascade(&pCascadeProfile);
if (pStorage) cvReleaseMemStorage(&pStorage);
}
开发者ID:Mrzhy,项目名称:test6.1,代码行数:90,代码来源:test6.1.cpp
示例3: cvCopyMakeBorder
/*
 * Connected-component based character extraction.
 * Pads the binary plate image with a 1-pixel black border, scans its external
 * contours, keeps the bounding boxes that pass the area/height/aspect tests,
 * and stores each candidate as a filled binary character mask in oSegResult
 * (ownership of the new FTS_ANPR_SegChar objects passes to oSegResult).
 * NOTE: this listing is truncated by the source page.
 */
void FTS_ANPR_Seg::extractCharByCCAnalysis( const cv::Mat& oBin,
FTS_ANPR_SegResult& oSegResult )
{
// Padd the input image first
// ------------------------------------------------------------------------
m_oPadded.create( oBin.rows + 2,
oBin.cols + 2,
CV_8UC1 );
cv::copyMakeBorder( oBin, m_oPadded, 1, 1, 1, 1, cv::BORDER_CONSTANT );
/* C-API views over the cv::Mat headers (no pixel copy). */
IplImage iiBin = oBin;
IplImage iiPadded = m_oPadded;
cvCopyMakeBorder( &iiBin,
&iiPadded,
cvPoint( 1, 1 ),
IPL_BORDER_CONSTANT,
cvScalarAll( 0 ) ); // pad with black border
// Initializes contour scanning process
// ------------------------------------------------------------------------
CvSeq* poContour = 0;
CvContourScanner oContourScanner;
oContourScanner = cvStartFindContours( &iiPadded,
m_poStorage,
sizeof( CvContour ),
CV_RETR_EXTERNAL, //CV_RETR_LIST,
CV_CHAIN_APPROX_SIMPLE,
cvPoint( 0, 0 ) );
// Contour scanning process
// ------------------------------------------------------------------------
while( ( poContour = cvFindNextContour( oContourScanner ) ) )
{
// Finding bounding boxes that meet the ratio tests
// --------------------------------------------------------------------
CvRect oBox = cvBoundingRect( poContour, 0 );
if( !testArea( oBox )
|| !testHeightOverWidth( oBox )
|| !testHeight( oBox.height, iiBin.height ) )
{
continue;
}
std::list< FTS_ANPR_SegChar*>& oChars = oSegResult.m_oChars;
// Make sure not too many candidates
// --------------------------------------------------------------------
if( oChars.size() >= m_nMaxNumCharCandidates )
{
break; // exit the while loop
}
// Store the character candidate to the segmentation structure
// --------------------------------------------------------------------
oChars.push_back( new FTS_ANPR_SegChar );
FTS_ANPR_SegChar& oSegChar = *( oChars.back() ); // fill in the empty object
oSegChar.m_oCharRect = oBox;
// Offset the bounding box from coordinates in padded image, into coordinates of input image.
--oSegChar.m_oCharRect.x;
--oSegChar.m_oCharRect.y;
// oSegChar.m_oCharBin.resize(oBox.width, oBox.height, SN_PIX_FMT_GREY );
oSegChar.m_oCharBin = cv::Mat::zeros( cv::Size( oSegChar.m_oCharRect.width, oSegChar.m_oCharRect.height ), CV_8UC1 );
IplImage iiSegCharBin = oSegChar.m_oCharBin;
// cvZero( &iiSegCharBin );
// printf("width = %d, height = %d\n", oSegChar.m_oCharRect.width, oSegChar.m_oCharRect.height );
// Draw the outer contour and fill all holes. No internal holes
// after this.
cvDrawContours( &iiSegCharBin,
poContour,
CV_RGB( 255, 255, 255 ),
CV_RGB( 255, 255, 255 ),
1,
CV_FILLED,
8,
cvPoint( -oBox.x, -oBox.y ) // offset contour to smaller image
);
// Recover all the holes in the original image:
// AND the filled contour mask with the input pixels inside the box.
cvSetImageROI( &iiBin, oSegChar.m_oCharRect );
cvAnd( &iiBin, &iiSegCharBin, &iiSegCharBin, 0 );
// cv::namedWindow( "CCCCCCCCCCCCCCCCCCCCCCC" );
// cv::imshow( "CCCCCCCCCCCCCCCCCCCCCCC", oSegChar.m_oCharBin );
// cv::waitKey();
}
cvResetImageROI( &iiBin );
cvEndFindContours( &oContourScanner );
//......... part of the code omitted here .........
开发者ID:matthill,项目名称:DemoOpenCV,代码行数:101,代码来源:fts_anpr_seg.cpp
示例4: main
/*
 * Captures frames from the default camera and learns a background model with
 * the codebook method during the first `nframesToLearnBG` frames.  A 250x250
 * region in the top-right corner of the (mirrored) frame is used as the ROI
 * for foreground segmentation.
 * NOTE(review): `model` appears to be a file-scope global -- confirm.
 * NOTE: this listing is truncated by the source page.
 */
int main(int argc, char** argv)
{
CvMemStorage* mstrg = cvCreateMemStorage();
CvSeq* contours = 0;
CvSeq* contours2 = 0;
const char* filename = 0;
IplImage* rawImage = 0, *yuvImage = 0, *borde = 0; //yuvImage is for codebook method
IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
CvCapture* capture = 0;
int c, n, nframes = 0;
int nframesToLearnBG = 300;
model = cvCreateBGCodeBookModel();
//Set color thresholds to default values
model->modMin[0] = 3;
model->modMin[1] = model->modMin[2] = 3;
model->modMax[0] = 10;
model->modMax[1] = model->modMax[2] = 10;
model->cbBounds[0] = model->cbBounds[1] = model->cbBounds[2] = 10;
bool pause = false;
bool singlestep = false;
printf("Capturando de la camara...\n");
capture = cvCaptureFromCAM( 0 );
if( !capture )
{
printf( "No se pudo inicializar la captura de video\n\n" );
return -1;
}
while (true)
{
rawImage = cvQueryFrame( capture );
++nframes;
if(!rawImage)
break;
//First time: allocate work images and windows.
if( nframes == 1 && rawImage )
{
borde = cvLoadImage("Borde.png",0);
// CODEBOOK METHOD ALLOCATION
yuvImage = cvCloneImage(rawImage);
int w = yuvImage->width;
/* Crop the top-right 250x250 region as the working area. */
cvSetImageROI(yuvImage, cvRect(w-250,0,250,250));
IplImage *tmp = cvCreateImage(cvGetSize(yuvImage),yuvImage->depth,yuvImage->nChannels);
cvCopy(yuvImage, tmp, NULL);
cvResetImageROI(yuvImage);
yuvImage = cvCloneImage(tmp);
ImaskCodeBook = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );
ImaskCodeBookCC = cvCreateImage( cvGetSize(yuvImage), IPL_DEPTH_8U, 1 );
cvSet(ImaskCodeBook,cvScalar(255));
cvNamedWindow("CapturaCam",CV_WINDOW_AUTOSIZE);
cvNamedWindow( "ForegroundCodeBook",CV_WINDOW_AUTOSIZE);
cvNamedWindow( "CodeBook_ConnectComp",CV_WINDOW_AUTOSIZE);
printf (">>Aprendiendo fondo\n");
}
// If we've got an rawImage and are good to go:
if( rawImage )
{
cvFlip(rawImage, NULL, 1);
int w = rawImage->width;
cvFindContours(borde,mstrg,&contours,sizeof(CvContour),CV_RETR_EXTERNAL);
//Draw the ROI outline on the displayed frame
cvLine(rawImage, cv::Point (w-250,0), cv::Point (w-250,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
cvLine(rawImage, cv::Point (w-250,250), cv::Point (w,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
//
if(nframes - 1 < nframesToLearnBG)
{
/* Overlay the remaining learning-frame count.
 * NOTE(review): _itoa is Windows-specific. */
char buffer [33];
_itoa (nframesToLearnBG - nframes,buffer,10);
CvFont font2;
cvInitFont(&font2, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 3, CV_AA);
cvPutText(rawImage, buffer, cvPoint(50, 50), &font2, cvScalar(0, 0, 255, 0));
}
cvSetImageROI(rawImage, cvRect(w-250,0,250,250));
IplImage *temp = cvCreateImage(cvGetSize(rawImage),rawImage->depth,rawImage->nChannels);
cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );
//YUV (YCrCb) colour space is what the codebook method operates on
//Background model construction phase
if(nframes-1 < nframesToLearnBG )
//......... part of the code omitted here .........
开发者ID:rovim,项目名称:LSMRecognition,代码行数:101,代码来源:convexhull2.cpp
示例5: mouseHandler
/*
 * Mouse event handler: each left click (in order) grabs a TPL_WIDTH x
 * TPL_HEIGHT template centred on the click position for, successively, the
 * left eye, right eye, left mouth corner, right mouth corner and nose, then
 * enables tracking of that feature.  faceTrack.count records how many
 * templates have been captured so far.
 * NOTE: this listing is truncated by the source page.
 */
void mouseHandler( int event, int x, int y, int flags, void *param)
{
//Single object tracking code source: http://nashruddin.com/eyetracking-track-user-eye.htm
/*save left eye template */
if( event == CV_EVENT_LBUTTONDOWN && faceTrack.count == 0) {
faceTrack.object_x0_left_eye = x - ( TPL_WIDTH / 2 );
faceTrack.object_y0_left_eye = y - ( TPL_HEIGHT / 2 );
cvSetImageROI( faceTrack.frame,
cvRect( faceTrack.object_x0_left_eye,
faceTrack.object_y0_left_eye,
TPL_WIDTH,
TPL_HEIGHT ) );
cvCopy( faceTrack.frame, faceTrack.tmplLeftEye, NULL );
cvResetImageROI( faceTrack.frame );
cout<<"Starting tracking of left eye..."<<endl;
faceTrack.left_eye_tracking = 1;
faceTrack.count++;
}
/*save right eye template*/
else if( event == CV_EVENT_LBUTTONDOWN && faceTrack.count == 1) {
faceTrack.object_x0_right_eye = x - ( TPL_WIDTH / 2 );
faceTrack.object_y0_right_eye = y - ( TPL_HEIGHT / 2 );
cvSetImageROI( faceTrack.frame,
cvRect( faceTrack.object_x0_right_eye,
faceTrack.object_y0_right_eye,
TPL_WIDTH,
TPL_HEIGHT ) );
cvCopy(faceTrack.frame,faceTrack.tmplRightEye, NULL );
cvResetImageROI( faceTrack.frame );
cout<<"Starting tracking of right eye..."<<endl;
faceTrack.right_eye_tracking = 1;
faceTrack.count++;
}
/*save left mouth corner template*/
else if (event == CV_EVENT_LBUTTONDOWN && faceTrack.count == 2)
{
faceTrack.object_x0_left_mouth = x - ( TPL_WIDTH / 2 );
faceTrack.object_y0_left_mouth = y - ( TPL_HEIGHT / 2 );
cvSetImageROI( faceTrack.frame,
cvRect( faceTrack.object_x0_left_mouth,
faceTrack.object_y0_left_mouth,
TPL_WIDTH,
TPL_HEIGHT ) );
cvCopy( faceTrack.frame, faceTrack.tmplLeftMouth, NULL );
cvResetImageROI( faceTrack.frame );
cout<<"Starting tracking of left mouth..."<<endl;
faceTrack.left_mouth_tracking = 1;
faceTrack.count++;
}
/*save right mouth corner template*/
else if (event == CV_EVENT_LBUTTONDOWN && faceTrack.count == 3)
{
faceTrack.object_x0_right_mouth = x - ( TPL_WIDTH / 2 );
faceTrack.object_y0_right_mouth = y - ( TPL_HEIGHT / 2 );
cvSetImageROI( faceTrack.frame,
cvRect( faceTrack.object_x0_right_mouth,
faceTrack.object_y0_right_mouth,
TPL_WIDTH,
TPL_HEIGHT ) );
cvCopy( faceTrack.frame, faceTrack.tmplRightMouth, NULL );
cvResetImageROI( faceTrack.frame );
cout<<"Starting tracking of right mouth..."<<endl;
faceTrack.right_mouth_tracking = 1;
faceTrack.count++;
}
/*save nose template*/
else if (event == CV_EVENT_LBUTTONDOWN && faceTrack.count == 4)
{
faceTrack.object_x0_nose = x - ( TPL_WIDTH / 2 );
faceTrack.object_y0_nose = y - ( TPL_HEIGHT / 2 );
cvSetImageROI( faceTrack.frame,
cvRect( faceTrack.object_x0_nose,
faceTrack.object_y0_nose,
TPL_WIDTH,
TPL_HEIGHT ) );
cvCopy( faceTrack.frame, faceTrack.tmplNose, NULL );
cvResetImageROI( faceTrack.frame );
cout<<"Starting tracking of nose..."<<endl;
faceTrack.nose_tracking = 1;
//......... part of the code omitted here .........
开发者ID:KatieMcNabb,项目名称:Emotion_Detection_With_Graphic_Visualization,代码行数:101,代码来源:Main.cpp
示例6: main
//......... (beginning of this function omitted by the source page) .........
// NOTE(review): this is the tail of a main() -- hand detection followed by
// CamShift/Kalman tracking.  pframe, pframe1, capture, pHandRect, display,
// prevx/prevy/initx/diffx/diffy, flag and the label `x` come from the
// omitted part above; confirm against the full source.
else
i=0;
// Show the display image
cvShowImage( DISPLAY_WINDOW, pframe1 );
cvMoveWindow(DISPLAY_WINDOW,0,0);
c=cvWaitKey(10);
if(c==27)
{
exitProgram(0);
}
if(i>=3)
{ // exit loop when a hand is detected
if(pHandRect) {
i=0;
prevx=pHandRect->x;
initx=pHandRect->x;
prevy=pHandRect->y+pHandRect->height;
flag=3;
break;
}
}
}
// initialize tracking with the detected hand rectangle
KalmanFilter kfilter;
startTracking(pframe, *pHandRect,kfilter);
// Track the detected hand using CamShift
while( 1 )
{
CvRect handBox;
// get the next video frame
pframe=cvQueryFrame(capture);
pframe1=cvCloneImage(pframe);
handBox = combi_track(pframe,kfilter);
int old_ht;
int a;
IplImage* temp;
/* Only process the box if it lies fully inside the frame. */
if(!((handBox.x<0)||(handBox.y<0)||((handBox.x+handBox.width)>pframe->width)||((handBox.y+handBox.height)>pframe->height)))
{
/* Tall boxes are stretched to a fixed aspect before recognition. */
if(handBox.height>(1.3*handBox.width))
{
old_ht=handBox.height;
handBox.height=2.4*handBox.width;
handBox.y-=handBox.height-old_ht;
}
cvSetImageROI(pframe,handBox);
temp=cvCreateImage(cvGetSize(pframe),8,3);
cvCopy(pframe,temp,NULL);
a=recognize(temp);
cvReleaseImage(&temp);
if(handBox.height>(2.3*handBox.width))
{
if(a==3)
a=5;
}
/* Horizontal displacement from the initial hand position drives the clicks. */
diffx=handBox.x+(handBox.width/2)-initx;
diffy=handBox.y+handBox.height-(handBox.width/2)-prevy;
prevx=handBox.x+(handBox.width/2);
prevy=handBox.y+handBox.height-(handBox.width/2);
cvResetImageROI(pframe);
cvRectangle(pframe1,cvPoint(handBox.x,handBox.y),cvPoint(handBox.x+handBox.width,handBox.y+handBox.height),CV_RGB(0,0,255),3,8,0);
if(diffx<(-60))
{ click(display,1,0);
printf("right click\n");
goto x;
}
else if(diffx>(60))
{
fake(display, 0);
printf("left click\n");
goto x;
}
else
{}
}
else
goto x;
cvShowImage( DISPLAY_WINDOW, pframe1 );
ch=cvWaitKey(10);
if( ch==27 ) {
exitProgram(0);
break;
}
if(ch=='s'){
// NOTE(review): `temp` was released (or never assigned) earlier in this
// iteration -- this branch looks like a use-after-free / use of an
// uninitialised pointer; confirm against the full source.
cvSetImageROI(pframe,handBox);
cvResize(pframe,temp);
cvSaveImage("image6.jpg",temp);
cvResetImageROI(pframe);
}
}
return 0;
}
开发者ID:amnosuperman,项目名称:Presenting-PPT-using-gestures,代码行数:101,代码来源:hci3.c
示例7: main
/*
 * Template-based planar tracking demo: grabs frames from the default camera
 * and tracks a TEMPLATE_WIDTH x TEMPLATE_HEIGHT region with either the
 * Inverse Compositional or the ESM homography tracker (compile-time switch
 * USE_IC / USE_ESM), drawing the per-iteration homography trajectory.
 * Press space to (re)capture the template from the image centre.
 * NOTE(review): `void main()` is non-standard C++ -- should be `int main()`.
 * NOTE: this listing is truncated by the source page.
 */
void main()
{
char message[100];
cvNamedWindow("template");
cvNamedWindow("sampling");
cvNamedWindow("result");
// initialize
int width = WIDTH;
int height = HEIGHT;
int startX = (width-TEMPLATE_WIDTH)/2;
int startY = (height-TEMPLATE_HEIGHT)/2;
IplImage* inputImage = NULL;
IplImage* grayImage = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
IplImage* resultImage = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
IplImage* templateImage = cvCreateImage(cvSize(TEMPLATE_WIDTH, TEMPLATE_HEIGHT), IPL_DEPTH_8U, 1);
IplImage* samplingImage = NULL;
// initial template rectangle (image centre) & identity homography with translation
CvRect rect = cvRect(startX, startY, TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
windage::Matrix3 homography(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0);
homography._13 = startX;
homography._23 = startY;
windage::Matrix3 e = homography;
// Template based Tracking using Inverse Compositional
#if USE_IC
windage::InverseCompositional* tracker = new windage::InverseCompositional(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
#if USE_ESM
windage::HomographyESM* tracker = new windage::HomographyESM(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
tracker->SetInitialHomography(e);
// homography update stack (one entry per refinement iteration)
std::vector<windage::Matrix3> homographyList;
// camera
CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);
bool isTrained = false;
bool processing =true;
while(processing)
{
inputImage = cvRetrieveFrame(capture);
cvResize(inputImage, resultImage);
cvCvtColor(resultImage, grayImage, CV_BGR2GRAY);
if(GAUSSIAN_BLUR > 0)
cvSmooth(grayImage, grayImage, CV_GAUSSIAN, GAUSSIAN_BLUR, GAUSSIAN_BLUR);
// processing: iterate the homography refinement up to MAX_ITERATION times
int64 startTime = cvGetTickCount();
float error = 0.0;
float delta = 1.0;
int iter = 0;
homographyList.clear();
for(iter=0; iter<MAX_ITERATION; iter++)
{
error = tracker->UpdateHomography(grayImage, &delta);
homography = tracker->GetHomography();
homographyList.push_back(homography);
// if(delta < HOMOGRAPHY_DELTA)
// break;
}
int64 endTime = cvGetTickCount();
samplingImage = tracker->GetSamplingImage();
// draw result: older iterations fade from red to green
int count = homographyList.size();
for(int i=0; i<count; i++)
DrawResult(resultImage, homographyList[i], CV_RGB(((count-i)/(double)count) * 255.0, (i/(double)count) * 255.0, 0), 1);
double processingTime = (endTime - startTime)/(cvGetTickFrequency() * 1000.0);
sprintf_s(message, "processing time : %.2lf ms (%02d iter), error : %.2lf", processingTime, iter, error);
std::cout << message << std::endl;
#if USE_IC
windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 15), "Inverse Compositional", 0.6);
#endif
#if USE_ESM
windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 15), "Efficient Second-order Minimization", 0.6);
#endif
windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 35), message, 0.6);
// draw image
cvShowImage("sampling", samplingImage);
cvShowImage("result", resultImage);
char ch = cvWaitKey(1);
switch(ch)
{
case ' ':
// Space: capture a fresh template from the centre rectangle.
cvSetImageROI(grayImage, rect);
cvCopyImage(grayImage, templateImage);
cvShowImage("template", templateImage);
cvResetImageROI(grayImage);
//......... part of the code omitted here .........
开发者ID:Barbakas,项目名称:windage,代码行数:101,代码来源:main.cpp
示例8: aGestureRecognition
//......... (beginning of this function omitted by the source page) .........
// NOTE(review): fragment of aGestureRecognition() -- computes an image
// homography from camera intrinsics, compares it against reference values
// from `fil_ver`, then unwarps the hand region with IPL warping.  Variables
// such as width, height, line, center, image, image_mask, hand_roi, rez and
// fil_ver are declared in the omitted part above; confirm against the full
// source.
/* calculate homographic transformation matrix */
cx = (float)(width / 2.);
cy = (float)(height / 2.);
fx = fy = (float)571.2048;
/* define intrinsic camera parameters */
in[0][1] = in[1][0] = in[2][0] = in[2][1] = 0;
in[0][0] = fx; in[0][2] = cx;
in[1][1] = fy; in[1][2] = cy;
in[2][2] = 1;
OPENCV_CALL( cvCalcImageHomography(line, &center, in, h) );
/* accumulate the RMS difference against the reference homography file */
rez_h = 0;
for(i=0;i<3;i++)
{
fscanf( fil_ver, "%f %f %f\n", &hv[0], &hv[1], &hv[2]);
for(j=0;j<3;j++)
{
rez_h+=(hv[j] - h[i][j])*(hv[j] - h[i][j]);
}
}
rez+=sqrt(rez_h)/9.;
/* image unwarping */
size.width = image->width;
size.height = image->height;
OPENCV_CALL( imagew = cvCreateImage(size, IPL_DEPTH_8U,3) );
OPENCV_CALL( image_maskw = cvCreateImage(size, IPL_DEPTH_8U,1) );
iplSet(image_maskw, 0);
cvSetImageROI(image, hand_roi);
cvSetImageROI(image_mask, hand_roi);
/* convert homographic transformation matrix from float to double */
for(i=0;i<3;i++)
for(j=0;j<3;j++)
coeffs[i][j] = (double)h[i][j];
/* get bounding rectangle for image ROI */
iplGetPerspectiveBound(image, coeffs, rect);
width = (int)(rect[1][0] - rect[0][0]);
height = (int)(rect[1][1] - rect[0][1]);
hand_roi_trans.x = (int)rect[0][0];hand_roi_trans.y = (int)rect[0][1];
hand_roi_trans.width = width; hand_roi_trans.height = height;
cvMaxRect(&hand_roi, &hand_roi_trans, &hand_roi);
iplSetROI((IplROI*)image->roi, 0, hand_roi.x, hand_roi.y,
hand_roi.width,hand_roi.height);
iplSetROI((IplROI*)image_mask->roi, 0, hand_roi.x, hand_roi.y,
hand_roi.width,hand_roi.height);
warpFlag = IPL_WARP_R_TO_Q;
/* interpolate = IPL_INTER_CUBIC; */
/* interpolate = IPL_INTER_NN; */
interpolate = IPL_INTER_LINEAR;
iplWarpPerspective(image, imagew, coeffs, warpFlag, interpolate);
iplWarpPerspective(image_mask, image_maskw, coeffs, warpFlag, IPL_INTER_NN);
/* set new image and mask ROI after transformation */
iplSetROI((IplROI*)imagew->roi,0, (int)rect[0][0], (int)rect[0][1],(int)width,(int)height);
iplSetROI((IplROI*)image_maskw->roi,0, (int)rect[0][0], (int)rect[0][1],(int)width,(int)height);
开发者ID:mikanradojevic,项目名称:sdkpub,代码行数:66,代码来源:agesturerecognition.cpp
示例9: gst_face_detect_transform_ip
/*
 * Performs the face detection on one video frame (in place).
 *
 * Converts the frame to grayscale, runs the face cascade, then -- depending
 * on the configured update mode -- posts a GstMessage with the detected face
 * list.  For each face, optional nose/mouth/eye cascades are run on
 * sub-regions of the face rectangle (ROI on the grayscale image).
 * NOTE: this listing is truncated by the source page.
 */
static GstFlowReturn
gst_face_detect_transform_ip (GstOpencvVideoFilter * base, GstBuffer * buf,
IplImage * img)
{
GstFaceDetect *filter = GST_FACE_DETECT (base);
if (filter->cvFaceDetect) {
GstMessage *msg = NULL;
GstStructure *s;
GValue facelist = { 0 };
GValue facedata = { 0 };
CvSeq *faces;
CvSeq *mouth = NULL, *nose = NULL, *eyes = NULL;
gint i;
gboolean do_display = FALSE;
gboolean post_msg = FALSE;
if (filter->display) {
if (gst_buffer_is_writable (buf)) {
do_display = TRUE;
} else {
GST_LOG_OBJECT (filter, "Buffer is not writable, not drawing faces.");
}
}
cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
cvClearMemStorage (filter->cvStorage);
faces = gst_face_detect_run_detector (filter, filter->cvFaceDetect,
filter->min_size_width, filter->min_size_height);
/* Decide whether to post a bus message for this frame. */
switch (filter->updates) {
case GST_FACEDETECT_UPDATES_EVERY_FRAME:
post_msg = TRUE;
break;
case GST_FACEDETECT_UPDATES_ON_CHANGE:
/* Post only on a no-face -> face (or face -> no-face) transition. */
if (faces && faces->total > 0) {
if (!filter->face_detected)
post_msg = TRUE;
} else {
if (filter->face_detected) {
post_msg = TRUE;
}
}
break;
case GST_FACEDETECT_UPDATES_ON_FACE:
if (faces && faces->total > 0) {
post_msg = TRUE;
} else {
post_msg = FALSE;
}
break;
case GST_FACEDETECT_UPDATES_NONE:
post_msg = FALSE;
break;
default:
post_msg = TRUE;
break;
}
filter->face_detected = faces ? faces->total > 0 : FALSE;
if (post_msg) {
msg = gst_face_detect_message_new (filter, buf);
g_value_init (&facelist, GST_TYPE_LIST);
}
for (i = 0; i < (faces ? faces->total : 0); i++) {
CvRect *r = (CvRect *) cvGetSeqElem (faces, i);
guint mw = filter->min_size_width / 8;
guint mh = filter->min_size_height / 8;
guint rnx = 0, rny = 0, rnw, rnh;
guint rmx = 0, rmy = 0, rmw, rmh;
guint rex = 0, rey = 0, rew, reh;
gboolean have_nose, have_mouth, have_eyes;
/* detect face features */
if (filter->cvNoseDetect) {
/* Nose search window: centre quarter of the face rectangle. */
rnx = r->x + r->width / 4;
rny = r->y + r->height / 4;
rnw = r->width / 2;
rnh = r->height / 2;
cvSetImageROI (filter->cvGray, cvRect (rnx, rny, rnw, rnh));
nose =
gst_face_detect_run_detector (filter, filter->cvNoseDetect, mw, mh);
have_nose = (nose && nose->total);
cvResetImageROI (filter->cvGray);
} else {
have_nose = FALSE;
}
if (filter->cvMouthDetect) {
/* Mouth search window: lower half of the face rectangle. */
rmx = r->x;
rmy = r->y + r->height / 2;
rmw = r->width;
rmh = r->height / 2;
//......... part of the code omitted here .........
开发者ID:kittee,项目名称:gst-plugins-bad,代码行数:101,代码来源:gstfacedetect.c
示例10: compute_color_diff_map
/*
 * Computes the color difference map for the region of interest of every
 * marker (LED) tracked by `tracker`.  For each marker it predicts the new
 * position from the last measured position/velocity, sizes the ROI from the
 * observed acceleration (falling back to whole-frame search when no prior
 * state exists or the ROI would leave the frame), and prepares the HSV
 * intermediate images and the per-marker `result` distance map.
 * NOTE: this listing is truncated by the source page.
 */
void compute_color_diff_map(Tracker *tracker, struct StaticData *data)
{
CvSize sz;
/* for all markers (LEDs) */
for(int nm=0; nm<data->NUM_OF_MARKERS; ++nm)
{
/* if marker was found in the prior frame */
if(tracker->marker[nm]->pos_is_set)
{
if(tracker->marker[nm]->vel_is_set)
{
/* if marker velocity is known predict new position */
tracker->marker[nm]->pos_predicted.x = tracker->marker[nm]->pos_measured.x + tracker->marker[nm]->vel.x;
tracker->marker[nm]->pos_predicted.y = tracker->marker[nm]->pos_measured.y + tracker->marker[nm]->vel.y;
}
else
{
/* otherwise take last known position for the center of the ROI as best guess */
tracker->marker[nm]->pos_predicted.x = tracker->marker[nm]->pos_measured.x;
tracker->marker[nm]->pos_predicted.y = tracker->marker[nm]->pos_measured.y;
}
/**
 * dynamically adapt roi size according to marker acceleration
 * linear upper boundary with slope 1.5 and intersect 4.
 * roi size is actually 2 times the computed value, because LED can be located at
 * x_predicted + x_prediction_error or at
 * x_predicted - x_prediction_error.
 * The same holds true for the y direction.
 **/
uint8_t adapt_roi_width;
uint8_t adapt_roi_height;
if(tracker->marker[nm]->acc_is_set)
{
/**
 * if marker acc is known adapt roi size dynamically according to linear
 * upper bound of prediction error made (measured)
 **/
adapt_roi_width = 2 * round(fabs(1.5 *tracker->marker[nm]->acc.x) + 4.0);
adapt_roi_height = 2 * round(fabs(1.5 * tracker->marker[nm]->acc.y) + 4.0);
}
else
{
/* otherwise take default roi size */
adapt_roi_width = ROI_WIDTH;
adapt_roi_height = ROI_HEIGHT;
}
/* check wether roi is within image boundaries and update roi position */
if( (int)tracker->marker[nm]->pos_predicted.x-adapt_roi_width/2>=0 &&
(int)tracker->marker[nm]->pos_predicted.x+adapt_roi_width/2<FRAME_WIDTH &&
(int)tracker->marker[nm]->pos_predicted.y-adapt_roi_height/2>=0 &&
(int)tracker->marker[nm]->pos_predicted.y+adapt_roi_height/2<FRAME_HEIGHT
)
{
tracker->marker[nm]->roi = cvRect( (int)tracker->marker[nm]->pos_predicted.x-adapt_roi_width/2,
(int)tracker->marker[nm]->pos_predicted.y-adapt_roi_height/2,
adapt_roi_width,
adapt_roi_height
);
/* set the region of interest to the computed size and origin */
cvSetImageROI(tracker->frame,tracker->marker[nm]->roi);
tracker->marker[nm]->roi_set = 1;
}
/* otherwise extend search on whole image */
else
{
tracker->marker[nm]->roi = cvRect(0,0,FRAME_WIDTH,FRAME_HEIGHT);
tracker->marker[nm]->roi_set = 0;
}
}
/* otherwise search on whole image */
else
{
tracker->marker[nm]->roi = cvRect(0,0,FRAME_WIDTH,FRAME_HEIGHT);
tracker->marker[nm]->roi_set = 0;
}
sz = cvSize(tracker->marker[nm]->roi.width,tracker->marker[nm]->roi.height);
/**
 * Define intermediate images.
 * ROI is converted to floating point HSV color space.
 **/
IplImage *hsv = cvCreateImage(sz,IPL_DEPTH_8U,3);
IplImage *hsv_f = cvCreateImage(sz,IPL_DEPTH_32F,3);
/* create image header to hold distance map according to the computed ROI size */
tracker->marker[nm]->result = cvCreateImage(sz,IPL_DEPTH_32F,1);
/* reset distance map to zero */
cvZero(tracker->marker[nm]->result);
float h_val,s_val,v_val;
float res;
//......... part of the code omitted here .........
开发者ID:caxenie,项目名称:ip-camera-overhead-tracker,代码行数:101,代码来源:ColorDiffTracker.cpp
示例11: remove_border_ul
/*
 * Strip the top and bottom borders from a binarised licence-plate image.
 *
 * Every third row is scanned for white->black and black->white pixel
 * transitions; a row with at least `min_transitions` of each is assumed to
 * cross the characters.  The first such row found scanning down from the top
 * becomes the upper bound, the first found scanning up from the bottom the
 * lower bound.  The band between them is cropped via a ROI and saved to
 * "image/img_after_border_removed.bmp"; the ROI on `img_plate` is reset
 * before returning.  Aborts (assert) if either boundary is not found.
 */
void remove_border_ul(IplImage * img_plate)
{
    int i = 0, j = 0;
    /* Row indices of the upper and lower character boundaries (-1 = unset). */
    int up_bound = -1, low_bound = -1;
    int white_to_black = 0;
    int black_to_white = 0;
    /* A row must show at least this many transitions in each direction to be
     * treated as a character row; this demands a reasonably sharp image. */
    const int min_transitions = 6;

    /* Scan from the top down to mid-height for the upper boundary. */
    for (i = 0; i < (img_plate->height) / 2; i = i + 3) {
        unsigned char * prow = (unsigned char *)(img_plate->imageData + i * img_plate->widthStep);
        white_to_black = 0;
        black_to_white = 0;
        /* Count the transitions on this row.  The bound is `j + 3 < width` so
         * that prow[j + 3] never reads past the end of the scanline (the
         * previous `j < width` condition was an out-of-bounds read). */
        for (j = 0; j + 3 < img_plate->width; j = j + 3) {
            if (prow[j] == 0 && prow[j + 3] == 255) {
                black_to_white++;
            } else if (prow[j] == 255 && prow[j + 3] == 0) {
                white_to_black++;
            }
        }
        if (black_to_white >= min_transitions && white_to_black >= min_transitions && up_bound < 0) {
            up_bound = i;
        } else if (black_to_white < min_transitions && white_to_black < min_transitions && up_bound > 0) {
            /* Busy-row streak was a false positive: restart the search. */
            up_bound = -1;
        }
    }

    /* Scan from the bottom up to mid-height for the lower boundary. */
    for (i = img_plate->height - 1; i > (img_plate->height) / 2; i = i - 3) {
        unsigned char * prow = (unsigned char *)(img_plate->imageData + i * img_plate->widthStep);
        white_to_black = 0;
        black_to_white = 0;
        for (j = 0; j + 3 < img_plate->width; j = j + 3) {
            if (prow[j] == 0 && prow[j + 3] == 255) {
                black_to_white++;
            } else if (prow[j] == 255 && prow[j + 3] == 0) {
                white_to_black++;
            }
        }
        if (black_to_white >= min_transitions && white_to_black >= min_transitions && low_bound < 0) {
            low_bound = i;
        } else if (black_to_white < min_transitions && white_to_black < min_transitions && low_bound > 0) {
            low_bound = -1;
        }
    }

    /* Both boundaries must have been found. */
    assert(low_bound >= 0 && up_bound >= 0);

    /* Crop to the detected band; -2 keeps the rectangle inside the image. */
    cvSetImageROI(img_plate, cvRect(0, up_bound, img_plate->width - 2, low_bound - up_bound - 2));
    IplImage * tmp_img = cvCreateImage(cvSize(img_plate->width - 2, low_bound - up_bound - 2), img_plate->depth, img_plate->nChannels);
    cvCopy(img_plate, tmp_img);
    cvSaveImage("image/img_after_border_removed.bmp", tmp_img);
    cvReleaseImage(&tmp_img);  /* fix: tmp_img was previously leaked */
    cvResetImageROI(img_plate);
}
开发者ID:XianB,项目名称:youyanQT,代码行数:84,代码来源:get_character.cpp
示例12: main
/*
 * Tile the first `count` camera frames (img[0..count-1]) into DispImage as a
 * grid and display the composite in the "stream_server_cam" window.
 *
 *   count == 1 -> one full-size tile
 *   count == 2 -> two side-by-side columns, full height
 *   count == 3 -> 2x2 grid, bottom-right quadrant left untouched
 *   count == 4 -> 2x2 grid, all quadrants filled
 *
 * Fixes two out-of-bounds ROIs in the original: the third tile of the
 * 3-frame layout used height resHeight (not resHeight/2), and the fourth
 * tile of the 4-frame layout used full resWidth x resHeight (not half),
 * both extending the ROI past the edges of DispImage.
 */
static void display_grid(int count, int resWidth, int resHeight)
{
    int cols = (count == 1) ? 1 : 2;
    int rows = (count <= 2) ? 1 : 2;
    int tileW = resWidth / cols;
    int tileH = resHeight / rows;
    int i;

    for (i = 0; i < count; i++) {
        int x = (i % cols) * tileW;
        int y = (i / cols) * tileH;
        /* Select the destination quadrant, then resize the source frame
         * directly into it. */
        cvSetImageROI(DispImage, cvRect(x, y, tileW, tileH));
        cvResize(img[i], DispImage, CV_INTER_LINEAR);
    }
    /* Reset the ROI so the full composite is shown. */
    cvResetImageROI(DispImage);
    cvShowImage("stream_server_cam", DispImage);
}

/*
 * stream_server entry point.
 *
 * Usage: stream_server <server_port> <width> <height> <screen width> <screen height>
 *
 * Spawns the network thread (waitServer), then loops: whenever the thread
 * signals new data (is_data_ready, guarded by `mutex`), tiles `temp`
 * frames into DispImage and shows them. Exits when the user presses 'q'.
 *
 * Returns via quit(), which is presumed to release resources and exit
 * (defined elsewhere in this file).
 */
int main(int argc, char** argv)
{
    printf("\nprogram started\n");
    pthread_t thread_s;
    int width, height;
    int key = 0; /* was uninitialized: reading it in the loop condition was UB */
    int resWidth, resHeight;
    int i;

    if (argc != 6) {
        quit("Usage: stream_server <server_port> <width> <height> <screen width> <screen height>", 0);
    }

    /* get the parameters */
    server_port = atoi(argv[1]);
    width = atoi(argv[2]);
    height = atoi(argv[3]);
    resWidth = atoi(argv[4]);
    resHeight = atoi(argv[5]);

    /* One receive buffer per possible camera stream. */
    for (i = 0; i < 4; i++) {
        img[i] = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    }
    DispImage = cvCreateImage(cvSize(resWidth, resHeight), IPL_DEPTH_8U, 3);

    if (pthread_create(&thread_s, NULL, waitServer, NULL)) {
        quit("pthread_create failed.", 1);
    }
    printf("\nThread started\n");

    while (key != 'q') {
        pthread_mutex_lock(&mutex);
        if (is_data_ready) {
            /* `temp` is the number of active streams, set by the network
             * thread; any other value is ignored (matches original's
             * empty else branch). */
            if (temp >= 1 && temp <= 4) {
                display_grid(temp, resWidth, resHeight);
            }
            is_data_ready = 0;
        }
        pthread_mutex_unlock(&mutex);
        /* cvWaitKey also pumps the HighGUI event loop. */
        key = cvWaitKey(100);
    }

    if (pthread_cancel(thread_s)) {
        quit("pthread_cancel failed.", 1);
    }
    /* free memory */
    quit(NULL, 0);
}
开发者ID:natemontage,项目名称:Webcam-Server,代码行数:99,代码来源:stream_server_rev_2.0b_working.c
示例13: detect
/*
 * Run Haar-cascade object detection on the current frame (`image` member)
 * and return the detected objects as a CvSeq of CvRect (in the coordinate
 * space of the downscaled working image). Returns 0 if no cascade loaded.
 *
 * Uses members (declared outside this view — assumed: `cascade`, `scale`,
 * `width`, `height`, `smallest`, `search_scale`, `neighbors`, `roi`,
 * `image`, `storage`; `PAD`/`USE_ROI`/`MAX` are macros): TODO confirm.
 */
CvSeq* detect()
{
    if (!cascade) return 0;
    /* A member scale of 0 means "no scaling". Note this local shadows
     * this->scale for the rest of the function. */
    double scale = this->scale == 0? 1.0 : this->scale;
    IplImage* gray = cvCreateImage(cvSize(width, height ), 8, 1);
    IplImage* small = cvCreateImage(cvSize(cvRound(width * scale), cvRound(height * scale)), 8, 1);
    /* Minimum detection window edge; `smallest` is presumably a fraction,
     * scaled by 1000 here — TODO confirm units against caller. */
    int min = cvRound(smallest * 1000);
    CvSeq* faces = 0;

    // use a region of interest to improve performance
    // This idea comes from the More than Technical blog:
    // http://www.morethantechnical.com/2009/08/09/near-realtime-face-detection-on-the-iphone-w-opencv-port-wcodevideo/
    if ( roi.width > 0 && roi.height > 0)
    {
        /* `roi` is kept in small-image coordinates; the full-size images
         * get the same region scaled back up by 1/scale. */
        cvSetImageROI(small, roi);
        CvRect scaled_roi = cvRect(roi.x / scale, roi.y / scale,
                                   roi.width / scale, roi.height / scale);
        cvSetImageROI(image, scaled_roi);
        cvSetImageROI(gray, scaled_roi);
    }

    // use an equalized grayscale to improve detection
    cvCvtColor(image, gray, CV_BGR2GRAY);
    // use a smaller image to improve performance
    cvResize(gray, small, CV_INTER_LINEAR);
    cvEqualizeHist(small, small);

    // detect with OpenCV
    cvClearMemStorage(storage);
    /* search_scale and neighbors are stored pre-scaled (plugin params in
     * [0,1], presumably); multiplied back to cvHaarDetectObjects' expected
     * ranges here — TODO confirm against parameter setters. */
    faces = cvHaarDetectObjects(small, cascade, storage,
                                search_scale * 10.0,
                                cvRound(neighbors * 100),
                                CV_HAAR_DO_CANNY_PRUNING,
                                cvSize(min, min));
#ifdef USE_ROI
    if (!faces || faces->total == 0)
    {
        // clear the region of interest
        roi.width = roi.height = 0;
    }
    else if (faces && faces->total > 0)
    {
        // determine the region of interest from the first detected object
        // XXX: based on the first object only?
        CvRect* r = (CvRect*) cvGetSeqElem(faces, 0);
        /* Detection coordinates are relative to the ROI that was active
         * during detection; shift back to absolute small-image coords. */
        if (roi.width > 0 && roi.height > 0)
        {
            r->x += roi.x;
            r->y += roi.y;
        }
        /* Grow the box by PAD on every side, then clamp: startX/startY
         * clamp the top-left at 0; w/h go negative when the padded box
         * would overrun the right/bottom edge, sw/sh when the unclamped
         * top-left was negative — the negative excess is subtracted from
         * the stored width/height below. */
        int startX = MAX(r->x - PAD, 0);
        int startY = MAX(r->y - PAD, 0);
        int w = small->width - startX - r->width - PAD * 2;
        int h = small->height - startY - r->height - PAD * 2;
        int sw = r->x - PAD, sh = r->y - PAD;
        // store the region of interest
        roi.x = startX;
        /* NOTE(review): trailing comma below is the comma operator, not a
         * typo-breaker — it chains into the next assignment; behavior is
         * the same as a semicolon here. */
        roi.y = startY,
        roi.width = r->width + PAD * 2 + ((w < 0) ? w : 0) + ((sw < 0) ? sw : 0);
        roi.height = r->height + PAD * 2 + ((h < 0) ? h : 0) + ((sh < 0) ? sh : 0);
    }
#endif
    cvReleaseImage(&gray);
    cvReleaseImage(&small);
    /* Clear the frame's ROI so later users see the whole image; `small`
     * and `gray` are released, so only `image` needs resetting. */
    cvResetImageROI(image);
    return faces;
}
开发者ID:ttill,项目名称:frei0r,代码行数:70,代码来源:facedetect.cpp
|
请发表评论