本文整理汇总了C++中cvCreateMemStorage函数的典型用法代码示例。如果您正苦于以下问题:C++ cvCreateMemStorage函数的具体用法?C++ cvCreateMemStorage怎么用?C++ cvCreateMemStorage使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvCreateMemStorage函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: main
//.........这里部分代码省略.........
}
else
{
cvCopy(frame_cur,undistor_image);
}
cvShowImage("cameraUndistor",undistor_image);
char c=cvWaitKey(33);
if(c==27)
break;
if(c=='u'||c=='U')
undist_flag=!undist_flag;
cvReleaseImage(&undistor_image);
}
cvReleaseImage(&undistor_image);
cvReleaseCapture(&captureCam2);
cvDestroyWindow("cameraUndistor");
}//ending undistortion_example
if(show_surf_example)
{
//using SURF
initModule_nonfree();// added at 16.04.2013
CvCapture* capture_cam_3=cvCreateCameraCapture(0);
cvNamedWindow("SURF from Cam",CV_WINDOW_KEEPRATIO);
cvCreateTrackbar("Hessian Level","SURF from Cam",0,1000,onTrackbarSlide1);
IplImage* buf_frame_3=0;
IplImage* gray_copy=0;
IplImage* buf_frame_3_copy=0;
CvSeq *kp1,*descr1;
CvMemStorage *storage=cvCreateMemStorage(0);
CvSURFPoint *surf_pt;
bool surf_flag=false;
while(true)
{
buf_frame_3=cvQueryFrame(capture_cam_3);
if(surf_flag)
{
surf_flag=false;
gray_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,1);
buf_frame_3_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,3);
cvCvtColor(buf_frame_3,gray_copy,CV_RGB2GRAY);
//cvSetImageROI(gray_copy,cvRect(280,200,40,40));
cvExtractSURF(gray_copy,NULL,&kp1,&descr1,storage,cvSURFParams(0.0,0));
cvReleaseImage(&gray_copy);
re_draw=true;
while(true)
{
if(re_draw)
{
cvCopy(buf_frame_3,buf_frame_3_copy);
double pi=acos(-1.0);
for(int i=0;i<kp1->total;i++)
{
surf_pt=(CvSURFPoint*)cvGetSeqElem(kp1,i);
if(surf_pt->hessian<min_hessian)
continue;
int pt_x,pt_y;
开发者ID:uprun,项目名称:GraduateWork,代码行数:67,代码来源:main.cpp
示例2: main
void main(int argc, char** argv)
{
cvNamedWindow("src",0 );
cvNamedWindow("warp image",0 );
cvNamedWindow("warp image (grey)",0 );
cvNamedWindow("Smoothed warped gray",0 );
cvNamedWindow("threshold image",0 );
cvNamedWindow("canny",0 );
cvNamedWindow("final",1 );
CvPoint2D32f srcQuad[4], dstQuad[4];
CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);
float Z=1;
dstQuad[0].x = 216; //src Top left
dstQuad[0].y = 15;
dstQuad[1].x = 392; //src Top right
dstQuad[1].y = 6;
dstQuad[2].x = 12; //src Bottom left
dstQuad[2].y = 187;
dstQuad[3].x = 620; //src Bot right
dstQuad[3].y = 159;
srcQuad[0].x = 100; //dst Top left
srcQuad[0].y = 120;
srcQuad[1].x = 540; //dst Top right
srcQuad[1].y = 120;
srcQuad[2].x = 100; //dst Bottom left
srcQuad[2].y = 360;
srcQuad[3].x = 540; //dst Bot right
srcQuad[3].y = 360;
cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix);
//CvCapture *capture = cvCaptureFromCAM(0);
/*double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
IplImage* image = cvRetrieveFrame(capture);
CvSize imgSize;
imgSize.width = image->width;
imgSize.height = image->height;
CvVideoWriter *writer = cvCreateVideoWriter("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, imgSize);*/
int ik=0;
while(1)
{
//IplImage* img = cvQueryFrame(capture);
IplImage* img = cvLoadImage( "../../Data/6 Dec/009.jpg", CV_LOAD_IMAGE_COLOR);
cvShowImage( "src", img );
//cvWriteFrame(writer, img);
//cvSaveImage(nameGen(ik++), img, 0);
IplImage* warp_img = cvCloneImage(img);
CV_MAT_ELEM(*warp_matrix, float, 2, 2) = Z;
cvWarpPerspective(img, warp_img, warp_matrix, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
cvShowImage( "warp image", warp_img );
IplImage* grayimg = cvCreateImage(cvGetSize(warp_img),IPL_DEPTH_8U,1);
cvCvtColor( warp_img, grayimg, CV_RGB2GRAY );
cvShowImage( "warp image (grey)", grayimg );
cvSmooth(grayimg, grayimg, CV_GAUSSIAN, 3, 3, 0.0, 0.0);
cvShowImage( "Smoothed warped gray", grayimg );
IplImage* thresholded_img=simplethreshold(grayimg, 220);
cvShowImage("threshold image",thresholded_img);
//grayimg = doCanny( thresholded_img, 50, 100, 3 );
grayimg = cvCloneImage(thresholded_img);
cvShowImage("canny",grayimg);
IplImage* finalimg = cvCreateImage(cvGetSize(grayimg),IPL_DEPTH_8U,3);
CvMemStorage* line_storage=cvCreateMemStorage(0);
CvSeq* results = cvHoughLines2(grayimg,line_storage,CV_HOUGH_PROBABILISTIC,10,CV_PI/180*5,350,100,10);
double angle = 0.0, temp;
double lengthSqd, wSum=0;
double xc = 0, yc = 0;
for( int i = 0; i < results->total; i++ )
{
CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
cvLine( finalimg, line[0], line[1], CV_RGB(0,0,255), 1, CV_AA, 0 );
//lengthSqd = (line[0].x - line[1].x)*(line[0].x - line[1].x) + (line[0].y - line[1].y)*(line[0].y - line[1].y);
wSum += 1;//lengthSqd;
if(line[0].y > line[1].y)
temp = atan((line[0].y - line[1].y + 0.0) / (line[0].x - line[1].x));
else
temp = atan((line[1].y - line[0].y + 0.0) / (line[1].x - line[0].x));
if(temp < 0)
angle += (90 + 180/3.14*temp)/* * lengthSqd*/;
else
angle += (180/3.14*temp - 90)/* * lengthSqd*/;
xc += line[0].x + line[1].x;
yc += line[0].y + line[1].y;
}
angle=angle/wSum;
//angle+=10;
printf("total: %d, angle: % f\n", results->total, angle);
xc /= 2*results->total;
yc /= 2*results->total;
double m = (angle != 0) ? 1/tan(angle*3.14/180) : 100; // 100 represents a very large slope (near vertical)
//.........这里部分代码省略.........
开发者ID:bhuvnesh-agarwal,项目名称:IGVC-2012,代码行数:101,代码来源:SafeZoneNav.cpp
示例3: bw_detect_blobs
int bw_detect_blobs(Tracker *tracker, struct StaticData *data)
{
    /* Detect marker blobs (bright LEDs) in the current camera frame.
     * Thresholds the highly underexposed frame so only the LEDs respond,
     * dilates the responses for stability, then extracts one contour per
     * marker and stores each contour's center of mass in the marker structs.
     * Returns the number of contours found; sets tracker->state = OFF_TRACK
     * when the expected number of markers is not detected. */

    /* circular kernel for dilation */
    IplConvKernel *kernel = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_ELLIPSE);
    /* temporary image to hold the thresholded camera frame */
    IplImage *thresh = cvCreateImage(cvGetSize(tracker->frame),IPL_DEPTH_8U,1);
    /* contour finding state */
    CvMemStorage *mem = cvCreateMemStorage(0);
    CvSeq *contour;
    CvMoments moments;
    int it = 0;

    /**
     * preprocessing
     **/
    /* threshold image, reasonably stable since frame is highly underexposed
     * and LEDs are very bright */
    cvThreshold(tracker->frame,thresh,180,255,CV_THRESH_BINARY);
    /* dilate image to increase size of responses from thresholding, gives
     * more stable result in contour finding */
    cvDilate(thresh,thresh,kernel,2);

    /**
     * blob extraction (connected component finding)
     **/
    /* find contours in image, should give one contour for each marker */
    int nc = cvFindContours(thresh,mem,&contour,sizeof(CvContour),CV_RETR_LIST,CV_CHAIN_APPROX_SIMPLE);

    /* if exactly NUM_OF_MARKERS contours were detected, compute the mean
     * position of each contour */
    if(nc==data->NUM_OF_MARKERS)
    {
        if(contour)
        {
            CvSeq *c;
            for(c=contour; c!=NULL; c=c->h_next)
            {
                /* compute moments for each contour */
                cvContourMoments(c,&moments);
                /* make sure the contour encloses some area */
                if(moments.m00>0.0)
                {
                    /* center of mass -> mean blob position; note that the
                     * position stored at index 'it' doesn't necessarily
                     * correspond to that specific marker */
                    tracker->marker[it]->blob_pos.x = moments.m10/moments.m00;
                    tracker->marker[it]->blob_pos.y = moments.m01/moments.m00;
                }
                else
                {
                    /* degenerate contour: for stable marker recognition all
                     * markers must have been detected */
                    tracker->state = OFF_TRACK;
                    break;
                }
                it++;
            }
        }
    }
    else
    {
        /* wrong number of blobs: go off-track and invalidate all markers */
        tracker->state = OFF_TRACK;
        for(int nm=0; nm<data->NUM_OF_MARKERS; ++nm)
        {
            tracker->marker[nm]->pos_is_set = 0;
            tracker->marker[nm]->blob_pos.x = 0;
            tracker->marker[nm]->blob_pos.y = 0;
        }
    }

    /* clean up memory; BUG FIX: the structuring element was previously
     * leaked on every call */
    cvReleaseStructuringElement(&kernel);
    cvReleaseMemStorage(&mem);
    cvReleaseImage(&thresh);
    return nc;
}
开发者ID:caxenie,项目名称:ip-camera-overhead-tracker,代码行数:87,代码来源:ColorDiffTracker.cpp
示例4: main
int main (int argc, char **argv)
{
CvCapture *capture = 0;
IplImage *frame, *frame_copy = 0;
cascade = (CvHaarClassifierCascade *) cvLoad ("yolo.xml", 0, 0, 0);
if (!cascade)
{
printf ("ERROR: Could not load classifier cascade\n");
return -1;
}
storage = cvCreateMemStorage (0);
capture = cvCaptureFromCAM (0);
if (capture){
int j = 0;
for (;;){
FILE *fin;
int i = 0;
flag = 0, f = 0;
if(!cvGrabFrame (capture)){
break;
}
frame = cvRetrieveFrame (capture);
if (!frame){
break;
}
if (!frame_copy){
frame_copy = cvCreateImage(
cvSize (frame->width, frame->height),
IPL_DEPTH_8U, frame->nChannels);
}
system ("ps -e | grep totem > sample.txt");
fin = fopen ("sample.txt", "r");
fflush (fin);
while (!feof (fin)){
char a[40];
fscanf (fin, "%s\n", a);
if (a[i] == 't' && a[i + 1] == 'o' && a[i + 2] == 't'
&& a[i + 3] == 'e' && a[i + 4] == 'm'){
f = 1;
break;
}
else{
f = 0;
}
}
fclose (fin);
if (frame->origin == IPL_ORIGIN_TL){
cvCopy (frame, frame_copy, 0);
}
else{
cvFlip (frame, frame_copy, 0);
}
flag = detect_and_draw (frame_copy);
if (f == 0)
{
printf("no totem playing\n
please switch off the application from the command centre\n
or open a video file\n");
sleep (5);
}
else if (flag == 0 && f == 1 && played == 1)
{
system ("totem --pause");
played = 0;
}
else if (flag == 1 && f == 1 && played == 0)
{
system ("totem --play");
played = 1;
}
if (cvWaitKey (10) >= 0)
break;
}
开发者ID:futureUnsure,项目名称:jarvis_beta,代码行数:94,代码来源:face-detect.cpp
示例5: fopen
int AdaBoost::read_num_class_data(const char* filename, int var_count, CvMat** data, CvMat** responses)
{
    /* Read a CSV-style training file where each line has the form
     * "<class-char>,<v1>,<v2>,...,<vN>" with N == var_count.
     * On success allocates *data (samples x var_count, CV_32F) and
     * *responses (samples x 1, CV_32F, the class label character as a float)
     * and returns 1; returns 0 if the file cannot be opened.
     * Ownership of *data and *responses passes to the caller. */
    const int M = 1024;                 /* maximum line length */
    FILE* f = fopen(filename, "rt");
    CvMemStorage* storage;
    CvSeq* seq;
    char buf[M + 2];
    float* el_ptr;
    CvSeqReader reader;
    int i = 0, j = 0;

    if(!f)
        return 0;

    /* element 0 holds the class label, elements 1..var_count the features */
    el_ptr = new float[var_count + 1];
    storage = cvCreateMemStorage();
    seq = cvCreateSeq(0, sizeof(*seq), (var_count + 1) * sizeof(float), storage);

    for(;;)
    {
        char* ptr;
        if(!fgets(buf, M, f) || !strchr(buf, ','))
            break;
        el_ptr[0] = buf[0];             /* class label character */
        ptr = buf + 2;                  /* skip "<label>," */
        for(i = 1; i <= var_count; i++)
        {
            int n = 0;
            sscanf(ptr, "%f%n", el_ptr + i, &n);
            ptr += n + 1;
        }
        if (i <= var_count)             /* short/malformed line: stop reading */
            break;
        cvSeqPush(seq, el_ptr);
    }
    fclose(f);

    *data = cvCreateMat(seq->total, var_count, CV_32F);
    *responses = cvCreateMat(seq->total, 1, CV_32F);

    /* copy the accumulated sequence into the output matrices */
    cvStartReadSeq(seq, &reader);
    for (i = 0; i < seq->total; i++)
    {
        const float* sdata = (float*) reader.ptr + 1;
        float* ddata = data[0]->data.fl + var_count * i;
        float* dr = responses[0]->data.fl + i;
        for (j = 0; j < var_count; j++)
            ddata[j] = sdata[j];
        *dr = sdata[-1];                /* class label stored just before the features */
        CV_NEXT_SEQ_ELEM(seq->elem_size, reader);
    }

    cvReleaseMemStorage(&storage);
    /* BUG FIX: was 'delete el_ptr' on memory from new[], which is
     * undefined behavior; array new requires array delete */
    delete[] el_ptr;
    return 1;
}
开发者ID:EduFill,项目名称:hbrs-ros-pkg,代码行数:64,代码来源:adaboost.cpp
示例6: cvCreateMemStorage
//--------------------------------------------------------------
void testApp::setup(){
    // capture resolution and tracking constants
    cw = 720;
    ch = 576;
    sb = 15.0;
    br = 3.0;

    // initial values for the processing parameters / sliders
    medianValue         = 1;
    lineThreshValue     = 50;
    lineMinLengthValue  = 24;
    lineMaxGapValue     = 4;
    cannyThresh1Value   = 5;
    cannyThresh2Value   = 20;
    cannyApertureValue  = 3;
    adaptiveThreshValue = 25;
    approxValue         = 10;
    contourSmoothValue  = 1;
    fillsAlphaValue     = 0x20;
    fillsApproxValue    = 10;
    contourAlphaValue   = 0x40;
    approxAlphaValue    = 0x40;
    clearBGAlphaValue   = 0x20;
    doFillsApproxValue  = false;

    // video source: live camera grabber or prerecorded movie
#ifdef _USE_LIVE_VIDEO
    vidGrabber.setVerbose(true);
    // vidGrabber.setDeviceID(3);
    vidGrabber.initGrabber(cw,ch);
#else
    vidPlayer.loadMovie("fingers.mov");
    vidPlayer.play();
#endif

    // working images, all at capture resolution
    colorImg.allocate(cw, ch);
    grayImage.allocate(cw, ch);
    hsvImage.allocate(cw, ch);
    satImage.allocate(cw, ch);
    trsImage.allocate(cw, ch);
    cannyImage.allocate(cw, ch);
    medianImg.allocate(cw, ch);

    bLearnBakground = true;
    threshold = 80;

    // OpenCV memory storage for the contour / line extraction passes
    linesStorage  = cvCreateMemStorage(0);
    fillsStorage  = cvCreateMemStorage(0);
    edgesStorage  = cvCreateMemStorage(0);
    approxStorage = cvCreateMemStorage(0);

    // initial drawing state
    mode          = MODE_PROCESS;
    draw_contours = false;
    draw_approx   = false;
    draw_edges    = false;
    draw_fills    = true;
    erase_bg      = true;

    // sequences are created lazily during processing
    edgeContours = NULL;
    fillContours = NULL;
    lines        = NULL;

    paperTexture.loadImage("paper6.jpg");
    ofSetBackgroundAuto(erase_bg);
}
开发者ID:nagyistoce,项目名称:gasubasu,代码行数:76,代码来源:testApp.cpp
示例7: main
int main( int argc, char** argv )
{
contadorBlue = 0;
contadorGreen = 0;
contadorRed = 0;
CvCapture *capture = NULL;
IplImage *frame = NULL;
IplImage *result = NULL;
int key;
char *filename = (char*)"aGest.xml";
/* load the classifier
note that I put the file in the same directory with
this code */
cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
/* setup memory buffer; needed by the face detector */
storage = cvCreateMemStorage( 0 );
/* initialize camera */
capture = cvCaptureFromCAM( 0 );
/* always check */
assert( cascade && storage && capture );
/* open and rezise images to be overlayed */
IplImage *drumblue = cvLoadImage("./Drums/DrumBlue.png");
IplImage *drumgreen = cvLoadImage("./Drums/DrumGreen.png");
IplImage *drumred = cvLoadImage("./Drums/DrumRed.png");
IplImage *lineblue = cvLoadImage("./Drums/BlueLine.png");
IplImage *linegreen = cvLoadImage("./Drums/GreenLine.png");
IplImage *linered = cvLoadImage("./Drums/RedLine.png");
IplImage *step1 = cvLoadImage("./Drums/Step.png");
IplImage *step2 = cvLoadImage("./Drums/Step2.png");
IplImage *arrow1 = cvLoadImage("./Drums/Arrow1.png");
IplImage *arrow2 = cvLoadImage("./Drums/Arrow2.png");
IplImage *bien = cvLoadImage("./Drums/Bien.png");
IplImage *buu = cvLoadImage("./Drums/Buu.png");
IplImage *rdrumblue = cvCreateImage(cvSize(110,95),drumblue->depth, drumblue->nChannels);
IplImage *rdrumgreen = cvCreateImage(cvSize(110,95),drumgreen->depth, drumgreen->nChannels);
IplImage *rdrumred = cvCreateImage(cvSize(110,95),drumred->depth, drumred->nChannels);
IplImage *rdrumblue2 = cvCreateImage(cvSize(110,95),drumblue->depth, drumblue->nChannels);
IplImage *rdrumgreen2 = cvCreateImage(cvSize(110,95),drumgreen->depth, drumgreen->nChannels);
IplImage *rdrumred2 = cvCreateImage(cvSize(110,95),drumred->depth, drumred->nChannels);
IplImage *rlineblue = cvCreateImage(cvSize(230,80),lineblue->depth, lineblue->nChannels);
IplImage *rlinegreen = cvCreateImage(cvSize(230,80),linegreen->depth, linegreen->nChannels);
IplImage *rlinered = cvCreateImage(cvSize(230,80),linered->depth, linered->nChannels);
IplImage *rlineblue2 = cvCreateImage(cvSize(230,80),lineblue->depth, lineblue->nChannels);
IplImage *rlinegreen2 = cvCreateImage(cvSize(230,80),linegreen->depth, linegreen->nChannels);
IplImage *rlinered2 = cvCreateImage(cvSize(230,80),linered->depth, linered->nChannels);
IplImage *rstep1 = cvCreateImage(cvSize(100,100),step1->depth, step1->nChannels);
IplImage *rstep2 = cvCreateImage(cvSize(100,100),step2->depth, step2->nChannels);
IplImage *rarrow1 = cvCreateImage(cvSize(110,70),arrow1->depth, arrow1->nChannels);
IplImage *rarrow2 = cvCreateImage(cvSize(110,70),arrow2->depth, arrow2->nChannels);
IplImage *rbien = cvCreateImage(cvSize(60,25),bien->depth, bien->nChannels);
IplImage *rbuu = cvCreateImage(cvSize(60,25),buu->depth, buu->nChannels);
cvResize(drumblue, rdrumblue);
cvResize(drumgreen, rdrumgreen);
cvResize(drumred, rdrumred);
cvResize(drumblue, rdrumblue2);
cvResize(drumgreen, rdrumgreen2);
cvResize(drumred, rdrumred2);
cvResize(lineblue, rlineblue);
cvResize(linegreen, rlinegreen);
cvResize(linered, rlinered);
cvResize(lineblue, rlineblue2);
cvResize(linegreen, rlinegreen2);
cvResize(linered, rlinered2);
cvResize(step1, rstep1);
cvResize(step2, rstep2);
cvResize(arrow1, rarrow1);
cvResize(arrow2, rarrow2);
cvResize(bien, rbien);
cvResize(buu, rbuu);
cvFlip(rdrumblue2, rdrumblue2,1);
cvFlip(rdrumgreen2, rdrumgreen2,1);
cvFlip(rdrumred2, rdrumred2,1);
cvFlip(rlineblue2, rlineblue2,1);
cvFlip(rlinegreen2, rlinegreen2,1);
cvFlip(rlinered2, rlinered2,1);
/* release memory */
cvReleaseImage( &drumblue);
cvReleaseImage( &drumgreen);
cvReleaseImage( &drumred);
cvReleaseImage( &lineblue);
cvReleaseImage( &linegreen);
cvReleaseImage( &linered );
cvReleaseImage( &step1 );
cvReleaseImage( &step2 );
cvReleaseImage( &arrow1 );
//.........这里部分代码省略.........
开发者ID:glebysg,项目名称:research,代码行数:101,代码来源:handgame.cpp
示例8: cvCreateMemStorage
//////////////////////
//
// 사각형 검출
//
//////////////////////
void ColorTracking::draw_square(IplImage* image)
{
CvMemStorage* storage2 = NULL; // 메모리 할당
CvMemStorage* storage3 = NULL; // 메모리 할당
storage2 = cvCreateMemStorage(0); //사각형 검출을 위한 윤곽선 추출 메모리
storage3 = cvCreateMemStorage(0); //검출된 사각형을 위한 메모리
CvSeq* contours ; //윤곽선
CvSeq* result ; //윤곽선 중에 사각형이 될만한 윤곽선 저장
CvPoint corner[4]; //사각형이 될 네 점
rects = NULL; //사각형이 될 네 점으로 만들 사각형 구조체 - 데이터 내보내기 위해 사용
rects = new CvRect [100]; //사각형의 갯수를 미리 알 수없기에 제한을 둠
//윤곽선추출
cvFindContours(image, storage2, &contours, sizeof(CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
//외곽선 검출 정보로 사각형 잇기
for(;contours !=0; contours = contours ->h_next)
{
//꼭지점 근사화
//cvApproxPoly 외곽선정보, 크기, 메모리, 메소드, 정밀도, 0이면 해당 외곽선만 검사
result = cvApproxPoly( contours, sizeof(CvContour), storage3,
CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
//꼭지점이 4개, 외곽선픽셀수가 1000개 이상 일때 사각형으로 간주
//컨백스 헐은 일단제외 cvCheckContourConvexity(result)
if( result->total == 4 &&
cvContourArea(result,CV_WHOLE_SEQ,0) > 500)
{
//초기 위치 설정
CvPoint* st = (CvPoint*)cvGetSeqElem(result, 0);
///////첫번째 꼭짓점 추출 - 임의의 점에서 가장 먼 점
double fMaxDist = 0.0;
for(int i = 1; i < result->total; i++)
{
CvPoint* pt = (CvPoint *)cvGetSeqElem(result, i);
double fDist = sqrt((double)( ( st->x - pt->x) * (st->x - pt->x)
+ (st->y - pt->y) * (st->y - pt->y) ));
if(fDist > fMaxDist)
{
corner[0] = *pt;
fMaxDist = fDist;
}
}
///////두번째 꼭짓점 추출 - 첫번째 점에서 가장 먼 점
fMaxDist = 0.0;
for(int i = 1; i < result->total; i++)
{
CvPoint* pt = (CvPoint *)cvGetSeqElem(result, i);
double fDist = sqrt((double)( ( corner[0].x - pt->x) * (corner[0].x - pt->x)
+ (corner[0].y - pt->y) * (corner[0].y - pt->y) ));
if(fDist > fMaxDist)
{
corner[1] = *pt;
fMaxDist = fDist;
}
}
////////세번째 꼭짓점 추출 - 첫번째 점과 두번째 점에서 가장 먼 점
fMaxDist = 0.0;
for(int i = 1; i < result->total; i++)
{
CvPoint* pt = (CvPoint *)cvGetSeqElem(result, i);
int tempx;
int tempy;
//첫번째 점과 두번째 사이의 중간 점 찾기
//좌표는 음수가 있을 수가 있어서 큰 숫자 판별후에 계산 함
//x좌표
if(corner[0].x >= corner[1].x)
{
tempx = corner[0].x - (corner[0].x - corner[1].x) / 2;
}
else if(corner[0].x < corner[1].x)
{
tempx = corner[0].x + (corner[1].x - corner[0].x) / 2;
//.........这里部分代码省略.........
开发者ID:OPRoS,项目名称:Component,代码行数:101,代码来源:ColorTracking.cpp
示例9: cvCreateMemStorage
IplImage *WorkingFrame=NULL;
IplImage *frame1 = NULL;
IplImage *Frame_at_t = NULL;
IplImage *Frame_at_t_dt = NULL;
IplImage *eig_image = NULL;
IplImage *temp_image = NULL;
IplImage *pyramid1 = NULL;
IplImage *frameone = NULL;
IplImage *frametwo = NULL;
IplImage *dots = NULL;
int p=1;
IplImage *pyramid2 = NULL;
CvSeq* first_contour, *contours2;
CvMemStorage* storage = cvCreateMemStorage();
double Result, Result2;
CvRect rect;
static int array[2]={0,0};
int* findhand(CvCapture *webcam) {
//---Initialise Variables for Optical Flow---//
CvSize OF_window = cvSize(3,3); //Setup the size of the window of each pyramid level
int no_of_points = 15000;
CvPoint2D32f Frame_t_points[15000];
CvPoint2D32f Frame_t_dt_points[15000];
char optical_flow_found_feature[15000];
float optical_flow_feature_error[15000];
CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );
开发者ID:jameserichards,项目名称:handTrack,代码行数:31,代码来源:finalDesign.cpp
示例10: cvCreateImage
ReturnType ColorTracking::onExecute()
{
// 영상을 Inport로부터 취득
opros_any *pData = ImageIn.pop();
RawImage result;
// 데이터 포트 백터
std::vector<PositionDataType> data;
if(pData != NULL){
// 포트로 부터 이미지 취득
RawImage Image = ImageIn.getContent(*pData);
RawImageData *RawImage = Image.getImage();
// 현재영상의 크기를 취득
m_in_width = RawImage->getWidth();
m_in_height = RawImage->getHeight();
// 원본영상의 이미지영역 확보
if(m_orig_img == NULL){
m_orig_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
}
if(m_dest_img == NULL){
m_dest_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
}
if(m_hsv_img == NULL){
m_hsv_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
}
if(m_gray_img == NULL){
m_gray_img = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
}
//영상에 대한 정보를 확보!memcpy
memcpy(m_orig_img->imageData, RawImage->getData(), RawImage->getSize());
//HSV변환
cvCvtColor(m_orig_img, m_hsv_img, CV_BGR2HSV);
//hsv 영역 축소 후, 설정값에 따라 해당 영역 이진 영상 추출
color_config(m_hsv_img, m_color);
//영상정리
image_filter(m_gray_img);
//검출 갯수 담을 변수 초기화
circle_cnt = 0;
rect_cnt = 0;
//검출된 원을 위한 메모리 공간 할당
storage0 = cvCreateMemStorage(0);
//원 그리기
draw_circle(m_gray_img);
//사각형 그리기
draw_square(m_gray_img);
//// DataOut
//한가지라도 검출되면
if(circles || rects != NULL)
{
//원 데이터가 존재함
if(circles)
{
//원의 갯수만큼
for(int k = 0; k < circles->total; k++)
{
float* cir;
int circle_x, circle_y;
double radi;
//검출된 원을 저장한 circles에서 원의 파라미터를 cir에 저장
//원의 중심 좌표 및 반지름이 배열에 순서대로 저장됨
cir = (float*)cvGetSeqElem(circles, k);
//검출된 원을 저장한 circles에서 원의 파라미터를 cir에 저장
//원의 중심 좌표 및 반지름이 배열에 순서대로 저장됨
circle_x = cvRound(cir[0]); //중심점 x 좌표
circle_y = cvRound(cir[1]); //중심점 y 좌표
radi = (double)cvRound(cir[2]); //반지름
PositionDataType base;
base.setName("circle");
base.setX(circle_x);
base.setY(circle_y);
base.setRadian(radi);
base.setHeight(NULL);
base.setWidth(NULL);
data.push_back(base);
}
}
//사각형 데이터가 존재함
if(rects != NULL)
{
for(int j = 0; j < rect_cnt; j++)
//.........这里部分代码省略.........
开发者ID:OPRoS,项目名称:Component,代码行数:101,代码来源:ColorTracking.cpp
示例11: update
/*
 * Per-frame filter entry point: copies the input frame into an OpenCV
 * image, periodically re-runs the Haar-cascade detector, draws the
 * detected objects, and writes the filtered frame to 'out'.
 * 'in'/'out' are pixel buffers of 'size' pixels at 4 bytes each
 * (matches the IPL_DEPTH_8U, 4-channel image created below).
 * NOTE(review): 'time' is unused; globals cascade/storage/objects/image/
 * count and the parameter fields are assumed to be defined elsewhere
 * in this file.
 */
void update(double time,
            uint32_t* out,
            const uint32_t* in)
{
    /* lazy one-time initialization: load the classifier on first call */
    if (!cascade) {
        cvSetNumThreads(cvRound(threads * 100));
        if (classifier.length() > 0) {
            cascade = (CvHaarClassifierCascade*) cvLoad(classifier.c_str(), 0, 0, 0 );
            if (!cascade)
                fprintf(stderr, "ERROR: Could not load classifier cascade %s\n", classifier.c_str());
            storage = cvCreateMemStorage(0);
        }
        else {
            /* no classifier configured: pass the frame through unchanged */
            memcpy(out, in, size * 4);
            return;
        }
    }
    // sanitize parameters
    search_scale = CLAMP(search_scale, 0.11, 1.0);
    neighbors = CLAMP(neighbors, 0.01, 1.0);
    // copy input image to OpenCV
    if( !image )
        image = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 4);
    memcpy(image->imageData, in, size * 4);
    // only re-detect periodically to control performance and reduce shape jitter
    int recheckInt = abs(cvRound(recheck * 1000));
    if ( recheckInt > 0 && count % recheckInt )
    {
        // skip detect on this frame; just advance the counter and redraw
        count++;
        // fprintf(stderr, "draw-only counter %u\n", count);
    }
    else
    {
        count = 1; // reset the recheck counter
        if (objects) // reset the list of objects
            cvClearSeq(objects);
        /* time the detection pass so it can throttle its own frequency */
        double elapsed = (double) cvGetTickCount();
        objects = detect();
        // use detection time to throttle frequency of re-detect vs. redraw (automatic recheck)
        elapsed = cvGetTickCount() - elapsed;
        elapsed = elapsed / ((double) cvGetTickFrequency() * 1000.0);
        // Automatic recheck uses an undocumented negative parameter value,
        // which is not compliant, but technically feasible.
        if (recheck < 0 && cvRound( elapsed / (1000.0 / (recheckInt + 1)) ) <= recheckInt)
            count += recheckInt - cvRound( elapsed / (1000.0 / (recheckInt + 1)));
        // fprintf(stderr, "detection time = %gms counter %u\n", elapsed, count);
    }
    draw();
    // copy filtered OpenCV image to output
    memcpy(out, image->imageData, size * 4);
    /* releasing here sets image to NULL, so it is re-created next call */
    cvReleaseImage(&image);
}
示例12: aMatchContourTrees
//.........这里部分代码省略.........
if (!trsiRead(&nPoints2,"20","Number of points second contour"))
return TRS_UNDEF;
if(nPoints1>0&&nPoints2>0)
{
if (!trsiRead(&a1,"10","first radius of the first elipse"))
return TRS_UNDEF;
if (!trsiRead(&b1,"20","second radius of the first elipse"))
return TRS_UNDEF;
if (!trsiRead(&a2,"15","first radius of the second elipse"))
return TRS_UNDEF;
if (!trsiRead(&b2,"30","second radius of the second elipse"))
return TRS_UNDEF;
if (!trsiRead(&fi,"0","second radius of the second elipse"))
return TRS_UNDEF;
if (!trsdRead(&upper,"3","noise amplidude"))
return TRS_UNDEF;
xc = (int)(width/2.);
yc = (int)(height/2.);
xmin = width;
ymin = height;
xmax = 0;
ymax = 0;
cp1 = (CvPoint*) trsmAlloc(nPoints1*sizeof(CvPoint));
cp2 = (CvPoint*) trsmAlloc(nPoints2*sizeof(CvPoint));
for(i=0; i<nPoints1; i++)
{
cp1[i].x = (int)(a1*cos(2*pi*i/nPoints1))+xc;
cp1[i].y = (int)(b1*sin(2*pi*i/nPoints1))+yc;
if(xmin> cp1[i].x) xmin = cp1[i].x;
if(xmax< cp1[i].x) xmax = cp1[i].x;
if(ymin> cp1[i].y) ymin = cp1[i].y;
if(ymax< cp1[i].y) ymax = cp1[i].y;
}
if(xmax>width||xmin<0||ymax>height||ymin<0) return TRS_FAIL;
lower = -upper;
/* upper = 3;*/
seed = 345753;
cvRandInit(&state, (float)lower,(float)upper, seed );
for(i=0; i<nPoints2; i++)
{
cvbRand( &state, &fr, 1 );
cp2[i].x =(int)fr+(int)(a2*cos(2*pi*i/nPoints2)*cos(2*pi*fi/360.))-
(int)(b2*sin(2*pi*i/nPoints2)*sin(2*pi*fi/360.))+xc;
cvbRand( &state, &fr, 1 );
cp2[i].y =(int)fr+(int)(a2*cos(2*pi*i/nPoints2)*sin(2*pi*fi/360.))+
(int)(b2*sin(2*pi*i/nPoints2)*cos(2*pi*fi/360.))+yc;
if(xmin> cp2[i].x) xmin = cp2[i].x;
if(xmax< cp2[i].x) xmax = cp2[i].x;
if(ymin> cp2[i].y) ymin = cp2[i].y;
if(ymax< cp2[i].y) ymax = cp2[i].y;
}
if(xmax>width||xmin<0||ymax>height||ymin<0) return TRS_FAIL;
/* contours initialazing */
type_seq = CV_SEQ_POLYGON;
cvMakeSeqHeaderForArray( type_seq, sizeof(CvContour), sizeof(CvPoint),
(char*)cp1, nPoints1, (CvSeq*)&contour_h1, &contour_blk1);
cvMakeSeqHeaderForArray( type_seq, sizeof(CvContour), sizeof(CvPoint),
(char*)cp2, nPoints2, (CvSeq*)&contour_h2, &contour_blk2);
/* contour trees created*/
storage = cvCreateMemStorage( block_size );
tree1 = cvCreateContourTree ((CvSeq*)&contour_h1, storage, threshold);
tree2 = cvCreateContourTree ((CvSeq*)&contour_h2, storage, threshold);
/* countours matchig */
error_test = 0.;
method = 1;
rezult = cvMatchContourTrees (tree1, tree2, (CvContourTreesMatchMethod)method,threshold2);
error_test+=rezult;
if(error_test > eps_rez ) code = TRS_FAIL;
else code = TRS_OK;
trsWrite( ATS_CON | ATS_LST | ATS_SUM, "contours matching error_test =%f \n",
error_test);
cvReleaseMemStorage ( &storage );
trsFree (cp2);
trsFree (cp1);
}
/* _getch(); */
return code;
}
开发者ID:ddcien,项目名称:Iris-extraction,代码行数:101,代码来源:amatchcontourtrees.cpp
示例13: cvLine
void Gesture1::trackMarker (IplImage* destImg, CvPoint _r, CvPoint _b, CvPoint _g, CvPoint _y) {
// find tissue box!
CvPoint* objPoints = objectDetector->detect(destImg);
// draw
world->Step(1.0F/6.0F, 10, 10);
cvLine(destImg, cvPoint(0,HEIGHT), cvPoint(1000,HEIGHT), CV_RGB(0,255,0), 3);
for (b2Body* b = world->GetBodyList(); b; b = b->GetNext()) {
//printf("**draw body\n");
Box2DData* userData = (Box2DData*)b->GetUserData();
if (userData != NULL) {
if (strcmp(userData->type, "Circle") == 0) {
//b2Vec2 v = b->GetWorldCenter();
b2Vec2 v = b->GetPosition();
//printf("** x=%f y=%f r=%f\n", v.x, v.y, userData->radius);
CvPoint center = cvPoint(v.x*WORLD_SCALE, v.y*WORLD_SCALE);
cvCircle(destImg, center, userData->radius*WORLD_SCALE, CV_RGB(255,0,0), -1);
} else if (strcmp(userData->type, "Box") == 0) {
world->DestroyBody(b);
}
}
}
if (objPoints != NULL) {
printf("construct body\n");
b2PolygonShape cs;
b2Vec2 vertices[4] = {
b2Vec2((float)(objPoints[0].x)/WORLD_SCALE, (float)(objPoints[0].y)/WORLD_SCALE),
b2Vec2((float)(objPoints[1].x)/WORLD_SCALE, (float)(objPoints[1].y)/WORLD_SCALE),
b2Vec2((float)(objPoints[2].x)/WORLD_SCALE, (float)(objPoints[2].y)/WORLD_SCALE),
b2Vec2((float)(objPoints[3].x)/WORLD_SCALE, (float)(objPoints[3].y)/WORLD_SCALE)
};
cs.Set(vertices, 4);
b2BodyDef bd;
//bd.type = b2_staticBody;
Box2DData* obj = new Box2DData();
strcpy(obj->type, "Box");
bd.userData = obj;
b2Body* body1 = world->CreateBody(&bd);
body1->CreateFixture(&cs, 0.0f);
}
if (_r.x < 0) return;
Point2D r = toPoint2D(_r);
// if marker is not moving for a while, reset the path
int len = path.size();
if (len > KEEP_MAX) {
path.erase(path.begin());
}
int nearCount = 0;
int actual = min(KEEP_COUNT, len);
/*
for(int i=0; i<actual; i++){
Point2D p = path[len-1-i];
double d = dist(p, r);
//printf("dist=%f\n", d);
if (d < NEAR_THRESHOLD) ++nearCount;
}
if (nearCount > (double)actual * DONT_MOVE_THRESHOLD_RATE) {
// marker is not moving, so clear the path
printf("cleared\n");
path.clear();
}
*/
path.push_back(r);
// decide if we should recognize
time_t current;
time(¤t);
double interval = difftime(current, lastTime);
printf("interval=%f\n", interval);
if (interval < INTERVAL_SEC) return;
len = path.size();
if (len < 5) return;
RecognitionResult res = g.recognize(path);
printf("%s:%f\n", res.name.c_str(), res.score);
if (res.name == "Circle" && res.score > SCORE_THRESHOLD) {
printf("##circle detect##\n");
// convert to vector<Point2D> to CvSeq<CvPoint>
CvSeqWriter writer;
CvMemStorage* storage = cvCreateMemStorage(0);
cvStartWriteSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage, &writer);
for (int i=0; i<len; i++) {
CvPoint pt = toCvPoint(path[i]);
CV_WRITE_SEQ_ELEM(pt, writer);
}
CvSeq* seq = cvEndWriteSeq(&writer);
CvBox2D ellipse = cvFitEllipse2(seq);
float radius = std::min(ellipse.size.width, ellipse.size.height)/(4.0F*WORLD_SCALE);
cvEllipseBox(destImg, ellipse, CV_RGB(0,255,255), -1);
// add Box2D object
{
b2CircleShape cs;
cs.m_radius = radius;
//.........这里部分代码省略.........
开发者ID:thorikawa,项目名称:N3,代码行数:101,代码来源:Gesture1.cpp
示例14: main
int main( int argc, char** argv )
{
char* filename = argc >= 2 ? argv[1] : (char*)"fruits.jpg";
CvRNG rng = cvRNG(-1);
if( (img0 = cvLoadImage(filename,1)) == 0 )
return 0;
printf( "Hot keys: \n"
"\tESC - quit the program\n"
"\tr - restore the original image\n"
"\tw or SPACE - run watershed algorithm\n"
"\t\t(before running it, roughly mark the areas on the image)\n"
"\t (before that, roughly outline several markers on the image)\n" );
cvNamedWindow( "image", 1 );
cvNamedWindow( "watershed transform", 1 );
img = cvCloneImage( img0 );
img_gray = cvCloneImage( img0 );
wshed = cvCloneImage( img0 );
marker_mask = cvCreateImage( cvGetSize(img), 8, 1 );
markers = cvCreateImage( cvGetSize(img), IPL_DEPTH_32S, 1 );
cvCvtColor( img, marker_mask, CV_BGR2GRAY );
cvCvtColor( marker_mask, img_gray, CV_GRAY2BGR );
cvZero( marker_mask );
cvZero( wshed );
cvShowImage( "image", img );
cvShowImage( "watershed transform", wshed );
cvSetMouseCallback( "image", on_mouse, 0 );
for(;;)
{
int c = cvWaitKey(0);
if( (char)c == 27 )
break;
if( (char)c == 'r' )
{
cvZero( marker_mask );
cvCopy( img0, img );
cvShowImage( "image", img );
}
if( (char)c == 'w' || (char)c == ' ' )
{
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* contours = 0;
CvMat* color_tab;
int i, j, comp_count = 0;
//cvSaveImage( "wshed_mask.png", marker_mask );
//marker_mask = cvLoadImage( "wshed_mask.png", 0 );
cvFindContours( marker_mask, storage, &contours, sizeof(CvContour),
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
cvZero( markers );
for( ; contours != 0; contours = contours->h_next, comp_count++ )
{
cvDrawContours( markers, contours, cvScalarAll(comp_count+1),
cvScalarAll(comp_count+1), -1, -1, 8, cvPoint(0,0) );
}
color_tab = cvCreateMat( 1, comp_count, CV_8UC3 );
for( i = 0; i < comp_count; i++ )
{
uchar* ptr = color_tab->data.ptr + i*3;
ptr[0] = (uchar)(cvRandInt(&rng)%180 + 50);
ptr[1] = (uchar)(cvRandInt(&rng)%180 + 50);
ptr[2] = (uchar)(cvRandInt(&rng)%180 + 50);
}
{
double t = (double)cvGetTickCount();
cvWatershed( img0, markers );
t = (double)cvGetTickCount() - t;
printf( "exec time = %gms\n", t/(cvGetTickFrequency()*1000.) );
}
// paint the watershed image
for( i = 0; i < markers->height; i++ )
for( j = 0; j < markers->width; j++ )
{
int idx = CV_IMAGE_ELEM( markers, int, i, j );
uchar* dst = &CV_IMAGE_ELEM( wshed, uchar, i, j*3 );
if( idx == -1 )
dst[0] = dst[1] = dst[2] = (uchar)255;
else if( idx <= 0 || idx > comp_count )
dst[0] = dst[1] = dst[2] = (uchar)0; // should not get here
else
{
uchar* ptr = color_tab->data.ptr + (idx-1)*3;
dst[0] = ptr[0]; dst[1] = ptr[1]; dst[2] = ptr[2];
}
}
cvAddWeighted( wshed, 0.5, img_gray, 0.5, 0, wshed );
cvShowImage( "watershed transform", wshed );
cvReleaseMemStorage( &storage );
cvReleaseMat( &color_tab );
//.........这里部分代码省略.........
开发者ID:AndrewShmig,项目名称:FaceDetect,代码行数:101,代码来源:watershed.cpp
示例15: main
int main(int argc, char* argv[])
{
printf("Press Esc-Key to Exit Process.\n");
RASPIVID_CONFIG * config = new RASPIVID_CONFIG();
if(!config){
printf("failed to create RASPIDVID_CONFIG.\n");
return -1;
}
config->width=static_cast<int>(WIN_WIDTH);
config->height=static_cast<int>(WIN_HEIGHT);
config->bitrate=0; // zero: leave as default
config->framerate=0;
config->monochrome=0;
cvNamedWindow( DISP_WIN , CV_WINDOW_AUTOSIZE );
RaspiCamCvCapture* capture = NULL;
capture = raspiCamCvCreateCameraCapture2( 0, config );
if(config){
delete config;
config = NULL;
}
if(!capture){
printf("failed to create capture\n");
return -1;
}
// キャプチャサイズを設定する.
double w = WIN_WIDTH;
double h = WIN_HEIGHT;
raspiCamCvSetCaptureProperty (capture, RPI_CAP_PROP_FRAME_WIDTH, w);
raspiCamCvSetCaptureProperty (capture, RPI_CAP_PROP_FRAME_HEIGHT, h);
// 正面顔検出器の読み込み
CvHaarClassifierCascade* cvHCC = (CvHaarClassifierCascade*)cvLoad(CASCADE, NULL,NULL,NULL);
// 検出に必要なメモリストレージを用意する
CvMemStorage* cvMStr = cvCreateMemStorage(0);
while(1){
IplImage* frame = raspiCamCvQueryFrame(capture);
if(!frame){
printf("failed to query frame.\n");
break;
}
// 画像中から検出対象の情報を取得する
CvSeq* face = cvHaarDetectObjects( frame
, cvHCC
, cvMStr
, 1.2
, 2
, CV_HAAR_DO_CANNY_PRUNING
, minsiz
, minsiz
);
if(!face){
printf("failed to detect objects.\n");
break;
}
int i=0;
for(i = 0; i < face->total; i++) {
// 検出情報から顔の位置情報を取得
CvRect* faceRect = (CvRect*)cvGetSeqElem(face, i);
if(!faceRect){
printf("failed to get Face-Rect.\n");
break;
}
// 取得した顔の位置情報に基づき、矩形描画を行う
cvRectangle( frame
, cvPoint(faceRect->x, faceRect->y)
, cvPoint(faceRect->x + faceRect->width, faceRect->y + faceRect->height)
, CV_RGB(255, 0 ,0)
, 2
, CV_AA
, 0
);
}
cvShowImage( DISP_WIN, frame);
char c = cvWaitKey(DELAY_MSEC);
if( c==27 ){ // ESC-Key
break;
}
sleep(0);
}
// 用意したメモリストレージを解放
cvReleaseMemStorage(&cvMStr);
// カスケード識別器の解放
cvReleaseHaarClassifierCascade(&cvHCC);
raspiCamCvReleaseCapture(&capture);
cvDestroyWindow(DISP_WIN);
return 0;
}
开发者ID:IsaoNakamura,项目名称:StudyRPi,代码行数:98,代码来源:detectFaceRaspicam_cv.cpp
示例16: char_ext
char* char_ext(IplImage* imagen,basicOCR ocr )
{
//cvNamedWindow("temp");
//cvShowImage("temp",imagen);
//cvWaitKey(0);
//char* plate=NULL;
char* no=(char*)malloc(20*sizeof(char));
//------------------------------------- -----------------------------------------------
//NUMBER ISOLATION
//Create needed images
smooth= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
threshold= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
open_morf= cvCreateImage(cvGetSize(imagen), imagen->depth, 1);
//Init variables for countours
contour = 0;
contourLow = 0;
|
请发表评论