本文整理汇总了C++中cvLine函数的典型用法代码示例。如果您正苦于以下问题:C++ cvLine函数的具体用法?C++ cvLine怎么用?C++ cvLine使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvLine函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: cvReleaseCapture
//.........这里部分代码省略.........
//convert the image to grey image
IplImage* frame_cur = cvQueryFrame(cam) ;
IplImage* img_curr = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ;
cvCvtColor( frame_cur,img_curr ,CV_BGR2GRAY);
//create a imge to display result
IplImage* img_res = cvCreateImage(img_sz,IPL_DEPTH_8U,1) ;
for ( int y = 0 ; y < img_sz.height ; ++y )
{
uchar* ptr = (uchar*)( img_res->imageData + y * img_res->widthStep ) ;
for ( int x = 0 ; x <img_res->width; ++x )
{
ptr[x] = 255 ;
}
}
//get good features
IplImage* img_eig = cvCreateImage(img_sz,IPL_DEPTH_32F,1) ;
IplImage* img_temp = cvCreateImage(img_sz,IPL_DEPTH_32F,1) ;
int corner_count = MAX_CORNERS ;
CvPoint2D32f* features_prev = new CvPoint2D32f[MAX_CORNERS] ;
cvGoodFeaturesToTrack(
img_prev,
img_eig,
img_temp,
features_prev,
&corner_count,
0.01,
5.0,
0,
3,
0,
0.4
);
cvFindCornerSubPix(
img_prev,
features_prev,
corner_count,
cvSize(win_size,win_size),
cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER,20,0.03)
);
// L-K
char feature_found[ MAX_CORNERS ] ;
float feature_errors[ MAX_CORNERS ] ;
CvSize pyr_sz = cvSize( frame->width + 8 ,frame->height / 3 ) ;
IplImage* pyr_prev = cvCreateImage(pyr_sz,IPL_DEPTH_32F,1) ;
IplImage* pyr_cur = cvCreateImage(pyr_sz,IPL_DEPTH_32F,1) ;
CvPoint2D32f* features_cur = new CvPoint2D32f[ MAX_CORNERS ] ;
cvCalcOpticalFlowPyrLK(
img_prev,
img_curr,
pyr_prev,
pyr_cur,
features_prev,
features_cur,
corner_count,
cvSize(win_size,win_size),
5,
feature_found,
feature_errors,
cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER,20,0.3),
0
);
for ( int i = 0 ; i < corner_count ; i++)
{
if ( 0 == feature_found[i] || feature_errors[i] > 550 )
{
// printf("error is %f \n" , feature_errors[i] ) ;
continue ;
}
// printf("find it !\n") ;
CvPoint pt_prev = cvPoint( features_prev[i].x , features_prev[i].y ) ;
CvPoint pt_cur = cvPoint( features_cur[i].x , features_cur[i].y ) ;
cvLine( img_res,pt_prev,pt_cur,CV_RGB( 255,0,0),2 );
}
if(27==cvWaitKey(33))
break;
MainWindow::Display(frame_cur,img_curr,img_res);
cvReleaseImage(&img_curr);
cvReleaseImage(&img_eig);
cvReleaseImage(&img_prev);
cvReleaseImage(&img_res);
cvReleaseImage(&img_temp);
}
}
开发者ID:lkpjj,项目名称:qt_demo,代码行数:101,代码来源:mainwindow.cpp
示例2: update_mhi
//.........这里部分代码省略.........
cvCvtPlaneToPix( mask, 0, 0, 0, dst );
// 计算运动历史图像的梯度方向
// 计算运动梯度趋向和合法的趋向掩码
// calculate motion gradient orientation and valid orientation mask
cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
if( !storage )
storage = cvCreateMemStorage(0);
else
cvClearMemStorage(storage);
// 将整个运动分割为独立的运动部分
// 分割运动:获取运动组件序列
// 分割掩码是运动组件图标识出来的,不再过多的使用
// segment motion: get sequence of motion components
// segmask is marked motion components map. It is not used further
seq = cvSegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA);
// 按运动组件的数目来循环
// 通过运动组件迭代
// 根据整幅图像(全局运动)进行相应的一次或多次迭代
// iterate through the motion components,
// One more iteration (i == -1) corresponds to the whole image (global motion)
for (i = -1; i < seq->total; i++)
{
if (i < 0)
{
// 全局运动事件
// case of the whole image
// 获取当前帧的范围
comp_rect = cvRect( 0, 0, size.width, size.height );
// 设置颜色为白色
color = CV_RGB(255,255,255);
// 设置放大倍数为100
magnitude = 100;
}
else
{
// 第i个运动组件
// i-th motion component
// 获取当前运动组件的范围
comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
// 丢弃很小的组件
if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
continue;
// 设置颜色为红色
color = CV_RGB(255,0,0);
// 设置放大倍数为30
magnitude = 30;
}
// 选择组件感兴趣的区域
// select component ROI
cvSetImageROI( silh, comp_rect );
cvSetImageROI( mhi, comp_rect );
cvSetImageROI( orient, comp_rect );
cvSetImageROI( mask, comp_rect );
// 计算某些选择区域的全局运动方向
// 每个运动部件的运动方向就可以被这个函数利用提取的特定部件的掩模(mask)计算出来(使用cvCmp)
// 计算趋势
// calculate orientation
angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
// 根据左上角的原点来调整图像的角度
angle = 360.0 - angle; // adjust for images with top-left origin
// 计算数组的绝对范数, 绝对差分范数或者相对差分范数
// 计算轮廓感兴趣区域中点的个数
count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
cvResetImageROI( mhi );
cvResetImageROI( orient );
cvResetImageROI( mask );
cvResetImageROI( silh );
// 检测小运动事件
// check for the case of little motion
if (count < comp_rect.width*comp_rect.height * 0.05)
{
continue;
}
// 画一个带箭头的时钟来指示方向
// draw a clock with arrow indicating the direction
center = cvPoint( (comp_rect.x + comp_rect.width/2),
(comp_rect.y + comp_rect.height/2) );
cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
}
}
开发者ID:runaway,项目名称:OpenCV1.1,代码行数:101,代码来源:motempl.c
示例3: testfaceLib_pThread
//.........这里部分代码省略.........
fprintf( fp_imaginfo, " %d %d %d %d", rect.x, rect.y, rect.width, rect.height );
}
if( fp_imaginfo != NULL )
fprintf( fp_imaginfo, "\n" );
///////////////////////////////////////////////////////////////////
total_ticks += (cvGetTickCount() - start_ticks);
// frame face_num
frames++;
//auto focus faces
if(quiet == false && bAutoFocus)
{
if(imgAutoFocus)
cvCopy(image, imgAutoFocus);
else
imgAutoFocus = cvCloneImage(image);
CvRectItem *rects = faceAnalyzer.getFaceRects();
cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
}
// next frame if quiet
if( quiet )
continue;
else
{
// draw status info for custom interaction
if(mouse_faceparam.ret_online_collecting == 1)
{
sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
//draw face collecting region
cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
}
else
sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);
cxlibDrawCaption( image, pFont, sCaptionInfo);
}
//show Image
if (image.width() <= 800)
cvShowImage( str_title, image );
else
{ // display scaled smaller aimge
CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
cvResize (image, scale_image);
cvShowImage( str_title, scale_image );
}
// user interaction
int key = cvWaitKey(1);
//int key = cvWaitKey(0);
if( key == ' ' ) // press space bar to pause the video play
cvWaitKey( 0 );
else if( key == 27 ) // press 'esc' to exit
break;
else if( key == 'a' )
{ // add new face name
if(face_num > 0)
{
CvRect rect = faceAnalyzer.getFaceRect(0).rc;
int x = rect.x+rect.width/2;
int y = rect.y+rect.height/2;
开发者ID:ruyiweicas,项目名称:FaceSmileAgeSex_Detection,代码行数:67,代码来源:testfacelib.cpp
示例4: mainMatch
int mainMatch(void)
{
// Initialise capture device
CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
if(!capture) error("No Capture");
// Declare Ipoints and other stuff
IpPairVec matches;
IpVec ipts, ref_ipts;
// This is the reference object we wish to find in video frame
// Replace the line below with IplImage *img = cvLoadImage("imgs/object.jpg");
// where object.jpg is the planar object to be located in the video
IplImage *img = cvLoadImage("imgs/object.jpg");
if (img == NULL) error("Need to load reference image in order to run matching procedure");
CvPoint src_corners[4] = {{0,0}, {img->width,0}, {img->width, img->height}, {0, img->height}};
CvPoint dst_corners[4];
// Extract reference object Ipoints
surfDetDes(img, ref_ipts, false, 3, 4, 3, 0.004f);
drawIpoints(img, ref_ipts);
showImage(img);
// Create a window
cvNamedWindow("OpenSURF", CV_WINDOW_AUTOSIZE );
// Main capture loop
while( true )
{
// Grab frame from the capture source
img = cvQueryFrame(capture);
// Detect and describe interest points in the frame
surfDetDes(img, ipts, false, 3, 4, 3, 0.004f);
// Fill match vector
getMatches(ipts,ref_ipts,matches);
// This call finds where the object corners should be in the frame
if (translateCorners(matches, src_corners, dst_corners))
{
// Draw box around object
for(int i = 0; i < 4; i++ )
{
CvPoint r1 = dst_corners[i%4];
CvPoint r2 = dst_corners[(i+1)%4];
cvLine( img, cvPoint(r1.x, r1.y),
cvPoint(r2.x, r2.y), cvScalar(255,255,255), 3 );
}
for (unsigned int i = 0; i < matches.size(); ++i)
drawIpoint(img, matches[i].first);
}
// Draw the FPS figure
drawFPS(img);
// Display the result
cvShowImage("OpenSURF", img);
// If ESC key pressed exit loop
if( (cvWaitKey(10) & 255) == 27 ) break;
}
// Release the capture device
cvReleaseCapture( &capture );
cvDestroyWindow( "OpenSURF" );
return 0;
}
开发者ID:dalinhuang,项目名称:iTRTest,代码行数:69,代码来源:main.cpp
示例5: main
int main()
{
// Initialize capturing live feed from the camera
CvCapture* capture = 0;
capture = cvCaptureFromCAM(0);
// Couldn't get a device? Throw an error and quit
if(!capture)
{
printf("Could not initialize capturing...\n");
return -1;
}
// The two windows we'll be using
cvNamedWindow("video");
cvNamedWindow("thresh");
// This image holds the "scribble" data...
// the tracked positions of the ball
IplImage* imgScribble = NULL;
// An infinite loop
while(true)
{
// Will hold a frame captured from the camera
IplImage* frame = 0;
frame = cvQueryFrame(capture);
// If we couldn't grab a frame... quit
if(!frame)
break;
// If this is the first frame, we need to initialize it
if(imgScribble == NULL)
{
imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
}
// Holds the yellow thresholded image (yellow = white, rest = black)
IplImage* imgYellowThresh = GetThresholdedImage(frame);
// Calculate the moments to estimate the position of the ball
CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
cvMoments(imgYellowThresh, moments, 1);
// The actual moment values
double moment10 = cvGetSpatialMoment(moments, 1, 0);
double moment01 = cvGetSpatialMoment(moments, 0, 1);
double area = cvGetCentralMoment(moments, 0, 0);
// Holding the last and current ball positions
static int posX = 0;
static int posY = 0;
int lastX = posX;
int lastY = posY;
posX = moment10/area;
posY = moment01/area;
// Print it out for debugging purposes
printf("position (%d,%d)\n", posX, posY);
// We want to draw a line only if its a valid position
if(lastX>0 && lastY>0 && posX>0 && posY>0)
{
// Draw a yellow line from the previous point to the current point
cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
}
// Add the scribbling image and the frame... and we get a combination of the two
cvAdd(frame, imgScribble, frame);
cvShowImage("thresh", imgYellowThresh);
cvShowImage("video", frame);
// Wait for a keypress
int c = cvWaitKey(10);
if(c!=-1)
{
// If pressed, break out of the loop
break;
}
// Release the thresholded image... we need no memory leaks.. please
cvReleaseImage(&imgYellowThresh);
delete moments;
}
// We're done using the camera. Other applications can now use it
cvReleaseCapture(&capture);
return 0;
}
开发者ID:Alicg,项目名称:AI-Shack--Tracking-with-OpenCV,代码行数:93,代码来源:TrackColour.cpp
示例6: cvZero
CvRect SimpleHandDetector::get_feature(IplImage *img, FeatureData *feature)
{
IplImage* pFrame = img;
char* f = feature->ch;
#if DEBUG
cvZero(feature_img);
#endif
cvCvtColor(pFrame, YCrCb, CV_BGR2YCrCb);
cvInRangeS(YCrCb, lower, upper, skin);
//cvErode(skin,skin, 0, 1); //形态学滤波,除去噪声
//cvDilate(skin,skin, 0, 3);
cvSmooth(skin,skin);
//cvCopy(skin,hand);
cvClearMemStorage(storage);
CvSeq * contour = 0;
cvFindContours(skin, storage, &contour, sizeof (CvContour), CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
if(contour == NULL)
return cvRect(-1,-1,-1,-1);
float max = fabs(cvContourArea(contour, CV_WHOLE_SEQ));
for (CvSeq* p = contour; p != NULL; p = p->h_next)
{
float now = fabs(cvContourArea(p, CV_WHOLE_SEQ));
if (now > max)
{
max = now;
contour = p;
}
}
CvRect rect = cvBoundingRect(contour, 0);
if (rect.width > STEP && rect.height > STEP)
{
//cvRectangle(hand, cvPoint(rect.x, rect.y), cvPoint(rect.x + rect.width, rect.y + rect.height), cvScalar(255, 255, 255), 3);
cvZero(hand);
cvDrawContours(hand, contour, CV_RGB(255, 255, 255), CV_RGB(255, 255, 255), 0, CV_FILLED,8, cvPoint(0, 0));
int w_s = rect.width / STEP;
int h_s = rect.height / STEP;
int w_off = rect.x;
int h_off = rect.y;
#if DEBUG
for(int s = 0;s <= STEP;s++)
{
cvLine(hand,cvPoint(rect.x,h_off),cvPoint(rect.x + rect.width,h_off),cvScalar(255),1);
h_off += h_s;
cvLine(hand,cvPoint(w_off,rect.y),cvPoint(w_off,rect.y + rect.height),cvScalar(255),1);
w_off += w_s;
}
#endif
w_s = rect.width / STEP;
h_s = rect.height / STEP;
int half = w_s * h_s;
for(int p = 0;p < STEP;p++)
{
for(int q = 0;q < STEP;q++)
{
int count = 0;
w_off = rect.x + q * w_s;
h_off = rect.y + p * h_s;
for(int y = 0;y < h_s;y++)
{
for(int x = 0;x < w_s;x++)
{
if(IMG8U(hand,w_off + x,h_off + y) == 255)
count++;
}
}
if((double)count / half > 0.5)
{
f[p * STEP + q] = '1';
#if DEBUG
cvSetImageROI(feature,cvRect(q * 100,p * 100,100,100));
cvSet(feature,cvScalar(255));
#endif
}else
f[p * STEP + q] = '0';
}
}
#if DEBUG
cvShowImage("hand",hand);
cvResetImageROI(feature_img);
cvShowImage("feature",feature_img);
cvWaitKey(10);
#endif
}else
rect = cvRect(-1,-1,-1,-1);
return rect;
/*char ch;
if((ch = cvWaitKey(10)) > 0)
{
if(is_train)
{
if(ch >= '0' && ch <= '9')
{
printf("%c:",ch);
//.........这里部分代码省略.........
开发者ID:xizhibei,项目名称:SmartHome,代码行数:101,代码来源:simplehanddetector.cpp
示例7: cvLine
// Render a single straight segment between Point1 and Point2 on the plot
// canvas in the requested colour.
void CvFunctionPlot::PlotLine(const CvPoint& Point1,const CvPoint& Point2,const CvScalar& PlotColour)
{
    const int thickness = 1;  // pixel width of the plotted segment
    const int line_type = 8;  // 8-connected line
    const int shift = 0;      // coordinates are plain integer pixels
    cvLine(Canvas, Point1, Point2, PlotColour, thickness, line_type, shift);
}
开发者ID:ACAVJW4H,项目名称:Pedestrian_Counter,代码行数:4,代码来源:CvFunctionPlot.cpp
示例8: bw_track_blobs
void bw_track_blobs(StereoCluster *cluster)
{
int *free_blobs = (int *)calloc(cluster->data->NUM_OF_MARKERS,sizeof(int));
double *d_epi = (double *)calloc(cluster->data->NUM_OF_MARKERS,sizeof(int));
double *d_pred = (double *)calloc(cluster->data->NUM_OF_MARKERS,sizeof(int));
for(int k=0; k<cluster->data->NUM_OF_MARKERS; ++k)
{
free_blobs[k] = 1;
}
double nx,ny;
double lx,ly,lz;
double d_min;
double d_thresh = 5.0;
int id;
int d_cum;
double d;
CvPoint2D32f old_marker_pos;
/* compute distances between markers, if markers too close, no stable tracking possible */
for(int m=0; m<cluster->data->NUM_OF_MARKERS-1; ++m)
{
for(int n=m+1; n<cluster->data->NUM_OF_MARKERS; ++n)
{
d = sqrt( (cluster->tracker[1]->marker[m]->blob_pos.x-cluster->tracker[1]->marker[n]->blob_pos.x)*(cluster->tracker[1]->marker[m]->blob_pos.x-cluster->tracker[1]->marker[n]->blob_pos.x) +
(cluster->tracker[1]->marker[m]->blob_pos.y-cluster->tracker[1]->marker[n]->blob_pos.y)*(cluster->tracker[1]->marker[m]->blob_pos.y-cluster->tracker[1]->marker[n]->blob_pos.y) );
if(d<4.0)
{
cluster->tracker[1]->state = OFF_TRACK;
cluster->state = OFF_TRACK;
return;
}
}
}
for(int mc=0; mc<cluster->data->NUM_OF_MARKERS; ++mc)
{
/* read out pixel coordinates from color camera */
nx = cluster->tracker[0]->marker[mc]->pos_measured.x;
ny = cluster->tracker[0]->marker[mc]->pos_measured.y;
/* compute epipolar line for right image*/
lx = cluster->FundamentalMatrix[0][0]*nx + cluster->FundamentalMatrix[0][1]*ny + cluster->FundamentalMatrix[0][2];
ly = cluster->FundamentalMatrix[1][0]*nx + cluster->FundamentalMatrix[1][1]*ny + cluster->FundamentalMatrix[1][2];
lz = cluster->FundamentalMatrix[2][0]*nx + cluster->FundamentalMatrix[2][1]*ny + cluster->FundamentalMatrix[2][2];
lx /= ly;
lz /= ly;
ly = 1.0;
if(mc==0)
{
double x0,x1,y0,y1;
x0 = 0.0;
x1 = 779.0;
y0 = -(lx*x0+lz);
y1 = -(lx*x1+lz);
// printf("%f %f %f %f\n",x0,y0,x1,y1);
cvLine(cluster->tracker[1]->frame,cvPoint((int)x0,(int)y0),cvPoint((int)x1,(int)y1),cvScalarAll(255),1,CV_AA);
}
#if 0
if(cluster->tracker[1]->marker[mc]->pos_is_set)
{
old_marker_pos.x = cluster->tracker[1]->marker[mc]->pos_measured.x;
old_marker_pos.y = cluster->tracker[1]->marker[mc]->pos_measured.y;
// printf("m = (%f %f)\n",tracker->marker[nm]->pos_measured.x,tracker->marker[nm]->pos_measured.y);
if(cluster->tracker[1]->marker[mc]->vel_is_set)
{
/* if marker velocity is known predict new position */
cluster->tracker[1]->marker[mc]->pos_predicted.x = cluster->tracker[1]->marker[mc]->pos_measured.x + cluster->tracker[1]->marker[mc]->vel.x;
cluster->tracker[1]->marker[mc]->pos_predicted.y = cluster->tracker[1]->marker[mc]->pos_measured.y + cluster->tracker[1]->marker[mc]->vel.y;
}
else
{
/* otherwise take last known position for the center of the ROI as best guess */
cluster->tracker[1]->marker[mc]->pos_predicted.x = cluster->tracker[1]->marker[mc]->pos_measured.x;
cluster->tracker[1]->marker[mc]->pos_predicted.y = cluster->tracker[1]->marker[mc]->pos_measured.y;
}
for(int blob=0; blob<cluster->data->NUM_OF_MARKERS; ++blob)
{
/* distance of blob to epipolar line */
d_epi[blob] = abs( (lx*cluster->tracker[1]->marker[blob]->blob_pos.x + cluster->tracker[1]->marker[blob]->blob_pos.y + lz) / sqrt(1.0 + lx*lx) );
/*
//.........这里部分代码省略.........
开发者ID:caxenie,项目名称:ip-camera-overhead-tracker,代码行数:101,代码来源:ColorDiffTracker.cpp
示例9: main
void main(int argc, char** argv)
{
cvNamedWindow("src",0 );
cvNamedWindow("warp image",0 );
cvNamedWindow("warp image (grey)",0 );
cvNamedWindow("Smoothed warped gray",0 );
cvNamedWindow("threshold image",0 );
cvNamedWindow("canny",0 );
cvNamedWindow("final",1 );
CvPoint2D32f srcQuad[4], dstQuad[4];
CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);
float Z=1;
dstQuad[0].x = 216; //src Top left
dstQuad[0].y = 15;
dstQuad[1].x = 392; //src Top right
dstQuad[1].y = 6;
dstQuad[2].x = 12; //src Bottom left
dstQuad[2].y = 187;
dstQuad[3].x = 620; //src Bot right
dstQuad[3].y = 159;
srcQuad[0].x = 100; //dst Top left
srcQuad[0].y = 120;
srcQuad[1].x = 540; //dst Top right
srcQuad[1].y = 120;
srcQuad[2].x = 100; //dst Bottom left
srcQuad[2].y = 360;
srcQuad[3].x = 540; //dst Bot right
srcQuad[3].y = 360;
cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix);
//CvCapture *capture = cvCaptureFromCAM(0);
/*double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
IplImage* image = cvRetrieveFrame(capture);
CvSize imgSize;
imgSize.width = image->width;
imgSize.height = image->height;
CvVideoWriter *writer = cvCreateVideoWriter("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, imgSize);*/
int ik=0;
while(1)
{
//IplImage* img = cvQueryFrame(capture);
IplImage* img = cvLoadImage( "../../Data/6 Dec/009.jpg", CV_LOAD_IMAGE_COLOR);
cvShowImage( "src", img );
//cvWriteFrame(writer, img);
//cvSaveImage(nameGen(ik++), img, 0);
IplImage* warp_img = cvCloneImage(img);
CV_MAT_ELEM(*warp_matrix, float, 2, 2) = Z;
cvWarpPerspective(img, warp_img, warp_matrix, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
cvShowImage( "warp image", warp_img );
IplImage* grayimg = cvCreateImage(cvGetSize(warp_img),IPL_DEPTH_8U,1);
cvCvtColor( warp_img, grayimg, CV_RGB2GRAY );
cvShowImage( "warp image (grey)", grayimg );
cvSmooth(grayimg, grayimg, CV_GAUSSIAN, 3, 3, 0.0, 0.0);
cvShowImage( "Smoothed warped gray", grayimg );
IplImage* thresholded_img=simplethreshold(grayimg, 220);
cvShowImage("threshold image",thresholded_img);
//grayimg = doCanny( thresholded_img, 50, 100, 3 );
grayimg = cvCloneImage(thresholded_img);
cvShowImage("canny",grayimg);
IplImage* finalimg = cvCreateImage(cvGetSize(grayimg),IPL_DEPTH_8U,3);
CvMemStorage* line_storage=cvCreateMemStorage(0);
CvSeq* results = cvHoughLines2(grayimg,line_storage,CV_HOUGH_PROBABILISTIC,10,CV_PI/180*5,350,100,10);
double angle = 0.0, temp;
double lengthSqd, wSum=0;
double xc = 0, yc = 0;
for( int i = 0; i < results->total; i++ )
{
CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
cvLine( finalimg, line[0], line[1], CV_RGB(0,0,255), 1, CV_AA, 0 );
//lengthSqd = (line[0].x - line[1].x)*(line[0].x - line[1].x) + (line[0].y - line[1].y)*(line[0].y - line[1].y);
wSum += 1;//lengthSqd;
if(line[0].y > line[1].y)
temp = atan((line[0].y - line[1].y + 0.0) / (line[0].x - line[1].x));
else
temp = atan((line[1].y - line[0].y + 0.0) / (line[1].x - line[0].x));
if(temp < 0)
angle += (90 + 180/3.14*temp)/* * lengthSqd*/;
else
angle += (180/3.14*temp - 90)/* * lengthSqd*/;
xc += line[0].x + line[1].x;
yc += line[0].y + line[1].y;
}
angle=angle/wSum;
//angle+=10;
printf("total: %d, angle: % f\n", results->total, angle);
xc /= 2*results->total;
yc /= 2*results->total;
double m = (angle != 0) ? 1/tan(angle*3.14/180) : 100; // 100 represents a very large slope (near vertical)
//.........这里部分代码省略.........
开发者ID:bhuvnesh-agarwal,项目名称:IGVC-2012,代码行数:101,代码来源:SafeZoneNav.cpp
示例10: cvRound
void Figure::DrawAxis(IplImage *output)
{
    // Draw the x and y axes plus their numeric range labels onto `output`.
    // The x axis is placed at y == 0 whenever zero lies within
    // [y_min, y_max]; otherwise it sits at y_min (the bottom of the graph).

    int bs = border_size;        // margin between image edge and graph
    int h = figure_size.height;
    int w = figure_size.width;

    // height of the plotting area inside the border
    // (the original also computed an unused graph width, removed here)
    int gh = h - bs * 2;

    // draw the horizontal and vertical axis
    // let x, y axes cross at zero if possible.
    float y_ref = y_min;
    if ((y_max > 0) && (y_min <= 0))
        y_ref = 0;

    int x_axis_pos = h - bs - cvRound((y_ref - y_min) * y_scale);

    // horizontal (x) axis
    cvLine(output, cvPoint(bs, x_axis_pos),
           cvPoint(w - bs, x_axis_pos),
           axis_color);
    // vertical (y) axis
    cvLine(output, cvPoint(bs, h - bs),
           cvPoint(bs, h - bs - gh),
           axis_color);

    // Write the scale of the y axis
    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 0.55, 0.7, 0, 1, CV_AA);

    int chw = 6, chh = 10;       // approximate character cell size in pixels
    char text[16];

    // y max label: only drawn when far enough from y_ref to avoid overlap.
    // snprintf always NUL-terminates within the given size, so the full
    // buffer size is passed (the original's sizeof(text)-1 wasted a byte).
    if ((y_max - y_ref) > 0.05 * (y_max - y_min))
    {
        snprintf(text, sizeof(text), "%.1f", y_max);
        cvPutText(output, text, cvPoint(bs / 5, bs - chh / 2), &font, text_color);
    }
    // y min label
    if ((y_ref - y_min) > 0.05 * (y_max - y_min))
    {
        snprintf(text, sizeof(text), "%.1f", y_min);
        cvPutText(output, text, cvPoint(bs / 5, h - bs + chh), &font, text_color);
    }

    // label of the x axis position (y_ref)
    snprintf(text, sizeof(text), "%.1f", y_ref);
    cvPutText(output, text, cvPoint(bs / 5, x_axis_pos + chh / 2), &font, text_color);

    // x max label, right-aligned against the graph border
    snprintf(text, sizeof(text), "%.0f", x_max );
    cvPutText(output, text, cvPoint(w - bs - strlen(text) * chw, x_axis_pos + chh),
              &font, text_color);
    // x min label
    snprintf(text, sizeof(text), "%.0f", x_min );
    cvPutText(output, text, cvPoint(bs, x_axis_pos + chh),
              &font, text_color);
}
示例11: main
int main(int argc, char** argv)
{
profile_name = (argc > 1 ? argv[1] : (char*)"blue_goal.yml");
int cam = (argc > 2 ? atoi(argv[2]) : 0);
// value loading
fs = cvOpenFileStorage(profile_name, 0, CV_STORAGE_READ, NULL);
Hmax = cvReadIntByName(fs, NULL, "Hmax", Hmax);
Smax = cvReadIntByName(fs, NULL, "Smax", Smax);
Vmax = cvReadIntByName(fs, NULL, "Vmax", Vmax);
Hmin = cvReadIntByName(fs, NULL, "Hmin", Hmin);
Smin = cvReadIntByName(fs, NULL, "Smin", Smin);
Vmin = cvReadIntByName(fs, NULL, "Vmin", Vmin);
minH = cvReadIntByName(fs, NULL, "minH", minH);
cvNamedWindow("img", CV_WINDOW_AUTOSIZE);
cvNamedWindow("treshed", CV_WINDOW_AUTOSIZE);
cvNamedWindow("graph", CV_WINDOW_AUTOSIZE);
cvCreateTrackbar("Hmin", "treshed", &Hmin, 360, onTrack);
cvCreateTrackbar("Smin", "treshed", &Smin, 255, onTrack);
cvCreateTrackbar("Vmin", "treshed", &Vmin, 255, onTrack);
cvCreateTrackbar("Hmax", "treshed", &Hmax, 360, onTrack);
cvCreateTrackbar("Smax", "treshed", &Smax, 255, onTrack);
cvCreateTrackbar("Vmax", "treshed", &Vmax, 255, onTrack);
cvCreateTrackbar("minH", "treshed", &minH, 255, onTrack);
onTrack(0);
CvCapture* camera = cvCaptureFromCAM(cam);
while(1){
img = cvQueryFrame(camera);
allocateCvImage(&imgHSV, cvGetSize(img), 8, 3);
cvCvtColor(img, imgHSV, CV_BGR2HSV);
allocateCvImage(&imgThreshed, cvGetSize(img), 8, 1);
cvInRangeS(imgHSV, cvScalar(Hmin, Smin, Vmin, 0), cvScalar(Hmax,
Smax, Vmax, 0), imgThreshed);
cvErode(imgThreshed, imgThreshed, 0, 2);
int width = imgThreshed->width;
int height = imgThreshed->height;
int nchannels = imgThreshed->nChannels;
int step = imgThreshed->widthStep;
uchar* data = (uchar *)imgThreshed->imageData;
unsigned int graph[width];
int x,y;
for(x = 0; x < width ; x++)
graph[x] = 0;
int sum = 0, notnull = 0;
for(x = 0; x < width; x++){
for( y = 0 ; y < height ; y++ ) {
if(data[y*step + x*nchannels] == 255){
graph[x]++;
}
}
sum += graph[x];
if(graph[x] != 0)
notnull += 1;
// printf("%d\t%d\n", x, graph[x]);
}
if(notnull == 0)
notnull = 1;
int average = sum/notnull;
if(average == 0)
average = 1;
float pix = 12.0/average;
printf("\n sum: %d average: %d\n",sum,average);
int first = 0, last = 0;
// looking for goal
for(x = 0;x < width; x++){
if(graph[x] >= average && graph[x-1] < average){
cvLine(img, cvPoint(x, 0), cvPoint(x, height),
cvScalar(255, 255, 0, 0), 1, 0, 0);
if(first == 0)
first = x;
}
if(graph[x] >= average && graph[x+1] < average){
cvLine(img, cvPoint(x, 0), cvPoint(x, height),
cvScalar(255, 255, 0, 0), 1, 0, 0);
last = x;
}
//.........这里部分代码省略.........
开发者ID:xlcteam,项目名称:visy,代码行数:101,代码来源:ViSyLite.c
示例12: remove_border_ul
/* Remove the upper and lower borders of a binarised licence-plate image.
 *
 * Scans every 3rd row from the top down to mid-height (and from the bottom
 * up to mid-height), counting black<->white transitions along the row.  A
 * row with at least 6 transitions in each direction is considered part of
 * the character area; the outermost such rows become the bounds.  The strip
 * between the bounds is cropped via an ROI and written to
 * "image/img_after_border_removed.bmp".
 */
void remove_border_ul(IplImage * img_plate)
{
    int i = 0, j = 0;
    /* row indices of the upper / lower bound (-1 while undetected) */
    int up_bound = -1, low_bound = -1;
    int white_to_black = 0;
    int black_to_white = 0;

    /* Scan from the top down to half height to find the upper bound. */
    for (i = 0; i < (img_plate->height) / 2; i = i + 3) {
        unsigned char * prow = (unsigned char *)(img_plate->imageData + i * img_plate->widthStep);
        white_to_black = 0;
        black_to_white = 0;
        /* Count black->white and white->black transitions on this row,
         * sampling every 3rd pixel.  The loop stops 3 pixels early so
         * that prow[j + 3] never reads past the end of the row (the
         * original loop ran to `width` and read out of bounds). */
        for (j = 0; j + 3 < img_plate->width; j = j + 3) {
            if (prow[j] == 0 && prow[j + 3] == 255) {
                black_to_white++;
            } else if (prow[j] == 255 && prow[j + 3] == 0) {
                white_to_black++;
            }
        }
        /* A threshold of 6 requires reasonably sharp input images. */
        if (black_to_white >= 6 && white_to_black >= 6 && up_bound < 0) {
            up_bound = i;
        } else if (black_to_white < 6 && white_to_black < 6 && up_bound > 0) {
            up_bound = -1;
        }
    }

    /* Scan from the bottom up to half height to find the lower bound. */
    for (i = img_plate->height - 1; i > (img_plate->height) / 2; i = i - 3) {
        unsigned char * prow = (unsigned char *)(img_plate->imageData + i * img_plate->widthStep);
        white_to_black = 0;
        black_to_white = 0;
        /* Same transition count as above, with the same bounds fix. */
        for (j = 0; j + 3 < img_plate->width; j = j + 3) {
            if (prow[j] == 0 && prow[j + 3] == 255) {
                black_to_white++;
            } else if (prow[j] == 255 && prow[j + 3] == 0) {
                white_to_black++;
            }
        }
        if (black_to_white >= 6 && white_to_black >= 6 && low_bound < 0) {
            low_bound = i;
        } else if (black_to_white < 6 && white_to_black < 6 && low_bound > 0) {
            low_bound = -1;
        }
    }

#if 0
    /* Debug visualisation: draw the detected bounds and display them. */
    cvNamedWindow("img", 1);
    printf("up_bound is %d, low_bound is %d\n", up_bound, low_bound);
    /* void cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, CvScalar color, int thickness=1, int line_type=8, int shift=0 );*/
    cvLine(img_plate, cvPoint(0, up_bound), cvPoint(img_plate->width - 3, up_bound), CV_RGB(0xbF, 0xfd, 0xba), 3, 8, 0);
    cvLine(img_plate, cvPoint(0, low_bound), cvPoint(img_plate->width - 3, low_bound), CV_RGB(0xbF, 0xfd, 0xba), 3, 8, 0);
    cvShowImage("img", img_plate);
    cvWaitKey(0);
#endif

    /* Both bounds must exist and leave a non-empty strip between them,
     * otherwise the ROI below would have a non-positive height. */
    assert(low_bound >= 0 && up_bound >= 0);
    assert(low_bound - up_bound - 2 > 0);
    cvSetImageROI(img_plate, cvRect(0, up_bound, img_plate->width - 2, low_bound - up_bound - 2)); /* -2 keeps us inside the image */
    IplImage * tmp_img = cvCreateImage(cvSize(img_plate->width - 2, low_bound - up_bound - 2), img_plate->depth, img_plate->nChannels);
    cvCopy(img_plate, tmp_img);
    cvSaveImage("image/img_after_border_removed.bmp", tmp_img);
    cvReleaseImage(&tmp_img);  /* the original leaked this temporary image */
    cvResetImageROI(img_plate);
}
开发者ID:XianB,项目名称:youyanQT,代码行数:84,代码来源:get_character.cpp
示例13: LOGD
//============================================================================
void AAM_TDM::SaveSeriesTemplate(const CvMat* AllTextures, const AAM_PAW& m_warp)
{
LOGD("Saving the face template image...\n");
AAM_Common::MkDir("registration");
AAM_Common::MkDir("Modes");
AAM_Common::MkDir("Tri");
char filename[100];
int i;
for(i = 0; i < AllTextures->rows; i++)
{
CvMat oneTexture;
cvGetRow(AllTextures, &oneTexture, i);
sprintf(filename, "registration/%d.jpg", i);
m_warp.SaveWarpTextureToImage(filename, &oneTexture);
}
for(int nmodes = 0; nmodes < nModes(); nmodes++)
{
CvMat oneVar;
cvGetRow(__TextureEigenVectors, &oneVar, nmodes);
sprintf(filename, "Modes/A%03d.jpg", nmodes+1);
m_warp.SaveWarpTextureToImage(filename, &oneVar);
}
IplImage* templateimg = cvCreateImage
(cvSize(m_warp.Width(), m_warp.Height()), IPL_DEPTH_8U, 3);
IplImage* convexImage = cvCreateImage
(cvSize(m_warp.Width(), m_warp.Height()), IPL_DEPTH_8U, 3);
IplImage* TriImage = cvCreateImage
(cvSize(m_warp.Width(), m_warp.Height()), IPL_DEPTH_8U, 3);
m_warp.SaveWarpTextureToImage("Modes/Template.jpg", __MeanTexture);
m_warp.TextureToImage(templateimg, __MeanTexture);
cvSetZero(convexImage);
for(i = 0; i < m_warp.nTri(); i++)
{
CvPoint p, q;
int ind1, ind2;
cvCopy(templateimg, TriImage);
ind1 = m_warp.Tri(i, 0); ind2 = m_warp.Tri(i, 1);
p = cvPointFrom32f(m_warp.Vertex(ind1));
q = cvPointFrom32f(m_warp.Vertex(ind2));
cvLine(TriImage, p, q, CV_RGB(255, 255, 255));
cvLine(convexImage, p, q, CV_RGB(255, 255, 255));
ind1 = m_warp.Tri(i, 1); ind2 = m_warp.Tri(i, 2);
p = cvPointFrom32f(m_warp.Vertex(ind1));
q = cvPointFrom32f(m_warp.Vertex(ind2));
cvLine(TriImage, p, q, CV_RGB(255, 255, 255));
cvLine(convexImage, p, q, CV_RGB(255, 255, 255));
ind1 = m_warp.Tri(i, 2); ind2 = m_warp.Tri(i, 0);
p = cvPointFrom32f(m_warp.Vertex(ind1));
q = cvPointFrom32f(m_warp.Vertex(ind2));
cvLine(TriImage, p, q, CV_RGB(255, 255, 255));
cvLine(convexImage, p, q, CV_RGB(255, 255, 255));
sprintf(filename, "Tri/%03i.jpg", i+1);
cvSaveImage(filename, TriImage);
}
cvSaveImage("Tri/convex.jpg", convexImage);
cvReleaseImage(&templateimg);
cvReleaseImage(&convexImage);
cvReleaseImage(&TriImage);
}
开发者ID:2php,项目名称:aamlibrary,代码行数:72,代码来源:AAM_TDM.cpp
示例14: process_image
void process_image()
{
int i, j;
int *inliers_index;
CvSize ellipse_axis;
CvPoint gaze_point;
// --- Per-frame eye-tracking pipeline (interior of a loop whose enclosing
// --- function starts before this excerpt; its tail is elided below).
// Pipeline: grab frame -> denoise -> remove corneal glint -> starburst edge
// detection -> ellipse fit of the pupil -> draw/log results.
static int lost_frame_num = 0; // consecutive frames in which the ellipse fit produced no valid axes
Grab_Camera_Frames();
cvZero(ellipse_image); // clear the debug/visualization canvas for this frame
// Suppress pixel noise before feature extraction: 5x5 Gaussian blur plus a
// line-noise reduction pass.
cvSmooth(eye_image, eye_image, CV_GAUSSIAN, 5, 5);
Reduce_Line_Noise(eye_image);
// Optionally dump the pre-processed eye frame to disk (sequentially numbered).
if (save_image == 1) {
printf("save image %d\n", image_no);
sprintf(eye_file, "./Eye/Eye_%05d.jpg", image_no);
image_no++;
cvSaveImage(eye_file, eye_image);
}
//corneal reflection: locate and suppress the bright glint near start_point so it
//does not corrupt the pupil edge detection; its center/radius are returned via
//the corneal_reflection out-parameters.
remove_corneal_reflection(eye_image, threshold_image, (int)start_point.x, (int)start_point.y, cr_window_size,
(int)eye_image->height/10, corneal_reflection.x, corneal_reflection.y, corneal_reflection_r);
printf("corneal reflection: (%d, %d)\n", corneal_reflection.x, corneal_reflection.y);
Draw_Cross(ellipse_image, corneal_reflection.x, corneal_reflection.y, 15, 15, Yellow);
//starburst pupil contour detection: cast rays from the current estimate and
//collect candidate pupil-edge points (stored in edge_point, used below).
starburst_pupil_contour_detection((UINT8*)eye_image->imageData, eye_image->width, eye_image->height,
edge_threshold, rays, min_feature_candidates);
inliers_num = 0;
// Fit an ellipse to the candidate edge points; returns a heap-allocated index
// array of the inlier points (freed below) and fills inliers_num.
inliers_index = pupil_fitting_inliers((UINT8*)eye_image->imageData, eye_image->width, eye_image->height, inliers_num);
// pupil_param layout (per the printf below): [0]=a, [1]=b (semi-axes),
// [2]=cx, [3]=cy (center), [4]=theta (rotation).
ellipse_axis.width = (int)pupil_param[0];
ellipse_axis.height = (int)pupil_param[1];
pupil.x = (int)pupil_param[2];
pupil.y = (int)pupil_param[3];
Draw_Cross(ellipse_image, pupil.x, pupil.y, 15, 15, Red);
// Draw the pupil-to-glint vector (the basis of the gaze estimate) on both images.
cvLine(eye_image, pupil, corneal_reflection, Red, 4, 8);
cvLine(ellipse_image, pupil, corneal_reflection, Red, 4, 8);
printf("ellipse a:%lf; b:%lf, cx:%lf, cy:%lf, theta:%lf; inliers_num:%d\n\n",
pupil_param[0], pupil_param[1], pupil_param[2], pupil_param[3], pupil_param[4], inliers_num);
bool is_inliers = 0;
// Color-code every candidate edge point: green if it supported the ellipse fit
// (appears in inliers_index), yellow otherwise.
// NOTE(review): this is an O(edge_point.size() * inliers_num) scan; fine for the
// small point counts here, but a membership set would avoid the inner loop.
for (int i = 0; i < edge_point.size(); i++) {
is_inliers = 0;
for (int j = 0; j < inliers_num; j++) {
if (i == inliers_index[j])
is_inliers = 1;
}
stuDPoint *edge = edge_point.at(i);
if (is_inliers)
Draw_Cross(ellipse_image, (int)edge->x,(int)edge->y, 5, 5, Green);
else
Draw_Cross(ellipse_image, (int)edge->x,(int)edge->y, 3, 3, Yellow);
}
free(inliers_index); // presumably malloc'ed by pupil_fitting_inliers — TODO confirm
// A fit with positive axes counts as a successful track: re-seed the next
// frame's search at the new pupil center and (optionally) map to the scene.
if (ellipse_axis.width > 0 && ellipse_axis.height > 0) {
start_point.x = pupil.x;
start_point.y = pupil.y;
//printf("start_point: %d,%d\n", start_point.x, start_point.y);
Draw_Cross(eye_image, pupil.x, pupil.y, 10, 10, Green);
// cvEllipse takes degrees; pupil_param[4] is radians, negated for image coords.
cvEllipse(eye_image, pupil, ellipse_axis, -pupil_param[4]*180/PI, 0, 360, Red, 2);
cvEllipse(ellipse_image, pupil, ellipse_axis, -pupil_param[4]*180/PI, 0, 360, Green, 2);
// Gaze is estimated from the pupil-minus-glint difference vector.
diff_vector.x = pupil.x - corneal_reflection.x;
diff_vector.y = pupil.y - corneal_reflection.y;
if (do_map2scene) {
gaze_point = homography_map_point(diff_vector);
printf("gaze_point: (%d,%d)\n", gaze_point.x, gaze_point.y);
Draw_Cross(scene_image, gaze_point.x, gaze_point.y, 60, 60, Red);
}
lost_frame_num = 0;
} else {
lost_frame_num++;
}
// After 5+ consecutive failed fits, assume tracking is lost and restart the
// search from the frame center.
if (lost_frame_num > 5) {
start_point.x = FRAMEW/2;
start_point.y = FRAMEH/2;
}
Draw_Cross(ellipse_image, (int)start_point.x, (int)start_point.y, 7, 7, Blue);
Draw_Cross(eye_image, (int)start_point.x, (int)start_point.y, 7, 7, Blue);
// Optionally save the visualization frame and append the raw ellipse
// parameters to the ellipse log.
if (save_ellipse == 1) {
printf("save ellipse %d\n", ellipse_no);
sprintf(ellipse_file, "./Ellipse/Ellipse_%05d.jpg", ellipse_no);
ellipse_no++;
cvSaveImage(ellipse_file, ellipse_image);
fprintf(ellipse_log, "%.3f\t %8.2lf %8.2lf %8.2lf %8.2lf %8.2lf\n",
Time_Elapsed(), pupil_param[0], pupil_param[1], pupil_param[2], pupil_param[3], pupil_param[4]);
}
printf("Time elapsed: %.3f\n", Time_Elapsed());
fprintf(logfile,"%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
Time_Elapsed(),
pupil.x,
pupil.y,
corneal_reflection.x,
corneal_reflection.y,
//.........这里部分代码省略.........
开发者ID:downbeat,项目名称:senseye,代码行数:101,代码来源:cvEyeTracker.c
示例15: drawFloatGraph
// Draw a line graph of an array of floats into imageDst (or a newly created
// RGB image when imageDst is NULL), scaled between minV and maxV.
// Params:
//   arraySrc/nArrayLength - the data series to plot.
//   imageDst  - target image, or NULL to allocate a new w x h white image.
//               Caller must free the returned image if imageDst was NULL.
//   minV/maxV - y-axis range; if BOTH are ~0 the range is auto-computed
//               from the data (so a genuine all-zero range cannot be forced).
//   width/height - image size; values <= 20 select defaults.
//   graphLabel - unused in the visible portion of this function — TODO confirm.
//   showScale - if true, print the max-y and max-x values along the axes.
// Returns: the image drawn into (tail of the function not visible in this
// excerpt — presumably returns imageGraph; verify against the full source).
IplImage* drawFloatGraph(const float *arraySrc, int nArrayLength, IplImage *imageDst, float minV, float maxV, int width, int height, char *graphLabel, bool showScale)
{
int w = width;
int h = height;
int b = 10; // border around graph within the image
if (w <= 20)
w = nArrayLength + b*2; // width of the image
if (h <= 20)
h = 220;
int s = h - b*2;// size of graph height
float xscale = 1.0;
if (nArrayLength > 1)
xscale = (w - b*2) / (float)(nArrayLength-1); // horizontal scale
IplImage *imageGraph; // output image
// Get the desired image to draw into.
if (!imageDst) {
// Create an RGB image for graphing the data
imageGraph = cvCreateImage(cvSize(w,h), 8, 3);
// Clear the image
cvSet(imageGraph, WHITE);
}
else {
// Draw onto the given image.
imageGraph = imageDst;
}
if (!imageGraph) {
std::cerr << "ERROR in drawFloatGraph(): Couldn't create image of " << w << " x " << h << std::endl;
exit(1);
}
CvScalar colorGraph = getGraphColor(); // use a different color each time.
// If the user didnt supply min & mav values, find them from the data, so we can draw it at full scale.
if (fabs(minV) < 0.0000001f && fabs(maxV) < 0.0000001f) {
for (int i=0; i<nArrayLength; i++) {
float v = (float)arraySrc[i];
if (v < minV)
minV = v;
if (v > maxV)
maxV = v;
}
}
float diffV = maxV - minV;
if (diffV == 0)
diffV = 0.00000001f; // Stop a divide-by-zero error
float fscale = (float)s / diffV; // vertical pixels per data unit
// Draw the horizontal & vertical axis
// y0 offsets the x-axis so it sits at data value 0 when minV < 0.
int y0 = cvRound(minV*fscale);
cvLine(imageGraph, cvPoint(b,h-(b-y0)), cvPoint(w-b, h-(b-y0)), BLACK);
cvLine(imageGraph, cvPoint(b,h-(b)), cvPoint(b, h-(b+s)), BLACK);
// Write the scale of the y axis
CvFont font;
cvInitFont(&font,CV_FONT_HERSHEY_PLAIN,0.55,0.7, 0,1,CV_AA); // For OpenCV 1.1
if (showScale) {
//cvInitFont(&font,CV_FONT_HERSHEY_PLAIN,0.5,0.6, 0,1, CV_AA); // For OpenCV 2.0
CvScalar clr = GREY;
char text[16];
// sprintf_s is MSVC-specific; non-portable outside Windows builds.
sprintf_s(text, sizeof(text)-1, "%.1f", maxV);
cvPutText(imageGraph, text, cvPoint(1, b+4), &font, clr);
// Write the scale of the x axis
sprintf_s(text, sizeof(text)-1, "%d", (nArrayLength-1) );
cvPutText(imageGraph, text, cvPoint(w-b+4-5*strlen(text), (h/2)+10), &font, clr);
}
// Draw the values
// NOTE(review): ptPrev starts on the x-axis (y0), not at the first sample, so
// the first segment connects the axis to arraySrc[0] — confirm this is intended.
CvPoint ptPrev = cvPoint(b,h-(b-y0)); // Start the lines at the 1st point.
for (int i=0; i<nArrayLength; i++) {
int y = cvRound((arraySrc[i] - minV) * fscale); // Get the values at a bigger scale
int x = cvRound(i * xscale);
CvPoint ptNew = cvPoint(b+x, h-(b+y));
cvLine(imageGraph, ptPrev, ptNew, colorGraph, 1, CV_AA); // Draw a line from the previous point to the new point
ptPrev = ptNew;
}
请发表评论