本文整理汇总了C++中cvRound函数的典型用法代码示例。如果您正苦于以下问题:C++ cvRound函数的具体用法?C++ cvRound怎么用?C++ cvRound使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvRound函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: icvTestSeqReadElemOne
//.........这里部分代码省略.........
newy *= p->pImg->height-1;
}
pT->T[2] = -(pT->T[0]*newx+pT->T[1]*newy);
pT->T[5] = -(pT->T[3]*newx+pT->T[4]*newy);
}
} /* Modify transformation old. */
} /* Next record. */
/* Initialize frame number array: */
KeyFrames[0] = FirstFrame;
if(pTransSeq&&KeyFrameNum>1)
{
int i0,i1;
for(int i=0; i<KeyFrameNum; ++i)
{
CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,i);
KeyFrames[i] = cvReadIntByName(fs,pTN,"frame",-1);
}
if(KeyFrames[0]<0)KeyFrames[0]=FirstFrame;
if(KeyFrames[KeyFrameNum-1]<0)KeyFrames[KeyFrameNum-1]=LastFrame;
for(i0=0, i1=1; i1<KeyFrameNum;)
{
for(i1=i0+1; i1<KeyFrameNum && KeyFrames[i1]<0; i1++) {}
assert(i1<KeyFrameNum);
assert(i1>i0);
for(int i=i0+1; i<i1; ++i)
{
KeyFrames[i] = cvRound(KeyFrames[i0] + (float)(i-i0)*(float)(KeyFrames[i1] - KeyFrames[i0])/(float)(i1-i0));
}
i0 = i1;
i1++;
} /* Next key run. */
} /* Initialize frame number array. */
if(pTransNode || pTransSeq)
{ /* More complex transform. */
int param;
CvFileNode* pTN = pTransSeq?(CvFileNode*)cvGetSeqElem(pTransSeq,0):pTransNode;
for(p=pElem; p; p=p->next)
{
//int trans_num = p->TransNum;
for(param=0; param_name[param]; ++param)
{
const char* name = param_name[param];
float defv = param_defval[param];
if(KeyFrameNum==1)
{ /* Only one transform record: */
int i;
double val;
CvFileNode* fnode = cvGetFileNodeByName( fs, pTN,name);
if(fnode == NULL) continue;
val = cvReadReal(fnode,defv);
for(i=0; i<p->TransNum; ++i)
{
icvUpdateTrans(
p->pTrans+i, param, val,
p->pImg?(float)(p->pImg->width-1):1.0f,
p->pImg?(float)(p->pImg->height-1):1.0f);
开发者ID:DevShah,项目名称:18551,代码行数:67,代码来源:testseq.cpp
示例2: toCircleRad
int toCircleRad(const double dRad)
{
return clip<int>(cvRound(dRad), 1, 255);
}
开发者ID:JustineSurGithub,项目名称:tom-cv,代码行数:4,代码来源:imageUtil.cpp
示例3: frame_gray
std::vector<cv::Vec3f> CircularSampleAreaDetector::detect(cv::Mat frame) {
  // Detect circular sample areas in a BGR frame with the Hough transform.
  // Returns the circles that lie entirely inside the frame, with each
  // radius shrunk by CIRCLE_SHRINK_FACTOR so the sample area stays well
  // inside the detected circle.

  // Convert the image to grayscale. Let cvtColor allocate the output
  // instead of constructing a header that aliases the input frame.
  cv::Mat frame_gray;
  cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);

  // Blur to remove extraneous detail before circle detection.
  cv::GaussianBlur(frame_gray, frame_gray, cv::Size(9, 9), 2, 2);

  // Extract circle features.
  std::vector<cv::Vec3f> circles;
  HoughCircles(frame_gray, circles, CV_HOUGH_GRADIENT,
               2,   // inverse resolution ratio
               50,  // min dist between circle centers
               50,  // canny upper threshold
               150, // center detection threshold
               0,   // min radius (0 = unconstrained)
               0    // max radius (0 = unconstrained)
  );

  // Keep only the circles that fit entirely on screen.
  // TODO: This is not the best way to do this. Research probabilistic methods?
  cv::Point frame_center(frame_gray.cols / 2, frame_gray.rows / 2);
  std::vector<cv::Vec3f> good_circles;
  for(size_t i = 0; i < circles.size(); i++) {
    cv::Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
    // Round the radius like the center coordinates; plain truncation could
    // accept a circle that actually extends slightly beyond the border.
    int radius = cvRound(circles[i][2]);

    // Ensure circle is entirely in screen
    if(center.x - radius < 0 || center.x + radius > frame_gray.cols
        || center.y - radius < 0 || center.y + radius > frame_gray.rows) {
      continue;
    }

    good_circles.push_back(cv::Vec3f(circles[i][0], circles[i][1],
                                     circles[i][2] * CIRCLE_SHRINK_FACTOR));
  }

  return good_circles;
}
开发者ID:RemchoResearchGroup,项目名称:OccuChrome-GitHub,代码行数:71,代码来源:CircularSampleAreaDetector.cpp
示例4: main
//.........这里部分代码省略.........
imagePoints[0][i][j].y * lines[1][j][1] +
lines[1][j][2]) +
std::fabs(imagePoints[1][i][j].x * lines[0][j][0] +
imagePoints[1][i][j].y * lines[0][j][1] +
lines[0][j][2]);
err += errij;
}
npoints += size;
}
std::cout << "average reprojection error = " << err / npoints << std::endl;
cv::Mat R1, R2, P1, P2, Q;
cv::Rect validROI[2];
stereoRectify(cameraMatrix[0], distCoeffs[0], cameraMatrix[1],
distCoeffs[1], lefts[0].size(), R, T, R1, R2, P1, P2, Q,
cv::CALIB_ZERO_DISPARITY, 1, lefts[0].size(),
&validROI[0], &validROI[1]);
{
cv::FileStorage fs(FLAGS_intrinsics.c_str(), cv::FileStorage::WRITE);
if (fs.isOpened()) {
fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0]
<< "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
fs.release();
}
}
cv::Mat rmap[2][2];
cv::initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1,
lefts[0].size(),
CV_16SC2,
rmap[0][0], rmap[0][1]);
cv::initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2,
lefts[0].size(),
CV_16SC2,
rmap[1][0], rmap[1][1]);
{
cv::FileStorage fs(FLAGS_extrinsics.c_str(), cv::FileStorage::WRITE);
if (fs.isOpened()) {
fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2
<< "P1" << P1 << "P2" << P2 << "Q" << Q
<< "V1" << validROI[0] << "V2" << validROI[1];
fs.release();
}
}
cv::Mat canvas;
double sf;
int w, h;
sf = 600. / MAX(lefts[0].size().width, lefts[0].size().height);
w = cvRound(lefts[0].size().width * sf);
h = cvRound(lefts[0].size().height * sf);
canvas.create(h, w * 2, CV_8UC3);
cv::namedWindow("Rectified", CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
for (int i = 0; i < FLAGS_size; i++) {
for (int k = 0; k < 2; k++) {
if (k == 0) {
cv::Mat img = lefts[i].clone(), rimg, cimg;
cv::remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
cv::cvtColor(rimg, cimg, CV_GRAY2BGR);
cv::Mat canvasPart = canvas(cv::Rect(w * k, 0, w, h));
cv::resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
cv::Rect vroi(cvRound(validROI[k].x * sf),
cvRound(validROI[k].y * sf),
cvRound(validROI[k].width * sf),
cvRound(validROI[k].height * sf));
cv::rectangle(canvasPart, vroi, cv::Scalar(0, 0, 255), 3, 8);
} else {
cv::Mat img = rights[i].clone(), rimg, cimg;
cv::remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
cvtColor(rimg, cimg, CV_GRAY2BGR);
cv::Mat canvasPart = canvas(cv::Rect(w * k, 0, w, h));
cv::resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
cv::Rect vroi(cvRound(validROI[k].x * sf),
cvRound(validROI[k].y * sf),
cvRound(validROI[k].width * sf),
cvRound(validROI[k].height * sf));
cv::rectangle(canvasPart, vroi, cv::Scalar(0, 0, 255), 3, 8);
}
}
for (int j = 0; j < canvas.rows; j += 16)
cv::line(canvas, cv::Point(0, j), cv::Point(canvas.cols, j),
cv::Scalar(0, 255, 0), 1, 8);
cv::imshow("Rectified", canvas);
if (cv::waitKey(0) == 'q')
break;
}
cv::destroyAllWindows();
return 0;
}
开发者ID:aleksandaratanasov,项目名称:rgbd-grabber,代码行数:101,代码来源:StereoCameraCalibration.cpp
示例5: Run
void Run()
{
int w, h;
IplImage *pCapImage;
PBYTE pCapBuffer = NULL;
// Create camera instance
_cam = CLEyeCreateCamera(_cameraGUID, _mode, _resolution, _fps);
if(_cam == NULL) return;
// Get camera frame dimensions
CLEyeCameraGetFrameDimensions(_cam, w, h);
// Depending on color mode chosen, create the appropriate OpenCV image
if(_mode == CLEYE_COLOR_PROCESSED || _mode == CLEYE_COLOR_RAW)
pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 4);
else
pCapImage = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
// Set some camera parameters
//CLEyeSetCameraParameter(_cam, CLEYE_GAIN, 30);
//CLEyeSetCameraParameter(_cam, CLEYE_EXPOSURE, 500);
//CLEyeSetCameraParameter(_cam, CLEYE_AUTO_EXPOSURE, false);
//CLEyeSetCameraParameter(_cam, CLEYE_AUTO_GAIN, false);
//CLEyeSetCameraParameter(_cam, CLEYE_AUTO_WHITEBALANCE, false);
//CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_RED, 100);
//CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_BLUE, 200);
//CLEyeSetCameraParameter(_cam, CLEYE_WHITEBALANCE_GREEN, 200);
// Start capturing
CLEyeCameraStart(_cam);
CvMemStorage* storage = cvCreateMemStorage(0);
IplImage* hsv_frame = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 3);
IplImage* thresholded = cvCreateImage(cvSize(pCapImage->width, pCapImage->height), IPL_DEPTH_8U, 1);
IplImage* temp = cvCreateImage(cvSize(pCapImage->width >> 1, pCapImage->height >> 1), IPL_DEPTH_8U, 3);
// Create a window in which the captured images will be presented
cvNamedWindow( "Camera" , CV_WINDOW_AUTOSIZE );
cvNamedWindow( "HSV", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE );
//int hl = 100, hu = 115, sl = 95, su = 135, vl = 115, vu = 200;
int hl = 5, hu = 75, sl = 40, su = 245, vl = 105, vu = 175;
// image capturing loop
while(_running)
{
// Detect a red ball
CvScalar hsv_min = cvScalar(hl, sl, vl, 0);
CvScalar hsv_max = cvScalar(hu, su, vu, 0);
cvGetImageRawData(pCapImage, &pCapBuffer);
CLEyeCameraGetFrame(_cam, pCapBuffer);
cvConvertImage(pCapImage, hsv_frame);
// Get one frame
if( !pCapImage )
{
fprintf( stderr, "ERROR: frame is null...\n" );
getchar();
break;
}
// Covert color space to HSV as it is much easier to filter colors in the HSV color-space.
cvCvtColor(pCapImage, hsv_frame, CV_RGB2HSV);
// Filter out colors which are out of range.
cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
// Memory for hough circles
CvMemStorage* storage = cvCreateMemStorage(0);
// hough detector works better with some smoothing of the image
cvSmooth( thresholded, thresholded, CV_GAUSSIAN, 9, 9 );
CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 2,
thresholded->height/4, 100, 50, 10, 400);
for (int i = 0; i < circles->total; i++)
{
float* p = (float*)cvGetSeqElem( circles, i );
//printf("Ball! x=%f y=%f r=%f\n\r",p[0],p[1],p[2] );
cvCircle( pCapImage, cvPoint(cvRound(p[0]),cvRound(p[1])),
3, CV_RGB(0,255,0), -1, 8, 0 );
cvCircle( pCapImage, cvPoint(cvRound(p[0]),cvRound(p[1])),
cvRound(p[2]), CV_RGB(255,0,0), 3, 8, 0 );
}
cvShowImage( "Camera", pCapImage ); // Original stream with detected ball overlay
cvShowImage( "HSV", hsv_frame); // Original stream in the HSV color space
cvShowImage( "EdgeDetection", thresholded ); // The stream after color filtering
cvReleaseMemStorage(&storage);
//.........这里部分代码省略.........
开发者ID:JuannyWang,项目名称:CVTrack,代码行数:101,代码来源:TennisTrack2.cpp
示例6: cvRound
// Specialization for int: a double is converted by rounding to the
// nearest integer via cvRound.
template<> inline int saturate_cast<int>(double v)
{
    return cvRound(v);
}
开发者ID:Alegzandra,项目名称:opencv,代码行数:1,代码来源:base.hpp
示例7: cvSize
//
// Transform
// Transform the sample 'in place': wraps the DirectShow sample buffer in an
// OpenCV image header and draws tracking feedback (selection box, tracked
// rectangle, position bars, and a velocity-direction line) onto it.
//
HRESULT CKalmTrack::Transform(IMediaSample *pSample)
{
    BYTE* pData;
    CvImage image;

    // Raw pixel pointer of the media sample.
    pSample->GetPointer(&pData);

    // Pull the video format from the input pin's current media type.
    AM_MEDIA_TYPE* pType = &m_pInput->CurrentMediaType();
    VIDEOINFOHEADER *pvi = (VIDEOINFOHEADER *) pType->pbFormat;

    // Get the image properties from the BITMAPINFOHEADER
    CvSize size = cvSize( pvi->bmiHeader.biWidth, pvi->bmiHeader.biHeight );
    // 24-bit rows padded up to a 4-byte boundary.
    int stride = (size.width * 3 + 3) & -4;

    // Wrap the sample buffer in an IplImage header without copying pixels.
    cvInitImageHeader( &image, size, IPL_DEPTH_8U, 3, IPL_ORIGIN_TL, 4 );
    cvSetImageData( &image, pData,stride );

    if(IsTracking == false)
    {
        if(IsInit == false)
        {
            // Not initialized yet: just draw the selection box described by
            // m_params (normalized [0..1] coordinates).
            CvPoint p1, p2;
            // Draw box
            p1.x = cvRound( size.width * m_params.x );
            p1.y = cvRound( size.height * m_params.y );
            p2.x = cvRound( size.width * (m_params.x + m_params.width));
            p2.y = cvRound( size.height * (m_params.y + m_params.height));
            CheckBackProject( &image );
            cvRectangle( &image, p1, p2, -1, 1 );
        }
        else
        {
            // Initialization requested: convert the normalized selection to
            // pixel coordinates and start CamShift-based tracking.
            m_object.x = cvRound( size.width * m_params.x );
            m_object.y = cvRound( size.height * m_params.y );
            m_object.width = cvRound( size.width * m_params.width );
            m_object.height = cvRound( size.height * m_params.height );
            ApplyCamShift( &image, true );
            CheckBackProject( &image );
            IsTracking = true;
        }
    }
    else
    {
        // Tracking: advance the Kalman prediction, position the search
        // window at the predicted center, then refine with CamShift.
        // (PriorState[0]/[2] are used as predicted x/y center here.)
        cvKalmanUpdateByTime(Kalman);
        m_object.x = cvRound( Kalman->PriorState[0]-m_object.width*0.5);
        m_object.y = cvRound( Kalman->PriorState[2]-m_object.height*0.5 );
        ApplyCamShift( &image, false );
        CheckBackProject( &image );

        // Tracked object rectangle.
        cvRectangle( &image,
                     cvPoint( m_object.x, m_object.y ),
                     cvPoint( m_object.x + m_object.width, m_object.y + m_object.height ),
                     -1, 1 );
        Rectang(&image,m_Indicat1,-1);

        // Horizontal position bar (blue, length proportional to m_Old.x).
        m_X.x = 10;
        m_X.y = 10;
        m_X.width=50*m_Old.x/size.width;
        m_X.height =10;
        Rectang(&image,m_X,CV_RGB(0,0,255));
        // Vertical position bar (red, length proportional to m_Old.y).
        m_Y.x = 10;
        m_Y.y = 10;
        m_Y.width=10;
        m_Y.height = 50*m_Old.y/size.height;
        Rectang(&image,m_Y,CV_RGB(255,0,0));

        // 50x50 indicator square in the bottom-left corner.
        m_Indicat2.x = 0;
        m_Indicat2.y = size.height-50;
        m_Indicat2.width = 50;
        m_Indicat2.height = 50;
        Rectang(&image,m_Indicat2,-1);

        // Direction line inside the indicator square. Measurement[1] and
        // Measurement[3] are presumably the x/y velocity components of the
        // Kalman measurement vector — TODO confirm against ApplyCamShift.
        // Components with magnitude <= 5 are suppressed as noise; the
        // ternary guards also avoid dividing by a zero Norm.
        float Norm = cvSqrt(Measurement[1]*Measurement[1]+Measurement[3]*Measurement[3]);
        int VXNorm = (fabs(Measurement[1])>5)?(int)(12*Measurement[1]/Norm):0;
        int VYNorm = (fabs(Measurement[3])>5)?(int)(12*Measurement[3]/Norm):0;
        CvPoint pp1 = {25,size.height-25};
        CvPoint pp2 = {25+VXNorm,size.height-25+VYNorm};
        cvLine(&image,pp1,pp2,CV_RGB(0,0,0),3);
        /*CvPoint pp1 = {25,size.height-25};
        double angle = atan2( Measurement[3], Measurement[1] );
        CvPoint pp2 = {cvRound(25+12*cos(angle)),cvRound(size.height-25-12*sin(angle))};
        cvLine(&image,pp1,pp2,0,3);*/
    }

    // Detach the image header from the sample buffer before returning.
    cvSetImageData( &image, 0, 0 );

    return NOERROR;
} // Transform
开发者ID:JackJone,项目名称:opencv,代码行数:97,代码来源:Kalman.cpp
示例8: cvCloneImage
// This function is copied from http://mehrez.kristou.org/opencv-change-contrast-and-brightness-of-an-image/
// Returns a new image with contrast and brightness adjusted through a
// 256-entry lookup table. Both parameters are clamped to [-100, 100];
// the source image is not modified.
boost::shared_ptr< Image > Image::ContrastBrightness( int contrast, int brightness ) const
{
    // Clamp inputs to the supported range.
    if(contrast > 100) contrast = 100;
    if(contrast < -100) contrast = -100;
    if(brightness > 100) brightness = 100;
    if(brightness < -100) brightness = -100;

    uchar lut[256];                  // intensity remapping table (filled below)
    CvMat* lut_mat;
    int hist_size = 256;             // NOTE(review): unused below
    float range_0[]={0,256};
    float* ranges[] = { range_0 };   // NOTE(review): unused below
    int i;

    // Destination starts as a copy of this image.
    IplImage * dest = cvCloneImage(this);

    // Grayscale working copy. NOTE(review): in the 3-channel branch GRAY is
    // computed but never read afterwards — the conversion is wasted work.
    IplImage * GRAY;
    if (this->nChannels == 3)
    {
        GRAY = cvCreateImage(cvGetSize(this),this->depth,1);
        cvCvtColor(this,GRAY,CV_RGB2GRAY);
    }
    else
    {
        GRAY = cvCloneImage(this);
    }

    // Wrap the stack-allocated LUT in a 1x256 CvMat header (no data copy).
    lut_mat = cvCreateMatHeader( 1, 256, CV_8UC1 );
    cvSetData( lut_mat, lut, 0 );

    /*
     * The algorithm is by Werner D. Streidt
     * (http://visca.com/ffactory/archives/5-99/msg00021.html)
     */
    if( contrast > 0 )
    {
        // Positive contrast: steepen the linear transfer curve v = a*i + b.
        double delta = 127.* contrast/100;
        double a = 255./(255. - delta*2);
        double b = a*(brightness - delta);
        for( i = 0; i < 256; i++ )
        {
            int v = cvRound(a*i + b);
            // Saturate to the valid 8-bit range.
            if( v < 0 )
                v = 0;
            if( v > 255 )
                v = 255;
            lut[i] = v;
        }
    }
    else
    {
        // Zero or negative contrast: flatten the transfer curve.
        double delta = -128.* contrast/100;
        double a = (256.-delta*2)/255.;
        double b = a* brightness + delta;
        for( i = 0; i < 256; i++ )
        {
            int v = cvRound(a*i + b);
            if( v < 0 )
                v = 0;
            if( v > 255 )
                v = 255;
            lut[i] = v;
        }
    }

    if (this->nChannels ==3)
    {
        // Apply the LUT to each color plane independently, then merge the
        // planes back into the destination image.
        IplImage * R = cvCreateImage(cvGetSize(this),this->depth,1);
        IplImage * G = cvCreateImage(cvGetSize(this),this->depth,1);
        IplImage * B = cvCreateImage(cvGetSize(this),this->depth,1);
        cvCvtPixToPlane(this,R,G,B,NULL);
        cvLUT( R, R, lut_mat );
        cvLUT( G, G, lut_mat );
        cvLUT( B, B, lut_mat );
        cvCvtPlaneToPix(R,G,B,NULL,dest);
        cvReleaseImage(&R);
        cvReleaseImage(&G);
        cvReleaseImage(&B);
    }
    else
    {
        // Single channel: apply the LUT directly into the destination.
        cvLUT( GRAY, dest, lut_mat );
    }

    cvReleaseImage(&GRAY);
    cvReleaseMat( &lut_mat);

    // NOTE(review): the 'true' flag presumably transfers ownership of
    // 'dest' to the returned Image — confirm against the Image constructor.
    return boost::shared_ptr< Image >( new Image( dest, true ) );
}
开发者ID:Killerregenwurm,项目名称:utvision,代码行数:89,代码来源:Image.cpp
示例9: process_image
void process_image(){
///////////////////////////////////////////////////////
//////////////////// PUPIL/////////////////////////////
///////////////////////////////////////////////////////
int numBins = 256;
float range[] = {0, 255};
float *ranges[] = { range };
CvHistogram *hist = cvCreateHist(1, &numBins, CV_HIST_ARRAY, ranges, 1);
cvClearHist(hist);
cvCalcHist(&smooth, hist, 0, 0);
IplImage* imgHist = DrawHistogram(hist,1,1);
cvClearHist(hist);
//cvShowImage("hist", imgHist);
cvThreshold(smooth,pupil,50,255,CV_THRESH_BINARY);
//cvShowImage( "pupi_binary",pupil);
cvCanny(pupil,pedge,40,50);
//cvShowImage( "pupil_edge",pedge);
//////////////////////////////////////////////////////////
//////////////////////IRIS////////////////////////////////
//////////////////////////////////////////////////////////
//cvEqualizeHist(smooth,smooth);
//cvShowImage("Equalized",smooth);
cvThreshold(smooth,iris,100,255,CV_THRESH_BINARY); //115
//cvShowImage( "iris_binary",iris);
//cvSobel(iris,iedge,1,0,3);
cvCanny(iris,iedge,1,255);
//cvShowImage( "iris_edge",iedge);
/////////////////////////////////////////////////////////
///////////////////////Eyelids///////////////////////////
/////////////////////////////////////////////////////////
cvThreshold(smooth,eyelid_mask,150,255,CV_THRESH_OTSU);
cvNot(eyelid_mask,eyelid_mask);
//cvShowImage("eyelid",eyelid_mask);
//cvAdaptiveThreshold(smooth,contour,255,CV_ADAPTIVE_THRESH_MEAN_C,CV_THRESH_BINARY,9,1);
//cvThreshold(smooth,contour,130,255,CV_THRESH_BINARY);
//cvShowImage( "contour",contour);
//CvSeq* firstContour = NULL;
//CvMemStorage* cstorage = cvCreateMemStorage(0);
//cvFindContours(con, cstorage, &firstContour,sizeof(CvContour), CV_RETR_LIST,CV_CHAIN_APPROX_SIMPLE);
//cvDrawContours(dst,firstContour,CV_RGB(0,255,0),CV_RGB(0,0,255),10,2,8);
CvMemStorage* storage_pupil = cvCreateMemStorage(0);
CvSeq* presults = cvHoughCircles(pedge,storage_pupil,CV_HOUGH_GRADIENT,2,src->width,255,1);
for( int i = 0; i < presults->total; i++ )
{
float* p = (float*) cvGetSeqElem( presults, i );
CvPoint pt = cvPoint( cvRound( p[0] ),cvRound( p[1] ) );
xp=cvRound( p[0] );
yp=cvRound( p[1] );
rp=p[2];
cvCircle(dst,pt,cvRound( p[2] ),CV_RGB(0,255,255),1,400);
xroi= xp-shift;
yroi= yp-shift;
cvRectangle(dst,cvPoint(( p[0] )-shift,p[1]-shift),cvPoint(( p[0] )+shift,p[1]+shift),CV_RGB(255,0,255), 1);
CvRect roi= cvRect(xroi ,yroi,shift*2,shift*2);
//.........这里部分代码省略.........
开发者ID:Exorcismus,项目名称:IRIS-Recognition,代码行数:101,代码来源:process_image.cpp
示例10: llcv_hough
DMZ_INTERNAL CvLinePolar llcv_hough(const CvArr *src_image, IplImage *dx, IplImage *dy, float rho, float theta, int threshold, float theta_min, float theta_max, bool vertical, float gradient_angle_threshold) {
CvMat img_stub, *img = (CvMat*)src_image;
img = cvGetMat(img, &img_stub);
CvMat dx_stub, *dx_mat = (CvMat*)dx;
dx_mat = cvGetMat(dx_mat, &dx_stub);
CvMat dy_stub, *dy_mat = (CvMat*)dy;
dy_mat = cvGetMat(dy_mat, &dy_stub);
if(!CV_IS_MASK_ARR(img)) {
CV_Error(CV_StsBadArg, "The source image must be 8-bit, single-channel");
}
if(rho <= 0 || theta <= 0 || threshold <= 0) {
CV_Error(CV_StsOutOfRange, "rho, theta and threshold must be positive");
}
if(theta_max < theta_min + theta) {
CV_Error(CV_StsBadArg, "theta + theta_min (param1) must be <= theta_max (param2)");
}
cv::AutoBuffer<int> _accum;
cv::AutoBuffer<int> _tabSin, _tabCos;
const uchar* image;
int step, width, height;
int numangle, numrho;
float ang;
int r, n;
int i, j;
float irho = 1 / rho;
float scale;
CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );
image = img->data.ptr;
step = img->step;
width = img->cols;
height = img->rows;
const uint8_t *dx_mat_ptr = (uint8_t *)(dx_mat->data.ptr);
int dx_step = dx_mat->step;
const uint8_t *dy_mat_ptr = (uint8_t *)(dy_mat->data.ptr);
int dy_step = dy_mat->step;
numangle = cvRound((theta_max - theta_min) / theta);
numrho = cvRound(((width + height) * 2 + 1) / rho);
_accum.allocate((numangle+2) * (numrho+2));
_tabSin.allocate(numangle);
_tabCos.allocate(numangle);
int *accum = _accum;
int *tabSin = _tabSin, *tabCos = _tabCos;
memset(accum, 0, sizeof(accum[0]) * (numangle + 2) * (numrho + 2));
#define FIXED_POINT_EXPONENT 10
#define FIXED_POINT_MULTIPLIER (1 << FIXED_POINT_EXPONENT)
for(ang = theta_min, n = 0; n < numangle; ang += theta, n++) {
tabSin[n] = (int)floorf(FIXED_POINT_MULTIPLIER * sinf(ang) * irho);
tabCos[n] = (int)floorf(FIXED_POINT_MULTIPLIER * cosf(ang) * irho);
}
float slope_bound_a, slope_bound_b;
if(vertical) {
slope_bound_a = tanf((float)TO_RADIANS(180 - gradient_angle_threshold));
slope_bound_b = tanf((float)TO_RADIANS(180 + gradient_angle_threshold));
} else {
slope_bound_a = tanf((float)TO_RADIANS(90 - gradient_angle_threshold));
slope_bound_b = tanf((float)TO_RADIANS(90 + gradient_angle_threshold));
}
// stage 1. fill accumulator
for(i = 0; i < height; i++) {
int16_t *dx_row_ptr = (int16_t *)(dx_mat_ptr + i * dx_step);
int16_t *dy_row_ptr = (int16_t *)(dy_mat_ptr + i * dy_step);
for(j = 0; j < width; j++) {
if(image[i * step + j] != 0) {
int16_t del_x = dx_row_ptr[j];
int16_t del_y = dy_row_ptr[j];
bool use_pixel = false;
if(dmz_likely(del_x != 0)) { // avoid div by 0
float slope = (float)del_y / (float)del_x;
if(vertical) {
if(slope >= slope_bound_a && slope <= slope_bound_b) {
use_pixel = true;
}
} else {
if(slope >= slope_bound_a || slope <= slope_bound_b) {
use_pixel = true;
}
}
} else {
use_pixel = !vertical;
}
//.........这里部分代码省略.........
开发者ID:COCUS-NEXT,项目名称:card.io-dmz,代码行数:101,代码来源:hough.cpp
示例11: icvOnMouse
// Translate a GTK mouse event on a CvWindow's drawing widget into a call to
// the user's OpenCV mouse callback (window->on_mouse). Always returns FALSE
// so GTK continues normal event propagation.
static gboolean icvOnMouse( GtkWidget *widget, GdkEvent *event, gpointer user_data )
{
    // TODO move this logic to CvImageWidget
    CvWindow* window = (CvWindow*)user_data;
    CvPoint2D32f pt32f(-1., -1.);   // event position in widget coordinates
    CvPoint pt(-1,-1);              // event position in image coordinates
    int cv_event = -1, state = 0;
    CvImageWidget * image_widget = CV_IMAGE_WIDGET( widget );

    // Ignore events for stale/mismatched windows or when no callback is set.
    if( window->signature != CV_WINDOW_MAGIC_VAL ||
        window->widget != widget || !window->widget ||
        !window->on_mouse /*|| !image_widget->original_image*/)
        return FALSE;

    if( event->type == GDK_MOTION_NOTIFY )
    {
        // Pointer motion: report a mouse-move at the widget position.
        GdkEventMotion* event_motion = (GdkEventMotion*)event;

        cv_event = CV_EVENT_MOUSEMOVE;
        pt32f.x = cvRound(event_motion->x);
        pt32f.y = cvRound(event_motion->y);
        state = event_motion->state;
    }
    else if( event->type == GDK_BUTTON_PRESS ||
             event->type == GDK_BUTTON_RELEASE ||
             event->type == GDK_2BUTTON_PRESS )
    {
        // Button events: map GTK button numbers (1=left, 2=middle, 3=right)
        // to the matching OpenCV down/up/double-click event codes.
        GdkEventButton* event_button = (GdkEventButton*)event;
        pt32f.x = cvRound(event_button->x);
        pt32f.y = cvRound(event_button->y);


        if( event_button->type == GDK_BUTTON_PRESS )
        {
            cv_event = event_button->button == 1 ? CV_EVENT_LBUTTONDOWN :
                       event_button->button == 2 ? CV_EVENT_MBUTTONDOWN :
                       event_button->button == 3 ? CV_EVENT_RBUTTONDOWN : 0;
        }
        else if( event_button->type == GDK_BUTTON_RELEASE )
        {
            cv_event = event_button->button == 1 ? CV_EVENT_LBUTTONUP :
                       event_button->button == 2 ? CV_EVENT_MBUTTONUP :
                       event_button->button == 3 ? CV_EVENT_RBUTTONUP : 0;
        }
        else if( event_button->type == GDK_2BUTTON_PRESS )
        {
            cv_event = event_button->button == 1 ? CV_EVENT_LBUTTONDBLCLK :
                       event_button->button == 2 ? CV_EVENT_MBUTTONDBLCLK :
                       event_button->button == 3 ? CV_EVENT_RBUTTONDBLCLK : 0;
        }
        state = event_button->state;
    }

    if( cv_event >= 0 ){
        // scale point if image is scaled
        if( (image_widget->flags & CV_WINDOW_AUTOSIZE)==0 &&
             image_widget->original_image &&
             image_widget->scaled_image ){
            // image origin is not necessarily at (0,0); the scaled image is
            // centered inside the widget allocation.
            int x0 = (widget->allocation.width - image_widget->scaled_image->cols)/2;
            int y0 = (widget->allocation.height - image_widget->scaled_image->rows)/2;
            pt.x = cvRound( ((pt32f.x-x0)*image_widget->original_image->cols)/
                                image_widget->scaled_image->cols );
            pt.y = cvRound( ((pt32f.y-y0)*image_widget->original_image->rows)/
                                image_widget->scaled_image->rows );
        }
        else{
            pt = cvPointFrom32f( pt32f );
        }

//        if((unsigned)pt.x < (unsigned)(image_widget->original_image->width) &&
//           (unsigned)pt.y < (unsigned)(image_widget->original_image->height) )
        {
            // Translate GDK modifier/button state into OpenCV event flags.
            int flags = (state & GDK_SHIFT_MASK ? CV_EVENT_FLAG_SHIFTKEY : 0) |
                (state & GDK_CONTROL_MASK ? CV_EVENT_FLAG_CTRLKEY : 0) |
                (state & (GDK_MOD1_MASK|GDK_MOD2_MASK) ? CV_EVENT_FLAG_ALTKEY : 0) |
                (state & GDK_BUTTON1_MASK ? CV_EVENT_FLAG_LBUTTON : 0) |
                (state & GDK_BUTTON2_MASK ? CV_EVENT_FLAG_MBUTTON : 0) |
                (state & GDK_BUTTON3_MASK ? CV_EVENT_FLAG_RBUTTON : 0);
            window->on_mouse( cv_event, pt.x, pt.y, flags, window->on_mouse_param );
        }
    }

    return FALSE;
}
开发者ID:406089450,项目名称:opencv,代码行数:85,代码来源:window_gtk.cpp
示例12: do_work
void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
{
  // Detect faces (and eyes within each face) in an incoming ROS image,
  // annotate the frame with the detections, and publish both the annotated
  // image and a FaceArrayStamped message describing the detections.
  try
  {
    // Convert the image into something opencv can handle.
    cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

    // Output message mirrors the input header (stamp / frame id).
    opencv_apps::FaceArrayStamped faces_msg;
    faces_msg.header = msg->header;

    // Detection runs on a histogram-equalized grayscale copy.
    std::vector<cv::Rect> faces;
    cv::Mat frame_gray;

    cv::cvtColor( frame, frame_gray, cv::COLOR_BGR2GRAY );
    cv::equalizeHist( frame_gray, frame_gray );
    //-- Detect faces
#ifndef CV_VERSION_EPOCH
    face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0, cv::Size(30, 30) );
#else
    face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

    for( size_t i = 0; i < faces.size(); i++ )
    {
      // Draw an ellipse around the face and record its center and size.
      cv::Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
      cv::ellipse( frame, center, cv::Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, cv::Scalar( 255, 0, 255 ), 2, 8, 0 );
      opencv_apps::Face face_msg;
      face_msg.face.x = center.x;
      face_msg.face.y = center.y;
      face_msg.face.width = faces[i].width;
      face_msg.face.height = faces[i].height;

      cv::Mat faceROI = frame_gray( faces[i] );
      std::vector<cv::Rect> eyes;

      //-- In each face, detect eyes
#ifndef CV_VERSION_EPOCH
      eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0, cv::Size(30, 30) );
#else
      eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

      for( size_t j = 0; j < eyes.size(); j++ )
      {
        // Eye coordinates are relative to the face ROI; offset them back
        // into full-frame coordinates before drawing/publishing.
        cv::Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
        int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
        cv::circle( frame, eye_center, radius, cv::Scalar( 255, 0, 0 ), 3, 8, 0 );

        opencv_apps::Rect eye_msg;
        eye_msg.x = eye_center.x;
        eye_msg.y = eye_center.y;
        eye_msg.width = eyes[j].width;
        eye_msg.height = eyes[j].height;
        face_msg.eyes.push_back(eye_msg);
      }

      faces_msg.faces.push_back(face_msg);
    }
    //-- Show what you got
    if( debug_view_) {
      cv::imshow( "Face detection", frame );
      // waitKey pumps the HighGUI event loop so imshow refreshes; the
      // returned key code was stored in an unused local before, so drop it.
      cv::waitKey(1);
    }

    // Publish the annotated image and the detection message.
    sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, msg->encoding,frame).toImageMsg();
    img_pub_.publish(out_img);
    msg_pub_.publish(faces_msg);
  }
  catch (cv::Exception &e)
  {
    NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
  }

  prev_stamp_ = msg->header.stamp;
}
开发者ID:srmanikandasriram,项目名称:vision_opencv,代码行数:79,代码来源:face_detection_nodelet.cpp
示例13: cvCreateTestSeq
CvTestSeq* cvCreateTestSeq(char* pConfigfile, char** videos, int numvideo, float Scale, int noise_type, double noise_ampl)
{
    /* Build a synthetic test-sequence descriptor from a config file and a
     * list of video section names. Returns NULL on allocation or file-open
     * failure (releasing anything already acquired). */
    int             size = sizeof(CvTestSeq_);
    CvTestSeq_*     pTS = (CvTestSeq_*)cvAlloc(size);
    CvFileStorage*  fs = cvOpenFileStorage( pConfigfile, NULL, CV_STORAGE_READ);
    int             i;

    if(pTS == NULL || fs == NULL)
    {   /* Partial failure: release whichever resource was acquired so the
         * early return does not leak it (the original leaked here). */
        if(fs) cvReleaseFileStorage(&fs);
        if(pTS) cvFree(&pTS);
        return NULL;
    }

    memset(pTS,0,size);
    pTS->pFileStorage = fs;
    pTS->noise_ampl = noise_ampl;
    pTS->noise_type = noise_type;
    pTS->IVar_DI = 0;
    pTS->ObjNum = 0;

    /* Read all videos: */
    for (i=0; i<numvideo; ++i)
    {
        CvTestSeqElem*  pElemNew = icvTestSeqReadElemAll(pTS, fs, videos[i]);

        if(pTS->pElemList==NULL)pTS->pElemList = pElemNew;
        else
        {
            /* Append to the end of the singly linked element list. */
            CvTestSeqElem* p = NULL;
            for(p=pTS->pElemList;p->next;p=p->next) {}
            p->next = pElemNew;
        }
    }   /* Read all videos. */

    {   /* Calculate elements and image size and video length: */
        CvTestSeqElem*  p = pTS->pElemList;
        int             num = 0;
        CvSize          MaxSize = {0,0};
        int             MaxFN = 0;

        for(p = pTS->pElemList; p; p=p->next, num++)
        {
            int     FN = p->FrameBegin+p->FrameNum;
            CvSize  S = {0,0};

            if(p->pImg && p->BG)
            {
                S.width = p->pImg->width;
                S.height = p->pImg->height;
            }

            if(MaxSize.width < S.width) MaxSize.width = S.width;
            if(MaxSize.height < S.height) MaxSize.height = S.height;
            if(MaxFN < FN)MaxFN = FN;
        }

        pTS->ListNum = num;

        /* Fall back to 320x240 when no background image defines the size. */
        if(MaxSize.width == 0)MaxSize.width = 320;
        if(MaxSize.height == 0)MaxSize.height = 240;

        MaxSize.width = cvRound(Scale*MaxSize.width);
        MaxSize.height = cvRound(Scale*MaxSize.height);

        pTS->pImg = cvCreateImage(MaxSize,IPL_DEPTH_8U,3);
        pTS->pImgMask = cvCreateImage(MaxSize,IPL_DEPTH_8U,1);
        pTS->FrameNum = MaxFN;

        /* Elements without an explicit frame count run the full sequence. */
        for(p = pTS->pElemList; p; p=p->next)
        {
            if(p->FrameNum<=0)p->FrameNum=MaxFN;
        }
    }   /* Calculate elements and image size. */

    return (CvTestSeq*)pTS;
}   /* cvCreateTestSeq */
开发者ID:DevShah,项目名称:18551,代码行数:73,代码来源:testseq.cpp
示例14: cvCheckChessboard
// does a fast check if a chessboard is in the input image. This is a workaround to
// a problem of cvFindChessboardCorners being slow on images with no chessboard
// - src: input image
// - size: chessboard size
// Returns 1 if a chessboard can be in this image and findChessboardCorners should be called,
// 0 if there is no chessboard, -1 in case of error
int cvCheckChessboard(IplImage* src, CvSize size)
{
if(src->nChannels > 1)
{
cvError(CV_BadNumChannels, "cvCheckChessboard", "supports single-channel images only",
__FILE__, __LINE__);
}
if(src->depth != 8)
{
cvError(CV_BadDepth, "cvCheckChessboard", "supports depth=8 images only",
__FILE__, __LINE__);
}
const int erosion_count = 1;
const float black_level = 20.f;
const float white_level = 130.f;
const float black_white_gap = 70.f;
#if defined(DEBUG_WINDOWS)
cvNamedWindow("1", 1);
cvShowImage("1", src);
cvWaitKey(0);
#endif //DEBUG_WINDOWS
CvMemStorage* storage = cvCreateMemStorage();
IplImage* white = cvCloneImage(src);
IplImage* black = cvCloneImage(src);
cvErode(white, white, NULL, erosion_count);
cvDilate(black, black, NULL, erosion_count);
IplImage* thresh = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
int result = 0;
for(float thresh_level = black_level; thresh_level < white_level && !result; thresh_level += 20.0f)
{
cvThreshold(white, thresh, thresh_level + black_white_gap, 255, CV_THRESH_BINARY);
#if defined(DEBUG_WINDOWS)
cvShowImage("1", thresh);
cvWaitKey(0);
#endif //DEBUG_WINDOWS
CvSeq* first = 0;
std::vector<std::pair<float, int> > quads;
cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
icvGetQuadrangleHypotheses(first, quads, 1);
cvThreshold(black, thresh, thresh_level, 255, CV_THRESH_BINARY_INV);
#if defined(DEBUG_WINDOWS)
cvShowImage("1", thresh);
cvWaitKey(0);
#endif //DEBUG_WINDOWS
cvFindContours(thresh, storage, &first, sizeof(CvContour), CV_RETR_CCOMP);
icvGetQuadrangleHypotheses(first, quads, 0);
const size_t min_quads_count = size.width*size.height/2;
std::sort(quads.begin(), quads.end(), less_pred);
// now check if there are many hypotheses with similar sizes
// do this by floodfill-style algorithm
const float size_rel_dev = 0.4f;
for(size_t i = 0; i < quads.size(); i++)
{
size_t j = i + 1;
for(; j < quads.size(); j++)
{
if(quads[j].first/quads[i].first > 1.0f + size_rel_dev)
{
break;
}
}
if(j + 1 > min_quads_count + i)
{
// check the number of black and white squares
std::vector<int> counts;
countClasses(quads, i, j, counts);
const int black_count = cvRound(ceil(size.width/2.0)*ceil(size.height/2.0));
const int white_count = cvRound(floor(size.width/2.0)*floor(size.height/2.0));
if(counts[0] < black_count*0.75 ||
counts[1] < white_count*0.75)
{
continue;
}
result = 1;
break;
}
}
}
//.........这里部分代码省略.........
开发者ID:bkuhlman80,项目名称:opencv,代码行数:101,代码来源:checkchessboard.cpp
示例15: detect_Face_and_eyes
Mat detect_Face_and_eyes( Mat& img, double scale, QVector <face> &find_faces)
{
    // Detect faces on a 1/scale-downsampled grayscale copy of 'img', then
    // detect eyes inside each face ROI. Accepted eyes are drawn onto 'img'
    // and every detection is appended to 'find_faces'. Returns the
    // annotated image.
    vector<Rect> faces;
    const static Scalar colors[] =
    {
        Scalar(255,0,0),
        Scalar(255,128,0),
        Scalar(255,255,0),
        Scalar(0,255,0),
        Scalar(0,128,255),
        Scalar(0,255,255),
        Scalar(0,0,255),
        Scalar(255,0,255)
    };
    Mat gray, smallImg;

    cvtColor( img, gray, COLOR_BGR2GRAY);
    double fx = 1 / scale;   // shrink factor: detection runs on a smaller image
    resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );

    obj.cascade.detectMultiScale( smallImg, faces,
        1.1, 2, 0
        |CASCADE_SCALE_IMAGE,
        Size(30, 30) );

    for ( size_t i = 0; i < faces.size(); i++ )
    {
        Scalar color = colors[i%8];   // cycle through 8 colors per face index
        int radius;
        Rect r = faces[i];
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;                 // stays (0,0) unless the shape test passes
        face temp;
        find_faces.push_back(temp);

        double aspect_ratio = (double)r.width/r.height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            // Map the face center back to full-resolution coordinates.
            center.x = cvRound((r.x + r.width*0.5)*scale);
            center.y = cvRound((r.y + r.height*0.5)*scale);
        }

        smallImgROI = smallImg( r );
        obj.nestedCascade.detectMultiScale(smallImgROI, nestedObjects,
            1.1, 2, 0
            |CASCADE_SCALE_IMAGE,
            Size(30, 30) );

        // BUG FIX: QVector::value(i) returns a *copy*, so the original call
        // mutated a temporary and the face center was silently discarded.
        // operator[] returns a reference to the stored element.
        find_faces[i].set_coord_face(center);

        QVector <Point> write_eyes_array;
        QVector <int> write_radius_eyes_array;
        for ( size_t j = 0; j < nestedObjects.size(); j++ )
        {
            Rect nr = nestedObjects[j];
            // Eye coordinates are relative to the face ROI on the small
            // image; offset and rescale into full-resolution coordinates.
            center.x = cvRound((r.x + nr.x + nr.width*0.5)*scale);
            center.y = cvRound((r.y + nr.y + nr.height*0.5)*scale);
            radius = cvRound((nr.width + nr.height)*0.25*scale);
            // Keep plausible eyes (radius >= 20) whose centers stay at least
            // 10 px inside the frame on both axes.
            // BUG FIX: the vertical upper-bound check compared center.x
            // against the image height; it must test center.y.
            if((radius>=20)&&((center.x>10)&&(center.x<img.size().width-10))&&((center.y>10)&&(center.y<img.size().height-10)))
            {
                write_radius_eyes_array.push_back(radius);
                write_eyes_array.push_back(center);
                circle(img, center, radius, color, 3, 8, 0 );
            }
        }
        find_faces[i].set_coord_eyes(write_eyes_array);
        find_faces[i].set_radius_eyes(write_radius_eyes_array);
    }
    return img;
}
开发者ID:vlastick,项目名称:opencv_summer,代码行数:69,代码来源:find_face_and_eyes.cpp
示例16: cvCircle
void VisuoThread::updatePFTracker()
{
Vector *trackVec=pftInPort.read(false);
Vector stereo;
if(trackVec!=NULL && trackVec->size()==12)
{
//must check if the tracker has gone mad.
if(checkTracker(trackVec))
{
trackMutex.wait();
stereoTracker.vec=*trackVec;
trackMutex.post();
stereo.resize(4);
stereo[0]=stereoTracker.vec[0];
stereo[1]=stereoTracker.vec[1];
|
请发表评论