本文整理汇总了C++中cvTermCriteria函数 的典型用法代码示例。如果您正苦于以下问题:C++ cvTermCriteria函数的具体用法?C++ cvTermCriteria怎么用?C++ cvTermCriteria使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvTermCriteria函数 的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: throw
/*!
Initialise the tracking by extracting KLT keypoints on the provided image.
\param I : Grey level image used as input. This image should have only 1 channel.
\param mask : Image mask used to restrict the keypoint detection area.
If mask is NULL, all the image will be considered.
\exception vpTrackingException::initializationError : If the image I is not
initialized, or if the image or the mask have bad coding format.
*/
void vpKltOpencv::initTracking(const IplImage *I, const IplImage *mask)
{
if (!I) {
throw(vpException(vpTrackingException::initializationError, "Image Not initialized")) ;
}
if (I->depth != IPL_DEPTH_8U || I->nChannels != 1) {
throw(vpException(vpTrackingException::initializationError, "Bad Image format")) ;
}
if (mask) {
if (mask->depth != IPL_DEPTH_8U || I->nChannels != 1) {
throw(vpException(vpTrackingException::initializationError, "Bad Image format")) ;
}
}
//Creation des buffers
CvSize Sizeim, SizeI;
SizeI = cvGetSize(I);
bool b_imOK = true;
if(image != NULL){
Sizeim = cvGetSize(image);
if(SizeI.width != Sizeim.width || SizeI.height != Sizeim.height) b_imOK = false;
}
if(image == NULL || prev_image == NULL || pyramid==NULL || prev_pyramid ==NULL || !b_imOK){
reset();
image = cvCreateImage(cvGetSize(I), 8, 1);image->origin = I->origin;
prev_image = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
pyramid = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
prev_pyramid = cvCreateImage(cvGetSize(I), IPL_DEPTH_8U, 1);
}else{
swap_temp = 0;
countFeatures = 0;
countPrevFeatures = 0;
flags = 0;
initialized = 0;
globalcountFeatures = 0;
}
initialized = 1;
//Import
cvCopy(I, image, 0);
//Recherche de points d'int�rets
countFeatures = maxFeatures;
countPrevFeatures = 0;
IplImage* eig = cvCreateImage(cvGetSize(image), 32, 1);
IplImage* temp = cvCreateImage(cvGetSize(image), 32, 1);
cvGoodFeaturesToTrack(image, eig, temp, features,
&countFeatures, quality, min_distance,
mask, block_size, use_harris, harris_free_parameter);
cvFindCornerSubPix(image, features, countFeatures, cvSize(win_size, win_size),
cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER|
CV_TERMCRIT_EPS,20,0.03));
cvReleaseImage(&eig);
cvReleaseImage(&temp);
if (OnInitialize)
OnInitialize(_tid);
//printf("Number of features at init: %d\n", countFeatures);
for (int boucle=0; boucle<countFeatures;boucle++) {
featuresid[boucle] = globalcountFeatures;
globalcountFeatures++;
if (OnNewFeature){
OnNewFeature(_tid, boucle, featuresid[boucle], features[boucle].x,
features[boucle].y);
}
}
}
开发者ID:ILoveFree2, 项目名称:visp-deb, 代码行数:82, 代码来源:vpKltOpencv.cpp
示例2: lk2
// Lucas-Kanade
// Forward-backward pyramidal LK tracking between imgI and imgJ.
// Returns a 4 x 150 matrix whose first nPts columns hold
// [x; y; forward-backward distance; NCC score] per point; columns of
// points whose tracking failed are filled with NaN.
Eigen::Matrix<double, 4, 150> lk2(IplImage* imgI, IplImage* imgJ, Eigen::Matrix<double, 2,
		150> const & pointsI, Eigen::Matrix<double, 2, 150> const & pointsJ,
		unsigned int sizeI, unsigned int sizeJ, unsigned int level) {

	double nan = std::numeric_limits<double>::quiet_NaN();

	// Pyramid depth: caller-supplied, or 5 by default.
	int Level;
	if (level != 0) {
		Level = (int) level;
	} else {
		Level = 5;
	}

	int I = 0;
	int J = 1;
	int Winsize = 10;

	// Persistent buffers: allocate each pyramid image once on the first
	// call and reuse it afterwards.  (Bug fix: the original also did
	// IMG[x] = cvCreateImage(...) here and immediately overwrote the
	// pointer with the caller's image, leaking one IplImage per slot.)
	if (IMG[I] == 0) {
		CvSize imageSize = cvGetSize(imgI);
		PYR[I] = cvCreateImage(imageSize, 8, 1);
	}
	IMG[I] = imgI;
	if (IMG[J] == 0) {
		CvSize imageSize = cvGetSize(imgJ);
		PYR[J] = cvCreateImage(imageSize, 8, 1);
	}
	IMG[J] = imgJ;

	// Points
	int nPts = sizeI;
	if (nPts != sizeJ) {
		std::cout << "Inconsistent input!" << std::endl;
		// Bug fix: the original returned a 1x1 dynamic zero matrix, which
		// trips an Eigen size assertion when converted to the fixed 4x150
		// return type; return a correctly sized zero matrix instead.
		return Eigen::Matrix<double, 4, 150>::Zero();
	}

	// Per-call scratch buffers (released before returning — the original
	// leaked all of these on every invocation).
	points[0] = (CvPoint2D32f*) cvAlloc(nPts * sizeof(CvPoint2D32f)); // template
	points[1] = (CvPoint2D32f*) cvAlloc(nPts * sizeof(CvPoint2D32f)); // target
	points[2] = (CvPoint2D32f*) cvAlloc(nPts * sizeof(CvPoint2D32f)); // forward-backward

	for (int i = 0; i < nPts; i++) {
		points[0][i].x = pointsI(0, i);
		points[0][i].y = pointsI(1, i);
		points[1][i].x = pointsJ(0, i);
		points[1][i].y = pointsJ(1, i);
		points[2][i].x = pointsI(0, i);
		points[2][i].y = pointsI(1, i);
	}

	float *ncc = (float*) cvAlloc(nPts * sizeof(float));
	float *fb = (float*) cvAlloc(nPts * sizeof(float));
	char *status = (char*) cvAlloc(nPts);

	// Forward pass: track the template points from I into J.
	cvCalcOpticalFlowPyrLK(IMG[I], IMG[J], PYR[I], PYR[J], points[0],
			points[1], nPts, cvSize(win_size, win_size), Level, status, 0,
			cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
			CV_LKFLOW_INITIAL_GUESSES);
	// Backward pass: track the results back from J to I, reusing the
	// pyramids built by the forward pass.
	cvCalcOpticalFlowPyrLK(IMG[J], IMG[I], PYR[J], PYR[I], points[1],
			points[2], nPts, cvSize(win_size, win_size), Level, 0, 0,
			cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
			CV_LKFLOW_INITIAL_GUESSES | CV_LKFLOW_PYR_A_READY
					| CV_LKFLOW_PYR_B_READY );

	// Validation measures: patch correlation and forward-backward distance.
	normCrossCorrelation(IMG[I], IMG[J], points[0], points[1], nPts, status,
			ncc, Winsize, CV_TM_CCOEFF_NORMED);
	euclideanDistance(points[0], points[2], fb, nPts);

	// Output (columns beyond nPts are left uninitialized, as before).
	int M = 4;
	Eigen::MatrixXd output(M, 150);
	for (int i = 0; i < nPts; i++) {
		if (status[i] == 1) {
			output(0, i) = (double) points[1][i].x;
			output(1, i) = (double) points[1][i].y;
			output(2, i) = (double) fb[i];
			output(3, i) = (double) ncc[i];
		} else {
			output(0, i) = nan;
			output(1, i) = nan;
			output(2, i) = nan;
			output(3, i) = nan;
		}
	}

	// Bug fix: free the per-call allocations (previously leaked).
	cvFree(&points[0]);
	cvFree(&points[1]);
	cvFree(&points[2]);
	cvFree(&ncc);
	cvFree(&fb);
	cvFree(&status);

	return output;
}
开发者ID:spatial, 项目名称:OpenTLDC, 代码行数:95, 代码来源:lk.cpp
示例3: main
// Estimates the camera's extrinsic pose (rotation and translation) with
// respect to a chessboard seen in a single image, given previously saved
// intrinsics/distortion.
// Usage: prog <board_w> <board_h> <intrinsics.xml> <distortion.xml> <image>
// NOTE(review): this listing is truncated ("code omitted" marker below);
// the interactive loop using drawn_image/key and the "translation" window
// is not visible here.
int main(int argc, char* argv[]) {
if(argc != 6){
printf("too few args\n");
return -1;
}
// INPUT PARAMETERS:
//
int board_w = atoi(argv[1]);
int board_h = atoi(argv[2]);
int board_n = board_w * board_h;
CvSize board_sz = cvSize( board_w, board_h );
// Camera intrinsics and distortion coefficients from a prior calibration.
CvMat* intrinsic = (CvMat*)cvLoad(argv[3]);
CvMat* distortion = (CvMat*)cvLoad(argv[4]);
IplImage* image = 0;
IplImage* gray_image = 0;
if( (image = cvLoadImage(argv[5])) == 0 ) {
printf("Error: Couldn't load %s\n",argv[5]);
return -1;
}
// Detected 2-D corners and their 3-D board-frame coordinates.
CvMat* image_points = cvCreateMat(1*board_n,2,CV_32FC1);
CvMat* object_points = cvCreateMat(1*board_n,3,CV_32FC1);
CvMat* objdrawpoints = cvCreateMat(1,1,CV_32FC3);
CvMat* imgdrawpoints = cvCreateMat(1,1,CV_32FC2);
float x=0;
float y=0;
float z=0;
// Physical size of one chessboard square (unit not stated here —
// TODO confirm against the calibration setup).
double grid_width=2.85;
gray_image = cvCreateImage( cvGetSize(image), 8, 1 );
cvCvtColor(image, gray_image, CV_BGR2GRAY );
// Detect the inner chessboard corners on the greyscale image.
CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
int corner_count = 0;
int found = cvFindChessboardCorners(
gray_image,
board_sz,
corners,
&corner_count,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
);
if(!found){
printf("Couldn't aquire chessboard on %s, "
"only found %d of %d corners\n",
argv[5],corner_count,board_n
);
return -1;
}
//Get Subpixel accuracy on those corners:
cvFindCornerSubPix(
gray_image,
corners,
corner_count,
cvSize(11,11),
cvSize(-1,-1),
cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1 )
);
// If we got a good board, add it to our data
// Object points lie on a grid_width-spaced grid in the board plane (z=0).
for( int i=0, j=0; j<board_n; ++i,++j ) {
CV_MAT_ELEM(*image_points, float,i,0) = corners[j].x;
CV_MAT_ELEM(*image_points, float,i,1) = corners[j].y;
CV_MAT_ELEM(*object_points,float,i,0) =grid_width*( j/board_w);
// cout<<j/board_w<<" "<<j%board_w<<endl;
CV_MAT_ELEM(*object_points,float,i,1) = grid_width*(j%board_w);
CV_MAT_ELEM(*object_points,float,i,2) = 0.0f;
}
// DRAW THE FOUND CHESSBOARD
//
cvDrawChessboardCorners(
image,
board_sz,
corners,
corner_count,
found
);
// FIND THE HOMOGRAPHY
//
CvMat *trans = cvCreateMat( 1, 3, CV_32F);
CvMat *rot = cvCreateMat( 1, 3, CV_32F);
// LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
//
// Solve the PnP problem: board pose (rot, trans) in the camera frame.
cvFindExtrinsicCameraParams2(object_points,image_points,intrinsic,distortion,rot,trans);
// cvSave("trans.xml",trans);
// cvSave("rot.xml",rot);
int key = 0;
IplImage *drawn_image = cvCloneImage(image);
cvNamedWindow("translation");
// LOOP TO ALLOW USER TO PLAY WITH HEIGHT:
//
// escape key stops
//
// cvSetZero(trans);
//......... part of the code omitted here .........
开发者ID:erebuswolf, 项目名称:IGVC-Code, 代码行数:101, 代码来源:transcalib.cpp
示例4: main
// Sparse feature matching demo: detects strong corners in image1, tracks
// them into image2 with pyramidal LK, then builds a Delaunay subdivision
// over the matched points.
// NOTE(review): argc is checked against 2 but argv[2] is read below —
// the guard presumably should be `argc < 3`; confirm before relying on it.
// NOTE(review): this listing is truncated ("code omitted" marker below).
int main(int argc, char * argv[])
{
if(argc < 2) {
fprintf(stderr, "%s image1 image2\n", argv[0]);
return 1;
}
char * im1fname = argv[1];
char * im2fname = argv[2];
// Feature detection runs on the greyscale version of image1.
IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_GRAYSCALE);
IplImage * eigenvalues = cvCreateImage(cvGetSize(image1), 32, 1);
IplImage * temp = cvCreateImage(cvGetSize(image1), 32, 1);
// Corner-detector tuning parameters.
int count = MAX_COUNT;
double quality = 0.5;
// double min_distance = 2;
double min_distance = 50;
int block_size = 7;
int use_harris = 0;
int win_size = 10;
int flags = 0;
CvPoint2D32f * source_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
CvPoint2D32f * dest_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
CvPoint2D32f * delaunay_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
// Detect up to MAX_COUNT strong corners in image1.
cvGoodFeaturesToTrack( image1, eigenvalues, temp, source_points, &count,
quality, min_distance, 0, block_size, use_harris, 0.04 );
printf("%d features\n",count);
setbuf(stdout, NULL);
printf("Finding corner subpix...");
// Refine the corners to sub-pixel accuracy.
cvFindCornerSubPix( image1, source_points, count,
cvSize(win_size,win_size), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
printf("done.\n");
cvReleaseImage(&eigenvalues);
cvReleaseImage(&temp);
IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_GRAYSCALE);
char * status = (char*)cvAlloc(sizeof(char)*MAX_COUNT);
IplImage * pyramid = cvCreateImage( cvGetSize(image1), IPL_DEPTH_8U, 1 );
IplImage * second_pyramid = cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 1 );
printf("Computing optical flow...");
// Track every detected corner from image1 into image2.
cvCalcOpticalFlowPyrLK(image1, image2, pyramid, second_pyramid, source_points,
dest_points, count, cvSize(win_size,win_size), 4, status, 0,
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03),
flags);
printf("done.\n");
int num_matches = 0;
int num_out_matches = 0;
int max_dist = 30;
int offset = 200;
// Delaunay subdivision spanning the whole extent of image1.
CvMemStorage * storage = cvCreateMemStorage(0);
CvSubdiv2D * delaunay = cvCreateSubdivDelaunay2D( cvRect(0,0,image1->width,image1->height), storage);
// Reload both images in colour for rendering.
cvReleaseImage(&image1);
cvReleaseImage(&image2);
image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);
cvSet( image1, cvScalarAll(255) );
// Maps a source corner position to its tracked destination position.
std::map<CvPoint, CvPoint> point_lookup_map;
std::vector<std::pair<CvPoint, CvPoint> > point_lookup;
// put corners in the point lookup as going to themselves
point_lookup_map[cvPoint(0,0)] = cvPoint(0,0);
point_lookup_map[cvPoint(0,image1->height-1)] = cvPoint(0,image1->height-1);
point_lookup_map[cvPoint(image1->width-1,0)] = cvPoint(image1->width-1,0);
point_lookup_map[cvPoint(image1->width-1,image1->height-1)] = cvPoint(image1->width-1,image1->height-1);
point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,0), cvPoint(0,0)));
point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,image1->height-1), cvPoint(0,image1->height-1)));
point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,0), cvPoint(image1->width-1,0)));
point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,image1->height-1), cvPoint(image1->width-1,image1->height-1)));
printf("Inserting corners...");
// put corners in the Delaunay subdivision
for(unsigned int i = 0; i < point_lookup.size(); i++) {
cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(point_lookup[i].first) );
}
printf("done.\n");
CvSubdiv2DEdge proxy_edge;
// Walk the successfully tracked matches (continues in the omitted part).
for(int i = 0; i < count; i++) {
if(status[i]) {
CvPoint source = cvPointFrom32f(source_points[i]);
CvPoint dest = cvPointFrom32f(dest_points[i]);
//......... part of the code omitted here .........
开发者ID:ryanfb, 项目名称:homer, 代码行数:101, 代码来源:opticaltri.cpp
示例5: CVAPI
// Calibration flags: keep the 5th/6th radial distortion coefficient fixed
// during the optimisation.
#define CV_CALIB_FIX_K5 4096
#define CV_CALIB_FIX_K6 8192
// Enable the rational (6-coefficient) radial distortion model.
#define CV_CALIB_RATIONAL_MODEL 16384
/* Finds intrinsic and extrinsic camera parameters
from a few views of known calibration pattern */
CVAPI(double) cvCalibrateCamera2( const CvMat* object_points,
const CvMat* image_points,
const CvMat* point_counts,
CvSize image_size,
CvMat* camera_matrix,
CvMat* distortion_coeffs,
CvMat* rotation_vectors CV_DEFAULT(NULL),
CvMat* translation_vectors CV_DEFAULT(NULL),
int flags CV_DEFAULT(0),
CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );
/* Computes various useful characteristics of the camera from the data computed by
cvCalibrateCamera2 */
CVAPI(void) cvCalibrationMatrixValues( const CvMat *camera_matrix,
CvSize image_size,
double aperture_width CV_DEFAULT(0),
double aperture_height CV_DEFAULT(0),
double *fovx CV_DEFAULT(NULL),
double *fovy CV_DEFAULT(NULL),
double *focal_length CV_DEFAULT(NULL),
CvPoint2D64f *principal_point CV_DEFAULT(NULL),
double *pixel_aspect_ratio CV_DEFAULT(NULL));
// Stereo calibration flags: keep the per-camera intrinsics fixed /
// constrain both cameras to the same focal length.
#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512
开发者ID:ThomasLengeling, 项目名称:ParticleTriangle_Robots, 代码行数:32, 代码来源:calib3d.hpp
示例6: color_cluster
// K-means colour clustering: segments the image `filename` into nCuster
// clusters in BGR space, displays/saves the label image, and splits each
// cluster into its own image.
// NOTE(review): this listing is truncated ("code omitted" marker below).
int color_cluster(char *filename)
{
IplImage* originimg=cvLoadImage(filename);
int i,j;
CvMat *samples=cvCreateMat((originimg->width)*(originimg->height),1,CV_32FC3);//Sample matrix; CV_32FC3 = 32-bit float, 3 channels (colour image)
CvMat *clusters=cvCreateMat((originimg->width)*(originimg->height),1,CV_32SC1);//Cluster-label matrix; CV_32SC1 = 32-bit integer, 1 channel
int k=0;
for (i=0;i<originimg->width;i++)
{
for (j=0;j<originimg->height;j++)
{
CvScalar s;
//Fetch the three channel values (BGR) of the pixel
s.val[0]=(float)cvGet2D(originimg,j,i).val[0];//B
s.val[1]=(float)cvGet2D(originimg,j,i).val[1];//G
s.val[2]=(float)cvGet2D(originimg,j,i).val[2];//R
cvSet2D(samples,k++,0,s);//Append the pixel's channel values to the sample matrix in order
}
}
int nCuster=2;//Number of clusters; could later be chosen automatically by learning
cvKMeans2(samples,nCuster,clusters,cvTermCriteria(CV_TERMCRIT_ITER,100,1.0));//Run k-means: at most 100 iterations, accuracy 1.0
//Image showing the whole clustering result
IplImage *clusterimg=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
//Images used to display each cluster separately
IplImage *cluster_img0=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
IplImage *cluster_img1=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
IplImage *cluster_img2=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
k=0;
int val=0;
float step=255/(nCuster-1);
CvScalar bg={223,124,124,0};//Background colour (the original comment said "white", but {223,124,124} is not white — NOTE(review): confirm intent)
// Pre-fill the per-cluster images with the background colour.
// NOTE(review): cluster_img1 is filled twice and cluster_img2 never gets
// the background — the second cluster_img1 line presumably should target
// cluster_img2.
for (i=0;i<originimg->width;i++)
{
for (j=0;j<originimg->height;j++)
{
cvSet2D(cluster_img0,j,i,bg);
cvSet2D(cluster_img1,j,i,bg);
cvSet2D(cluster_img1,j,i,bg);
}
}
for (i=0;i<originimg->width;i++)
{
for (j=0;j<originimg->height;j++)
{
val=(int)clusters->data.i[k++];
CvScalar s;
s.val[0]=255-val*step;//Give each cluster label its own grey value
cvSet2D(clusterimg,j,i,s); //store the clustered image
//Separate each cluster into its own image
switch(val)
{
case 0:
cvSet2D(cluster_img0,j,i,s);break;//white class
case 1:
cvSet2D(cluster_img1,j,i,s);break;//grey class
case 2:
cvSet2D(cluster_img2,j,i,s);break;//black class
default:
break;
}
}
}
//cvSaveImage("PicVideo//cluster_img0.png",cluster_img0);
//cvSaveImage("PicVideo//cluster_img1.png",cluster_img1);
//cvSaveImage("PicVideo//cluster_img2.png",cluster_img2);
cvNamedWindow( "原始图像", 1 );
cvNamedWindow( "聚类图像", 1 );
cvShowImage( "原始图像", originimg );
cvShowImage( "聚类图像", clusterimg );
cvSaveImage("clusterimg.png",clusterimg);//save the result
cvWaitKey(0);
cvDestroyWindow( "原始图像" );
cvDestroyWindow( "聚类图像" );
cvReleaseImage( &originimg );
cvReleaseImage( &clusterimg );
// NOTE(review): cluster_img0 is released twice below and cluster_img2 is
// never released (double free + leak) — the last line presumably should
// release cluster_img2.
cvReleaseImage(&cluster_img0);
cvReleaseImage(&cluster_img1);
cvReleaseImage(&cluster_img0);
return 0;
//......... part of the code omitted here .........
开发者ID:crescent-hacker, 项目名称:OpenCV, 代码行数:101, 代码来源:color_cluster.cpp
示例7: main
// Collects chessboard corner observations from a list of image files,
// accumulating image/object point pairs for a later camera calibration.
// argv: [1] intrinsics output path, [2] distortions output path,
// [3..] image paths.
// NOTE(review): this listing is truncated; the calibration call that would
// use intrinsic_matrix/distortion_coeffs is not visible here.
int main(int argc, char * argv[])
{
int corner_count;
int successes = 0;
int step, frame = 0;
const char* intrinsics_path = argv[1];
const char* distortions_path = argv[2];
int total = argc - 3 ;
int start = 3;
const char* loc = argv[start] ;
board_w = 7; // Board width in squares
board_h = 4; // Board height
n_boards = total; // Number of boards
int board_n = board_w * board_h;
CvSize board_sz = cvSize( board_w, board_h );
// Allocate Storage
CvMat* image_points = cvCreateMat( n_boards*board_n, 2, CV_32FC1 );
CvMat* object_points = cvCreateMat( n_boards*board_n, 3, CV_32FC1 );
CvMat* point_counts = cvCreateMat( n_boards, 1, CV_32SC1 );
CvMat* intrinsic_matrix = cvCreateMat( 3, 3, CV_32FC1 );
CvMat* distortion_coeffs = cvCreateMat( 5, 1, CV_32FC1 );
CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
IplImage *image = cvLoadImage( loc );
//IplImage *image = cvQueryFrame(capture);
IplImage *gray_image = cvCreateImage( cvGetSize( image ), 8, 1 );
// Capture Corner views loop until we've got n_boards
// successful captures (all corners on the board are found)
while( start < total ){
// Skip every board_dt frames to allow user to move chessboard
// if( frame++ % board_dt == 0 ){
// Find chessboard corners:
int found = cvFindChessboardCorners( image, board_sz, corners,
&corner_count, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );
// Get subpixel accuracy on those corners
cvCvtColor( image, gray_image, CV_BGR2GRAY );
cvFindCornerSubPix( gray_image, corners, corner_count, cvSize( 11, 11 ),
cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
// Draw it
cvDrawChessboardCorners( image, board_sz, corners, corner_count, found );
if( found )
{
cvSaveImage( "/tmp/grid_save.png", image);
}
// If we got a good board, add it to our data
if( corner_count == board_n ){
step = successes*board_n;
for( int i=step, j=0; j < board_n; ++i, ++j ){
CV_MAT_ELEM( *image_points, float, i, 0 ) = corners[j].x;
CV_MAT_ELEM( *image_points, float, i, 1 ) = corners[j].y;
CV_MAT_ELEM( *object_points, float, i, 0 ) = j/board_w;
CV_MAT_ELEM( *object_points, float, i, 1 ) = j%board_w;
CV_MAT_ELEM( *object_points, float, i, 2 ) = 0.0f;
}
CV_MAT_ELEM( *point_counts, int, successes, 0 ) = board_n;
successes++;
}
// }
// Advance to the next image path.
// NOTE(review): wrapping `start` back to 1 points at argv[1] (the
// intrinsics path, not an image) — confirm the intent.  Each iteration
// also reloads `image` without releasing the previous one (leak).
if( start < total )
{
start++;
}
else if ( start == total )
{
start = 1;
// return -1;
}
loc = argv[start] ;
image = cvLoadImage( loc );
} // End collection while loop
开发者ID:kthakore, 项目名称:simcam, 代码行数:85, 代码来源:calibrate.c
示例8: cvGoodFeaturesToTrack
// Optical flow computation (pyramidal Lucas-Kanade).
void OpticalFlowLK::make()
{
if(!imgA || !imgB || !eig_image || !tmp_image)
{
return;
}
int i=0;
#if 1
cornerCount = LK_MAX_CORNERS;
//
// find good feature points to track
//
cvGoodFeaturesToTrack( imgA, eig_image, tmp_image,
cornersA, // returned array of detected corners
&cornerCount, // returns the number of detected corners
0.01, // multiplier defining the minimal accepted corner quality
5.0, // limit defining the minimal possible distance between corners
0, // mask defining the ROI (if NULL, the whole image is searched)
5, // averaging block size
0, // if !=0 cvCornerHarris() is used, otherwise cvCornerMinEigenVal()
0.04 ); // parameter for cvCornerHarris()
#else
//
// Cover the image with a uniform grid of points
//
int step_x = imgA->width / 5;
int step_y = imgA->height / 5;
int points_count = (imgA->width / step_x + 1) * (imgA->height / step_y + 1);
if(points_count>LK_MAX_CORNERS){
delete []cornersA;
cornersA=0;
delete []cornersB;
cornersB=0;
cornersA= new CvPoint2D32f[ points_count ];
cornersB= new CvPoint2D32f[ points_count ];
featuresFound = new char[ points_count ];
featureErrors = new float[ points_count ];
assert(cornersA);
assert(cornersB);
assert(featuresFound);
assert(featureErrors);
}
cornerCount = 0;
for ( j = 1; j < imgA->height; j += step_y){
for ( i = 1; i < imgA->width; i += step_x){
cornersA[cornerCount] = cvPoint2D32f((float)i, (float)j);
cornerCount++;
}
}
#endif
//
// refine the point coordinates to sub-pixel accuracy
//
cvFindCornerSubPix( imgA, cornersA, cornerCount,
cvSize(LK_WINDOW_SIZE, LK_WINDOW_SIZE), // half of the side length of the search window
cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, LK_ITER_COUNT, 0.03) );
// compute the pyramid buffer size
CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
if(pyrA!=0)
{
cvReleaseImage(&pyrA);
cvReleaseImage(&pyrB);
}
pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
//
// compute the optical flow
//
cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB,
cornersA,
cornersB,
cornerCount,
cvSize( LK_WINDOW_SIZE, LK_WINDOW_SIZE ),// search window size at each pyramid level
5, // maximal pyramid level
featuresFound, // element is set to 1 if the corresponding feature was found
featureErrors, // array of differences between original and moved points (may be NULL)
cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, LK_ITER_COUNT, .3 ),
0 );
center.x=0.0;
center.y=0.0;
cornerCountGood = 0;
for( i=0; i<cornerCount; i++ )
{
// skip points that were not found or have a large error
// NOTE(review): despite the comment above, this branch *accumulates*
// exactly those failed points into `center` — the condition looks
// inverted; confirm against the omitted remainder of the loop.
if( featuresFound[i]==0 || featureErrors[i]>LK_MAX_FEATURE_ERROR ) {
center.x += cornersB[i].x;
center.y += cornersB[i].y;
//......... part of the code omitted here .........
开发者ID:Aadi2110, 项目名称:openrobovision, 代码行数:101, 代码来源:optical_flow.cpp
示例9: cvTermCriteria
// Default constructor: preconfigures the SVM parameters for C-support
// vector classification with a linear kernel.
SVMConstructor::SVMConstructor(){
    // Stop training after at most 1000 iterations (accuracy 1e-6).
    const CvTermCriteria stop_criteria = cvTermCriteria(CV_TERMCRIT_ITER, 1000, 1e-6);
    _params.kernel_type = CvSVM::LINEAR; // no mapping into a higher-dimensional space
    _params.svm_type    = CvSVM::C_SVC;  // n-class classification with penalty C
    _params.term_crit   = stop_criteria;
}
开发者ID:alexschlueter, 项目名称:detect-emotion, 代码行数:5, 代码来源:svm.cpp
示例10: main222
// CamShift colour-tracking demo (derived from OpenCV's camshiftdemo):
// grabs frames, builds a hue histogram of a user-selected region and
// tracks that region with cvCamShift.
// NOTE(review): `frame` is declared 0 and never assigned from `capture`
// (no cvQueryFrame call is visible), so `if( !frame ) break;` exits the
// loop immediately — presumably a line was lost; confirm against the
// original demo.
// NOTE(review): this listing is truncated ("code omitted" marker below).
int main222( int argc, char** argv )
{
CvCapture* capture = 0;
// Open a camera (single-digit argument) or an AVI file.
if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
else if( argc == 2 )
capture = cvCaptureFromAVI( argv[1] );
if( !capture )
{
fprintf(stderr,"Could not initialize capturing...\n");
return -1;
}
printf( "Hot keys: \n"
"\tESC - quit the program\n"
"\tc - stop the tracking\n"
"\tb - switch to/from backprojection view\n"
"\th - show/hide object histogram\n"
"To initialize tracking, select the object with mouse\n" );
cvNamedWindow( "Histogram", 1 );
cvNamedWindow( "CamShiftDemo", 1 );
cvSetMouseCallback( "CamShiftDemo", on_mouse, 0 );
cvCreateTrackbar( "Vmin", "CamShiftDemo", &vmin, 256, 0 );
cvCreateTrackbar( "Vmax", "CamShiftDemo", &vmax, 256, 0 );
cvCreateTrackbar( "Smin", "CamShiftDemo", &smin, 256, 0 );
for(;;)
{
IplImage* frame = 0;
int i, bin_w, c;
if( !frame )
break;
if( !image )
{
/* allocate all the buffers */
image = cvCreateImage( cvGetSize(frame), 8, 3 );
image->origin = frame->origin;
hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
hue = cvCreateImage( cvGetSize(frame), 8, 1 );
mask = cvCreateImage( cvGetSize(frame), 8, 1 );
backproject = cvCreateImage( cvGetSize(frame), 8, 1 );
hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
histimg = cvCreateImage( cvSize(320,200), 8, 3 );
cvZero( histimg );
}
cvCopy( frame, image, 0 );
cvCvtColor( image, hsv, CV_BGR2HSV );
if( track_object )
{
int _vmin = vmin, _vmax = vmax;
// Mask out pixels outside the saturation/value thresholds.
cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
cvSplit( hsv, hue, 0, 0, 0 );
if( track_object < 0 )
{
// Fresh selection: build the hue histogram of the selected ROI.
float max_val = 0.f;
cvSetImageROI( hue, selection );
cvSetImageROI( mask, selection );
cvCalcHist( &hue, hist, 0, mask );
cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
cvResetImageROI( hue );
cvResetImageROI( mask );
track_window = selection;
track_object = 1;
// Render the histogram for display.
cvZero( histimg );
bin_w = histimg->width / hdims;
for( i = 0; i < hdims; i++ )
{
int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
CvScalar color = hsv2rgb(i*180.f/hdims);
cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
cvPoint((i+1)*bin_w,histimg->height - val),
color, -1, 8, 0 );
}
}
// Back-project the histogram and run CamShift on the result.
cvCalcBackProject( &hue, backproject, hist );
cvAnd( backproject, mask, backproject, 0 );
cvCamShift( backproject, track_window,
cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
&track_comp, &track_box );
track_window = track_comp.rect;
if( backproject_mode )
cvCvtColor( backproject, image, CV_GRAY2BGR );
if( !image->origin )
track_box.angle = -track_box.angle;
cvEllipseBox( image, track_box, CV_RGB(255,0,0), 3, CV_AA, 0 );
//......... part of the code omitted here .........
开发者ID:flair2005, 项目名称:PineApple, 代码行数:101, 代码来源:PineApple.cpp
示例11: mexFunction
void mexFunction(int plhs_size, mxArray *plhs[], int prhs_size, const mxArray *prhs[])
{
// Load images
if (prhs_size ==4) {
win_size = *mxGetPr(prhs[3]);
}
int N = mxGetN(prhs[0]);
int M = mxGetM(prhs[0]);
grey0 = cvCreateImage( cvSize(N, M), 8, 1 );
grey1 = cvCreateImage( cvSize(N, M), 8, 1 );
loadImageFromMatlab(prhs[0],grey0);
loadImageFromMatlab(prhs[1],grey1);
// Load feature points
double *fp = mxGetPr(prhs[2]);
int num_pts = mxGetN(prhs[2]);
points[0] = (CvPoint2D32f*)cvAlloc(num_pts*sizeof(points[0][0]));
points[1] = (CvPoint2D32f*)cvAlloc(num_pts*sizeof(points[0][0]));
char *status = (char*)cvAlloc(num_pts);
float *error = (float*) cvAlloc(num_pts*sizeof(float));
for (int i = 0; i < num_pts; i++) {
points[0][i].x = fp[2*i];
points[0][i].y = fp[2*i+1];
}
// neni treba, urychleni z fpt 40 -> fps 200
//cvFindCornerSubPix( grey0, points[0], num_pts, cvSize(win_size,win_size), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
pyramid1 = cvCreateImage( cvGetSize(grey1), 8, 1 );
pyramid0 = cvCreateImage( cvGetSize(grey1), 8, 1 );
cvCalcOpticalFlowPyrLK( grey0, grey1, pyramid0, pyramid1, points[0], points[1], num_pts, cvSize(win_size,win_size), 6, status, error, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), 0 );
// Output
plhs[0] = mxCreateDoubleMatrix(6, num_pts, mxREAL);
double *output = mxGetPr(plhs[0]);
for (int i = 0; i < num_pts; i++) {
output[6*i] = (double) points[0][i].x;
output[6*i+1] = (double) points[0][i].y;
output[6*i+2] = (double) points[1][i].x;
output[6*i+3] = (double) points[1][i].y;
output[6*i+4] = (double) error[i];
output[6*i+5] = (double) status[i];
//output[5*i+5] = (double) error[i];
}
// Tidy up
cvReleaseImage( &pyramid0 );
cvReleaseImage( &pyramid1 );
cvReleaseImage( &grey0 );
cvReleaseImage( &grey1 );
return;
}
开发者ID:bogdosarov, 项目名称:TLD, 代码行数:57, 代码来源:lk.backup.cpp
示例12: printf
/*!
\fn CvBinGabAdaFeatureSelect::svmlearning(const char* path, int nofeatures, CvSVM * svm)

Trains an SVM classifier from the first `nofeatures` selected Gabor
features (XM2VTS database only).  For each of 200 subjects x 4 pictures
found under `path` (files named "<sub>_<pic>.bmp"), each feature is
evaluated on the image to fill an 800 x nofeatures training matrix; the
response is 1.0 or 2.0 depending on getGender().  The trained model is
written into `svm` and saved to "svm.xml".
*/
void CvBinGabAdaFeatureSelect::svmlearning(const char* path, int nofeatures, CvSVM * svm)
{
if( db_type == XM2VTS )
{
printf("Training an SVM classifier ................\n");
// NOTE(review): `xm2vts`, `l` and `fal` below are set but never used in
// the visible code.
CvXm2vts *xm2vts = (CvXm2vts*)database;
int nTrainingExample = 200*4;
CvMat* trainData = cvCreateMat(nTrainingExample, nofeatures, CV_32FC1);
CvMat* response = cvCreateMat(nTrainingExample, 1, CV_32FC1);
for (int i = 0; i < nofeatures; i++)
{
/* load feature value */
CvGaborFeature *feature;
feature = new_pool->getfeature(i);
printf("Getting the %d feature ............\n", i+1);
char *filename = new char[50];
//training validation
double l, t;
int fal = 0;
for(int sub = 1; sub <= 200; sub++)
{
// Response label: 1.0 or 2.0 depending on the subject's gender flag.
if (((CvXm2vts*)database)->getGender( sub )) t = 1.0;
else t = 2.0;
for(int pic = 1; pic <= 4; pic++)
{
sprintf(filename, "%s/%d_%d.bmp", path, sub, pic);
IplImage *img = cvLoadImage( filename, CV_LOAD_IMAGE_ANYCOLOR );
// NOTE(review): grayimg is built from img but feature->val() below is
// fed the original (possibly colour) image — presumably grayimg was
// intended; confirm against CvGaborFeature::val.
IplImage *grayimg = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
if ( img->nChannels == 1 ) cvCopy( img, grayimg, NULL );
else if (img->nChannels == 3) cvCvtColor( img, grayimg, CV_RGB2GRAY );
double vfeature = feature->val( img );
cvSetReal2D( trainData, ((sub-1)*4+(pic-1)), i, vfeature );
cvSetReal1D( response, ((sub-1)*4+(pic-1)), t );
cvReleaseImage(&img);
cvReleaseImage(&grayimg);
}
}
delete [] filename;
}
printf("building the svm classifier .........................\n");
CvTermCriteria term_crit = cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 200, 0.8);
/*Type of SVM, one of the following types:
CvSVM::C_SVC - n-class classification (n>=2), allows imperfect separation of classes with penalty multiplier C for outliers.
CvSVM::NU_SVC - n-class classification with possible imperfect separation. Parameter nu (in the range 0..1, the larger the value, the smoother the decision boundary) is used instead of C.
CvSVM::ONE_CLASS - one-class SVM. All the training data are from the same class, SVM builds a boundary that separates the class from the rest of the feature space.
CvSVM::EPS_SVR - regression. The distance between feature vectors from the training set and the fitting hyperplane must be less than p. For outliers the penalty multiplier C is used.
CvSVM::NU_SVR - regression; nu is used instead of p. */
// NOTE(review): _svm_type/_kernel_type below are not passed to
// CvSVMParams — the params hard-code C_SVC and POLY instead.
int _svm_type = CvSVM::NU_SVC;
/*The kernel type, one of the following types:
CvSVM::LINEAR - no mapping is done, linear discrimination (or regression) is done in the original feature space. It is the fastest option. d(x,y) = x•y == (x,y)
CvSVM::POLY - polynomial kernel: d(x,y) = (gamma*(x•y)+coef0)degree
CvSVM::RBF - radial-basis-function kernel; a good choice in most cases: d(x,y) = exp(-gamma*|x-y|2)
CvSVM::SIGMOID - sigmoid function is used as a kernel: d(x,y) = tanh(gamma*(x•y)+coef0) */
int _kernel_type = CvSVM::POLY;
double _degree = 3.0;
double _gamma = 1.0;
double _coef0 = 0.0;
double _C = 1.0;
double _nu = 1.0;
double _p = 1.0;
CvSVMParams params( CvSVM::C_SVC, CvSVM::POLY, _degree, _gamma, _coef0, _C, _nu, _p,
0, term_crit );
svm->train( trainData, response, 0, 0, params );
svm->save( "svm.xml", "svm" );
cvReleaseMat(&response);
cvReleaseMat(&trainData);
}
}
开发者ID:Slipperboy, 项目名称:gaborboosting, 代码行数:82, 代码来源:cvbingabadafeatureselect.cpp
示例13: main
// Bird's-eye-view demo (Learning OpenCV ch.12, ex.12-1): undistorts a
// chessboard image using saved intrinsics, finds the board, computes the
// board-plane homography, and remaps the image to a top-down view whose
// apparent height Z the user can adjust interactively.
// NOTE(review): this listing is truncated ("code omitted" marker below).
int main(int argc, char *argv[])
{
if (argc != 6) {
printf("\nERROR: too few parameters\n");
help();
return -1;
}
help();
//INPUT PARAMETERS:
int board_w = atoi(argv[1]);
int board_h = atoi(argv[2]);
int board_n = board_w * board_h;
CvSize board_sz = cvSize(board_w, board_h);
// Intrinsics and distortion coefficients from a previous calibration.
CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
CvMat *distortion = (CvMat *) cvLoad(argv[4]);
IplImage *image = 0, *gray_image = 0;
if ((image = cvLoadImage(argv[5])) == 0) {
printf("Error: Couldn't load %s\n", argv[5]);
return -1;
}
gray_image = cvCreateImage(cvGetSize(image), 8, 1);
cvCvtColor(image, gray_image, CV_BGR2GRAY);
//UNDISTORT OUR IMAGE
// Precompute the rectification maps, then remap the input in place.
IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
IplImage *t = cvCloneImage(image);
cvRemap(t, image, mapx, mapy);
//GET THE CHECKERBOARD ON THE PLANE
cvNamedWindow("Checkers");
CvPoint2D32f *corners = new CvPoint2D32f[board_n];
int corner_count = 0;
int found = cvFindChessboardCorners(image,
board_sz,
corners,
&corner_count,
CV_CALIB_CB_ADAPTIVE_THRESH |
CV_CALIB_CB_FILTER_QUADS);
if (!found) {
printf
("Couldn't aquire checkerboard on %s, only found %d of %d corners\n",
argv[5], corner_count, board_n);
return -1;
}
//Get Subpixel accuracy on those corners
cvFindCornerSubPix(gray_image, corners, corner_count,
cvSize(11, 11), cvSize(-1, -1),
cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
0.1));
//GET THE IMAGE AND OBJECT POINTS:
//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
//That means corners are at: corners[r*board_w + c]
CvPoint2D32f objPts[4], imgPts[4];
objPts[0].x = 0;
objPts[0].y = 0;
objPts[1].x = board_w - 1;
objPts[1].y = 0;
objPts[2].x = 0;
objPts[2].y = board_h - 1;
objPts[3].x = board_w - 1;
objPts[3].y = board_h - 1;
imgPts[0] = corners[0];
imgPts[1] = corners[board_w - 1];
imgPts[2] = corners[(board_h - 1) * board_w];
imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];
//DRAW THE POINTS in order: B,G,R,YELLOW
cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);
//DRAW THE FOUND CHECKERBOARD
cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
cvShowImage("Checkers", image);
//FIND THE HOMOGRAPHY
CvMat *H = cvCreateMat(3, 3, CV_32F);
CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
cvGetPerspectiveTransform(objPts, imgPts, H);
//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
float Z = 25;
int key = 0;
IplImage *birds_image = cvCloneImage(image);
cvNamedWindow("Birds_Eye");
// Re-render the top-down view on every keypress; Z scales H(2,2), i.e.
// the apparent height of the virtual camera.
while (key != 27) { //escape key stops
CV_MAT_ELEM(*H, float, 2, 2) = Z;
// cvInvert(H,H_invt); //If you want to invert the homography directly
// cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
//USE HOMOGRAPHY TO REMAP THE VIEW
cvWarpPerspective(image, birds_image, H,
CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
CV_WARP_FILL_OUTLIERS);
cvShowImage("Birds_Eye", birds_image);
key = cvWaitKey();
if (key == 'u')
//......... part of the code omitted here .........
开发者ID:emcute0319, 项目名称:LearningOpenCVCode, 代码行数:101, 代码来源:ch12_ex12_1.cpp
示例14: vpERROR_TRACE
/*!
  Track the previously detected KLT keypoints from the last image into the
  new image \e I using the pyramidal Lucas-Kanade optical flow
  (cvCalcOpticalFlowPyrLK).

  Features whose flow could not be computed, or that are rejected by the
  user-supplied IsFeatureValid callback, are dropped (OnFeatureLost is fired
  for each) and the surviving features are compacted in place; countFeatures
  is updated to the number of survivors. OnMeasureFeature is fired once per
  surviving feature.

  \param I : Grey level image used as input. This image should have only
  1 channel and 8-bit unsigned depth.

  \exception vpTrackingException::initializationError : If the tracker has
  not been initialized, if the image I is not initialized, or if the image
  has a bad coding format.
*/
void vpKltOpencv::track(const IplImage *I)
{
if (!initialized) {
vpERROR_TRACE("KLT Not initialized") ;
throw(vpException(vpTrackingException::initializationError,
"KLT Not initialized")) ;
}
if (!I) {
throw(vpException(vpTrackingException::initializationError,
"Image Not initialized")) ;
}
if (I->depth != IPL_DEPTH_8U || I->nChannels != 1) {
throw(vpException(vpTrackingException::initializationError,
"Bad Image format")) ;
}
// Rotate the double buffers: current image/pyramid become the "previous"
// ones, then copy the new frame into `image`.
CV_SWAP(prev_image, image, swap_temp);
CV_SWAP(prev_pyramid, pyramid, swap_temp);
cvCopy(I, image, 0);
if(!initial_guess){
// Save current features as previous features
countPrevFeatures = countFeatures;
for (int boucle=0; boucle<countFeatures;boucle++) {
prev_featuresid[boucle] = featuresid[boucle];
}
CvPoint2D32f *swap_features = 0;
CV_SWAP(prev_features, features, swap_features);
}
// When an initial guess was supplied by the caller, `features` already holds
// the predicted positions and must not be overwritten by the swap above.
if (countFeatures <= 0) return;
cvCalcOpticalFlowPyrLK( prev_image, image, prev_pyramid, pyramid,
prev_features, features, countFeatures,
cvSize(win_size, win_size), pyramid_level,
status, 0, cvTermCriteria(CV_TERMCRIT_ITER
|CV_TERMCRIT_EPS,20,0.03),
flags );
// CV_LKFLOW_PYR_A_READY tells the next call that `pyramid` (swapped into
// prev_pyramid then) is already computed. With an initial guess the flags
// are reset rather than OR-ed, discarding any stale guess-related bits.
if(!initial_guess)
flags |= CV_LKFLOW_PYR_A_READY;
else{
flags = CV_LKFLOW_PYR_A_READY;
initial_guess = false;
}
// Compact surviving features to the front of the arrays: i scans all
// tracked features, k writes the survivors.
int i,k;
for (i = k = 0; i < countFeatures ; i++) {
if (!status[i]) {
// LK failed for this feature: mark it lost and notify the client.
lostDuringTrack[i] = 1;
if (OnFeatureLost)
OnFeatureLost(_tid, i, featuresid[i], features[i].x,
features[i].y);
continue;
}
if (IsFeatureValid) {
// Client-side validation hook (e.g. region-of-interest filtering).
if (!IsFeatureValid(_tid, features[i].x, features[i].y)) {
lostDuringTrack[i] = 1;
if (OnFeatureLost)
OnFeatureLost(_tid, i, featuresid[i], features[i].x, features[i].y);
continue;
}
}
features[k] = features[i];
featuresid[k] = featuresid[i];
if (OnMeasureFeature) OnMeasureFeature(_tid, k, featuresid[k], features[k].x, features[k].y);
// lostDuringTrack is indexed by the ORIGINAL slot i, not the compacted k.
lostDuringTrack[i] = 0;
k++;
}
countFeatures = k;
}
开发者ID:ILoveFree2, 项目名称:visp-deb, 代码行数:80, 代码来源:vpKltOpencv.cpp
示例15: main
// NOTE(review): this main() is truncated by the source site at the bottom
// ("部分代码省略") — the while loop and function are incomplete here, so the
// code is left byte-identical and only comments are added.
int main(int argc, char** argv)
{
// GLOBAL SETTINGS
static int framecounter=0;
const CvSize imsize = cvSize(320,240);
int delay = 0;
const int win_size = 10;
// Pyramid scratch buffer sizing follows the classic cvCalcOpticalFlowPyrLK
// recipe (width+8, height/3) from the OpenCV 1.x examples.
CvSize pyr_sz = cvSize( imsize.width+8, imsize.height/3 );
IplImage * pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
IplImage * pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
IplImage * rawImage_resized = cvCreateImage( imsize, IPL_DEPTH_8U, 3);
cvNamedWindow("Test");
CvGenericTracker tracker;
// LOAD INPUT FILE
// No CLI argument -> live camera 0; otherwise treat argv[1] as a video file.
CvCapture * capture = NULL;
if (argc==1) {
capture = cvCreateCameraCapture(0);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, imsize.width);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, imsize.height);
}else{
capture = cvCreateFileCapture(argv[1]);
}
if (!capture) {fprintf(stderr, "Error: fail to open source video!\n");return 0;}
cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter);
// START ENDLESS LOOP
while(1)
{
// GET NEXT FRAME
// The if(1) branch forces an explicit seek every iteration (allows frame
// skipping during debugging); the else branch is the plain sequential path.
if (1){
cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter++);
}else{
framecounter++;
}
IplImage * rawImage = cvQueryFrame(capture);
// NOTE(review): BUG — rawImage is dereferenced by cvResize on the next
// line BEFORE the NULL check below; at end-of-video this crashes instead
// of printing the message. The check should precede cvResize. Left as-is
// because the function is truncated and cannot be safely rewritten here.
cvResize(rawImage,rawImage_resized);
if (!rawImage) {fprintf(stderr, "Info: end of video!\n"); break;}
if (tracker.initialized()){
tracker.update(rawImage_resized);
}else{
tracker.initialize(rawImage_resized);
tracker.m_framecounter=framecounter;
}
// START PROCESSING HERE
{
// Initialize, load two images from the file system, and
// allocate the images and other structures we will need for
// results.
CvMat * imgA = tracker.m_currImage;
IplImage * imgB = tracker.m_nextImage;
IplImage * imgC = cvCloneImage(rawImage_resized);
// The first thing we need to do is get the features
// we want to track.
IplImage * eig_image = cvCreateImage( imsize, IPL_DEPTH_32F, 1 );
IplImage * tmp_image = cvCreateImage( imsize, IPL_DEPTH_32F, 1 );
int corner_count = MAX_CORNERS;
CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
// Shi-Tomasi corners on the current frame, refined to sub-pixel accuracy.
cvGoodFeaturesToTrack(imgA,eig_image,tmp_image,cornersA,&corner_count,0.01,5.0,0,3,0,0.04);
cvFindCornerSubPix(imgA,cornersA,corner_count,cvSize(win_size,win_size),cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
// Call the Lucas Kanade algorithm
char features_found[ MAX_CORNERS ];
float feature_errors[ MAX_CORNERS ];
CvPoint2D32f * cornersB = new CvPoint2D32f[ MAX_CORNERS ];
// CV_LKFLOW_PYR_B_READY reuses pyrB from the previous iteration; it is
// only valid from the second frame on, hence the framecounter guard.
cvCalcOpticalFlowPyrLK(imgA,imgB,pyrA,pyrB,
cornersA,cornersB,corner_count,cvSize( win_size,win_size ),
5,features_found,feature_errors,
cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
(framecounter<2)?0:CV_LKFLOW_PYR_B_READY);
// Now make some image of what we are looking at:
// Draw a red motion segment A->B for every successfully tracked corner;
// corners with a large tracking error are reported and skipped.
for( int i=0; i<corner_count; i++ ) {
if( features_found[i]==0|| feature_errors[i]>550 ) {
fprintf(stderr,"error=%f\n",feature_errors[i]);continue;
}
CvPoint p0 = cvPoint(cvRound( cornersA[i].x ),cvRound( cornersA[i].y ));
CvPoint p1 = cvPoint(cvRound( cornersB[i].x ),cvRound( cornersB[i].y ));
cvLine( imgC, p0, p1, CV_RGB(255,0,0), 1 );
}
cvShowImage("Test",imgC);
// Per-iteration scratch resources are released here; pyrA/pyrB and the
// capture outlive the loop (their cleanup is in the elided tail).
cvReleaseImage(&imgC);
cvReleaseImage(&eig_image);
cvReleaseImage(&tmp_image);
delete [] cornersA;
delete [] cornersB;
}
// DISPLAY PROCESSING RESULT
int key = cvWaitKey(delay)&0xff;
if (key==27){
break;
}else if (key==' '){
// Space toggles between pause (delay=0, wait for key) and ~30fps playback.
if (delay){ delay = 0; }else{ delay = 30; }
//......... remainder of code omitted by the source site .........
开发者ID:liangfu, 项目名称:pwp, 代码行数:101, 代码来源:test043_optflow.cpp
六六分期app的软件客服如何联系?不知道吗?加qq群【895510560】即可!标题:六六分期
阅读:18088| 2023-10-27
今天小编告诉大家如何处理win10系统火狐flash插件总是崩溃的问题,可能很多用户都不知
阅读:9616| 2022-11-06
今天小编告诉大家如何对win10系统删除桌面回收站图标进行设置,可能很多用户都不知道
阅读:8149| 2022-11-06
今天小编告诉大家如何对win10系统电脑设置节能降温的设置方法,想必大家都遇到过需要
阅读:8530| 2022-11-06
我们在使用xp系统的过程中,经常需要对xp系统无线网络安装向导设置进行设置,可能很多
阅读:8432| 2022-11-06
今天小编告诉大家如何处理win7系统玩cf老是与主机连接不稳定的问题,可能很多用户都不
阅读:9345| 2022-11-06
电脑对日常生活的重要性小编就不多说了,可是一旦碰到win7系统设置cf烟雾头的问题,很
阅读:8397| 2022-11-06
我们在日常使用电脑的时候,有的小伙伴们可能在打开应用的时候会遇见提示应用程序无法
阅读:7833| 2022-11-06
今天小编告诉大家如何对win7系统打开vcf文件进行设置,可能很多用户都不知道怎么对win
阅读:8387| 2022-11-06
今天小编告诉大家如何对win10系统s4开启USB调试模式进行设置,可能很多用户都不知道怎
阅读:7380| 2022-11-06
请发表评论