本文整理汇总了C++中cvResize函数的典型用法代码示例。如果您正苦于以下问题:C++ cvResize函数的具体用法?C++ cvResize怎么用?C++ cvResize使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvResize函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: detect_multiple1
/*
 * detect_multiple1 - run the Haar cascade for the requested facial feature
 * on 'img' and write up to MAX_RECT detections (scaled back to full image
 * coordinates) into found_obj[]. For feature==FACE the whole frame is
 * searched; for other features the search is restricted to the last face
 * rectangle found. 'sz' is the minimum object size passed to the detector.
 * Returns the raw number of detections (may exceed MAX_RECT).
 * (Tail of this function omitted by the example site.)
 */
int detect_multiple1( IplImage* img, CvRect* found_obj[], CvSize sz, int feature )
{
/* face rectangle remembered across calls so sub-features can be searched
 * inside it */
static CvRect face_rect;
/* NOTE(review): gray/small_img are plain locals reset to NULL on every call,
 * so the release branches below are dead code, and for any feature other
 * than FACE cvSetImageROI() is called on a NULL small_img. These were almost
 * certainly intended to be 'static' -- confirm against the original repo. */
IplImage *gray=NULL, *small_img=NULL;
int tot=0;
/* select the cascade matching the requested feature */
switch (feature) {
case FACE:
cascade=face_cascade;
break;
case NOSE:
cascade=nose_cascade;
break;
case MOUTH:
cascade=mouth_cascade;
break;
case EYE:
case LEYE:
cascade=leye_cascade;
break;
case REYE:
cascade=reye_cascade;
break;
case PROFILE_FACE:
cascade=profile_face_cascade;
break;
default:
cascade = face_cascade;
break;
}
if (feature == FACE) {
//cvResetImageROI(img);
/* (dead code -- see NOTE above) */
if(gray) {
cvResetImageROI(gray);
cvReleaseImage(&gray);
}
if(small_img) {
cvResetImageROI(small_img);
cvReleaseImage(&small_img);
}
/* build a grayscale, 1/scale-sized, equalised copy for detection */
gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
cvRound (img->height/scale)), 8, 1 );
cvCvtColor( img, gray, CV_BGR2GRAY );
cvResize( gray, small_img, CV_INTER_LINEAR );
cvEqualizeHist( small_img, small_img );
}
else{
/* sub-features are searched only inside the previously detected face */
cvSetImageROI(small_img, face_rect);
// printf("found face (%d,%d,%d,%d) setting ROI to (%d,%d,%d,%d)\n",r->x,r->y,r->width,r->height,prev.x,prev.y,prev.width,prev.height);
}
cvClearMemStorage( storage );
// for(i=0;i<10;i++) {
// double t = (double)cvGetTickCount();
CvSeq* objects = cvHaarDetectObjects( small_img, cascade, storage,
1.1, 2, 0
//1.2, 0, 0
// |CV_HAAR_FIND_BIGGEST_OBJECT
// |CV_HAAR_DO_ROUGH_SEARCH
// |CV_HAAR_DO_CANNY_PRUNING
// |CV_HAAR_SCALE_IMAGE
,
sz);
//cvSize(30, 30) );
// t = (double)cvGetTickCount() - t;
// printf( "detection time = %gms, faces: %d\n", t/((double)cvGetTickFrequency()*1000.), faces->total );
tot=objects->total;
int i=0;
if(objects->total>0) {
if(tot > MAX_RECT) tot=MAX_RECT;
for(i=0; ((i < tot) &&( i < MAX_RECT)); i++) {
CvRect* r = (CvRect*)cvGetSeqElem( objects, i );
if(feature == FACE) {
/* remember the first (most confident) face for later sub-feature calls */
if(i==0)
face_rect = cvRect(r->x, r->y, r->width, r->height);
}
else {
/* shift sub-feature coordinates from ROI space to frame space */
if(face_rect.width > 0 && face_rect.height > 0) {
r->x += face_rect.x;
r->y += face_rect.y;
}
}
/* map detection back to the full-resolution image */
found_obj[i]->x = (int)((double)r->x * scale);
found_obj[i]->y = (int)((double)r->y * scale);
found_obj[i]->width = (int)((double)r->width * scale);
/* NOTE(review): for non-FACE features the height is NOT scaled here,
 * unlike x/y/width -- looks inconsistent; verify intent. */
found_obj[i]->height = (int)((double)r->height);
if(feature == FACE)
found_obj[i]->height = (int)((double)r->height * scale);
//......... part of the code omitted here by the example site .........
开发者ID:ashishlal,项目名称:iAngelEyes,代码行数:101,代码来源:facedetect.c
示例2: GetImageData
void GetImageData(int w,int h,int bpp,int channels,unsigned char *rawArray)
{
int nWidth,nHeight,nBpp;
unsigned char *ImgData = (unsigned char *)malloc(qhyusb->QCam.cameraW*qhyusb->QCam.cameraH*3*bpp/8);
#ifdef QHYCCD_DEBUG
printf("GetImageData:Malloc memory Size %d\n",qhyusb->QCam.cameraW*qhyusb->QCam.cameraH*3*bpp/8);
#endif
GetFrame(ImgData, &nWidth, &nHeight, &nBpp, NULL, NULL, NULL, NULL);
#ifdef QHYCCD_DEBUG
printf("GetImageData:nWidth %d nHeight %d nBpp %d\n",nWidth,nHeight,nBpp);
#endif
memcpy(rawArray, ImgData,nWidth*nHeight*nBpp/8);
if(bpp != nBpp)
{
if((bpp == 8) && (nBpp ==16))
{
int i = 1;
int j = 0;
while(j < w*h)
{
rawArray[j] = rawArray[i];
j += 1;
i += 2;
}
}
else if((bpp == 16) && (nBpp == 8))
{
int i = 1;
int j = 0;
unsigned char *tempArray = (unsigned char *)malloc(w*h*2);
memcpy(tempArray,rawArray,w*h);
while(j < w*h)
{
tempArray[i] = rawArray[j];
tempArray[i-1] = 0;
j += 1;
i += 2;
}
memcpy(rawArray,tempArray,w*h*2);
free(tempArray);
}
}
if(channels == 3)
{
IplImage *img = cvCreateImage(cvSize(nWidth,nHeight),bpp,1);
img->imageData = (char *)rawArray;
IplImage *colorimg = cvCreateImage(cvSize(nWidth,nHeight),bpp,channels);
if(qhyusb->QCam.CAMERA == DEVICETYPE_QHY5LII)
{
cvCvtColor(img,colorimg,CV_BayerGR2RGB);
}
memcpy(rawArray,colorimg->imageData,colorimg->imageSize);
cvReleaseImage(&img);
cvReleaseImage(&colorimg);
}
IplImage *img = cvCreateImage(cvSize(nWidth,nHeight),bpp,channels);
img->imageData = (char *)rawArray;
IplImage *rszimg = cvCreateImage(cvSize(w,h),bpp,channels);
cvResize(img,rszimg,CV_INTER_NN);
memcpy(rawArray,rszimg->imageData,rszimg->imageSize);
cvReleaseImage(&rszimg);
cvReleaseImage(&img);
free(ImgData);
}
开发者ID:ceterumnet,项目名称:QHYCCD_Linux,代码行数:76,代码来源:common.cpp
示例3: detect_and_draw
// Run the global Haar cascade on 'img', outline the first detection on the
// input frame, and return a copy of the detected region (HAND set to 1).
// When nothing is found, HAND is cleared and a blank 100x100 image of the
// same depth/channels is returned. The caller owns the returned image.
IplImage* detect_and_draw(IplImage* img, double scale = 1.3)
{
    static CvScalar colors[] = {
        {{0,0,255}}, {{0,128,255}},{{0,255,255}},{{0,255,0}},
        {{255,128,0}},{{255,255,0}},{{255,0,0}}, {{255,0,255}}
    }; //Just some pretty colors to draw with

    // Build a grayscale, down-scaled, histogram-equalised working copy.
    CvSize fullSize = cvSize(img->width, img->height);
    CvSize smallSize = cvSize(cvRound(img->width/scale), cvRound(img->height/scale));
    IplImage* grayFull = cvCreateImage(fullSize, 8, 1);
    IplImage* grayScaled = cvCreateImage(smallSize, 8, 1);
    cvCvtColor(img, grayFull, CV_BGR2GRAY);
    cvResize(grayFull, grayScaled, CV_INTER_LINEAR);
    cvEqualizeHist(grayScaled, grayScaled);

    // Detect objects on the scaled image.
    cvClearMemStorage(storage);
    fprintf(stderr,"size: %d %d\n",cvGetSize(grayScaled).width,cvGetSize(grayScaled).height);
    CvSeq* objects = cvHaarDetectObjects(grayScaled,
                                         cascade,
                                         storage,
                                         1.1,
                                         2,
                                         0,
                                         cvSize(35, 35));
    fprintf(stderr,"size: %d %d\n",cvGetSize(grayScaled).width,cvGetSize(grayScaled).height);

    // Keep only the first detection (if any), copy it out, and mark it.
    IplImage* result;
    int found = objects ? objects->total : 0;
    if (found > 0)
    {
        CvRect* r = (CvRect*)cvGetSeqElem(objects, 0);
        cvSetImageROI(img, *r);
        result = cvCreateImage(cvSize(r->width, r->height), img->depth, img->nChannels);
        cvCopy(img, result, NULL);
        cvRectangle(img,
                    cvPoint(r->x, r->y),
                    cvPoint(r->x + r->width, r->y + r->height),
                    colors[0]);
        cvResetImageROI(img);
        HAND = 1;
    }
    else
    {
        HAND = 0;
        result = cvCreateImage(cvSize(100,100), img->depth, img->nChannels);
    }

    cvReleaseImage(&grayFull);
    cvReleaseImage(&grayScaled);
    return result;
}
开发者ID:RokIrt,项目名称:HandGesture,代码行数:64,代码来源:cameraDSRimgMaha.cpp
示例4: cvQueryFrame
void *process2(void *arg)
{
do
{
frame2 = cvQueryFrame(capture2); // pointer to a cvCapture structure
if(!frame2)
break;
cvShowImage("video2", frame2);
roi2Image=cvCloneImage(frame2);
if ((orig2.x != dest2.x) && (orig2.y != dest2.y))
{ cvSetImageROI(roi2Image, cvRect(orig2.x<dest2.x?orig2.x:dest2.x, orig2.y<dest2.y?orig2.y:dest2.y,
abs(dest2.x-orig2.x),abs(dest2.y-orig2.y)));
roi2Adj = cvCreateImage(cvSize(abs(dest2.x-orig2.x)*input_resize_percent/100,
abs(dest2.y-orig2.y)*input_resize_percent/100),
roi2Image->depth, roi2Image->nChannels);
}
else
{
cvSetImageROI(roi2Image,cvRect(0,0,frame2->width,frame2->height));
roi2Adj = cvCreateImage(cvSize((int)((frame2->width*input_resize_percent)/100) , (int)((frame2->height*input_resize_percent)/100)),
frame2->depth, frame2->nChannels);
}
cvResize(roi2Image, roi2Adj, CV_INTER_LINEAR);
pthread_mutex_lock(&lock);
if (!changing && !analiza1)
{
Ncarros=detect(roi2Adj);
printf("Proceso 2 Numero de Carros: %d \n", Ncarros);
if (Ncarros>= 4)
{
reset=true;
pthread_create(&cambioSem,NULL,GreentoRed2,semaphore2);
analiza2=false;
analiza1=true;
}
}
if (timeout && !analiza1)
{
timeout=false;
pthread_create(&cambioSem,NULL,GreentoRed2,semaphore2);
analiza2=false;
analiza1=true;
}
pthread_mutex_unlock(&lock);
cvShowImage("image2", roi2Adj);
key = cvWaitKey(2);
if(key == KEY_ESC)
break;
usleep(10000);
}while(1);
cvDestroyAllWindows();
pthread_cancel(proc1);
pthread_cancel(timeOuts);
pthread_cancel(cambioSem);
cvReleaseImage(&frame1);
cvReleaseImage(&roi1Image);
cvReleaseImage(&roi1Adj);
cvReleaseImage(&semaphore1);
cvReleaseCapture(&capture1);
cvReleaseImage(&frame2);
cvReleaseImage(&roi2Image);
cvReleaseImage(&roi2Adj);
cvReleaseImage(&semaphore2);
cvReleaseCapture(&capture2);
cvReleaseHaarClassifierCascade(&cascade);
cvReleaseMemStorage(&storage);
finish=true;
return NULL;
}
开发者ID:jng756,项目名称:SEMAFORO-OPENCV,代码行数:83,代码来源:Operacion+threads+principales.c
示例5: main
/*
 * Command-line driver for fixation-based image segmentation: loads an image
 * (resized to <=640px wide), computes or loads a probabilistic boundary map
 * (CPU/GPU/Sobel, optionally fused with optical flow), then segments around
 * one fixation point (-pos) or a list of them (-f).
 * (Tail of main() omitted by the example site.)
 */
int main(int argc, char* argv[]){
CCmdLine cmdLine;
cmdLine.SplitLine(argc, argv);
/* require -i and -o plus one of -pos / -f / -pbOnly */
if ( !(cmdLine.HasSwitch("-i") && cmdLine.HasSwitch("-o") && (cmdLine.HasSwitch("-pos") || cmdLine.HasSwitch("-f") || cmdLine.HasSwitch("-pbOnly"))) ){
fprintf(stderr, "usage: %s -i <image> -o <output-directory> < -pos <x> <y> | -f <fixation-points-file> > [ -pb <probabilistic-boundary-prefix ] [ -flow <optical-flow-file> ] [ -sobel ]\n",argv[0]);
fprintf(stderr, "OR \t %s -pbOnly -i <image> -o <output-probabilistic-boundary-prefix>\n",argv[0]);
exit(1);
}
class segLayer frame1;
/* NOTE(review): fixed 80-byte buffer filled with strcpy() from user-supplied
 * paths -- overflows on long arguments; consider snprintf. */
char tmp[80];
strcpy (tmp, cmdLine.GetArgument("-i", 0).c_str());
/* timing bookkeeping */
int64 tic1,tic2,tic3,tic4;
double ticFrequency = cvGetTickFrequency()*1000000;
tic1=cvGetTickCount();
IplImage *im=cvLoadImage(tmp), *im2;
#ifdef CUDA_SUPPORT
int maxWidth=640;
#else
int maxWidth=640;
#endif
bool resized=false;
float scale=1;
/* downscale wide images; 'scale' also maps fixation coords later */
if(cvGetSize(im).width>maxWidth){
scale=maxWidth/(double)(cvGetSize(im).width);
printf("Image too big, resizing it for the segmentation...\n");
int newHeight=(int)(cvGetSize(im).height*scale);
im2=cvCreateImage( cvSize(maxWidth,newHeight), IPL_DEPTH_8U, 3 );
cvResize(im,im2);
resized=true;
}else{
im2=im;
}
frame1.setImage(im2);
/* either load a precomputed boundary map or compute one */
if (cmdLine.HasSwitch("-pb")){
strcpy (tmp, cmdLine.GetArgument("-pb", 0).c_str());
frame1.readPbBoundary(tmp);
}else{
// Edge detection!
if (cmdLine.HasSwitch("-sobel"))
frame1.edgeSobel();
else{
#ifdef CUDA_SUPPORT
/* GPU edge detection is serialised across processes via a lock file */
if(!get_lock()){
fprintf(stderr,"Impossible to get the lock...\n");
exit(1);
}
frame1.edgeGPU(false);
if(!release_lock()){
fprintf(stderr,"Impossible to release the lock...\n");
exit(1);
}
#else
frame1.edgeCGTG();
#endif
}
tic2=cvGetTickCount();
/* optional optical-flow channels: RGB flow image decoded back to
 * U/V components via the fixed affine mapping below */
if (cmdLine.HasSwitch("-flow")){
strcpy (tmp, cmdLine.GetArgument("-flow", 0).c_str());
IplImage *flow=cvLoadImage(tmp);
IplImage *flow32 = cvCreateImage(cvGetSize(flow), IPL_DEPTH_32F,3);
IplImage *flowU = cvCreateImage(cvGetSize(flow), IPL_DEPTH_32F,1);
IplImage *flowV = cvCreateImage(cvGetSize(flow), IPL_DEPTH_32F,1);
cvConvertScale(flow, flow32, 40/255.,-20);
cvSplit(flow32,flowU,NULL,NULL,NULL);
cvSplit(flow32,NULL,flowV,NULL,NULL);
frame1.setU(flowU);
frame1.setV(flowV);
cvReleaseImage(&flow);
cvReleaseImage(&flow32);
}
frame1.generatePbBoundary();
}
if (cmdLine.HasSwitch("-pbOnly")){
/* only save the boundary map and stop */
strcpy (tmp, cmdLine.GetArgument("-o", 0).c_str());
frame1.savePbBoundary(tmp);
}else{
frame1.allocateMemForContours();// Don't forget to allocate memory to store the region contours.
//select fixation point!
if(cmdLine.HasSwitch("-pos")){
float x,y;
sscanf(cmdLine.GetArgument("-pos", 0).c_str(),"%f",&x);
sscanf(cmdLine.GetArgument("-pos", 1).c_str(),"%f",&y);
/* fixation coordinates are given in original-image space */
frame1.assignFixPt((int)(x*scale), (int)(y*scale));
}else{
strcpy (tmp, cmdLine.GetArgument("-f", 0).c_str());
frame1.readFixPts(tmp,scale);
}
//segment
frame1.segmentAllFixs();
//......... rest of main() omitted by the example site .........
开发者ID:HongtaoYang,项目名称:x-inf3a,代码行数:101,代码来源:segment.cpp
示例6: main
//......... fragment: start of this main() omitted by the example site .........
/* Fragment of an eye-tracking main loop: draws detected rectangles, then
 * picks the best right/left eye candidates, crops and equalises each eye
 * region, and renders it to the magnified output images. */
apex2.y = cvRound(r->y + r->height);
cvRectangle (frame,apex1, apex2, colors[i % 8], 3, 8, 0);
// left-eye candidate
if(i == 0){
lefteye_cand1 = *r;
}
if(i == 1){
lefteye_cand2 = *r;
}
}
// narrow down the candidates
if(righteye->total >= 1){
if(righteye->total >= 2){
/* with two candidates, prefer the left-most one for the right eye */
if(righteye_cand1.x <= righteye_cand2.x){
right = righteye_cand1;
righteye_center.x = cvRound(right.x + right.width*0.5);
righteye_center.y = cvRound(right.y + right.height*0.5);
}
else{
right = righteye_cand2;
righteye_center.x = cvRound(right.x + right.width*0.5);
righteye_center.y = cvRound(right.y + right.height*0.5);
}
}
else{
right = righteye_cand1;
righteye_center.x = cvRound(right.x + right.width*0.5);
righteye_center.y = cvRound(right.y + right.height*0.5);
}
/* crop the right-eye patch around its centre, equalise, and upscale */
eyezone1 = cvCreateImage(cvSize(right.width, right.height), IPL_DEPTH_8U, 1);
cvGetRectSubPix(gray, eyezone1, cvPointTo32f(righteye_center));
cvEqualizeHist(eyezone1, eyezone1);
cvResize(eyezone1, minieyezone1, CV_INTER_LINEAR);
cvResize(minieyezone1, output1, CV_INTER_NN);
}
if(lefteye->total >= 1){
if(lefteye->total >= 2){
/* with two candidates, prefer the right-most one for the left eye */
if(lefteye_cand1.x >= lefteye_cand2.x){
left = lefteye_cand1;
lefteye_center.x = cvRound(left.x + left.width*0.5);
lefteye_center.y = cvRound(left.y + left.height*0.5);
}
else{
left = lefteye_cand2;
lefteye_center.x = cvRound(left.x + left.width*0.5);
/* NOTE(review): uses right.y instead of left.y -- looks like a
 * copy-paste bug; confirm against upstream. */
lefteye_center.y = cvRound(right.y + left.height*0.5);
}
}
else{
left = lefteye_cand1;
lefteye_center.x = cvRound(left.x + left.width*0.5);
lefteye_center.y = cvRound(left.y + left.height*0.5);
}
/* crop the left-eye patch around its centre, equalise, and upscale */
eyezone2 = cvCreateImage(cvSize(left.width, left.height), IPL_DEPTH_8U, 1);
cvGetRectSubPix(gray, eyezone2, cvPointTo32f(lefteye_center));
cvEqualizeHist(eyezone2, eyezone2);
cvResize(eyezone2, minieyezone2, CV_INTER_LINEAR);
cvResize(minieyezone2, output2, CV_INTER_NN);
}
printf("righteye width = %d, height = %d\n", right.width, right.height);
printf("lefteye width = %d, height = %d\n", left.width, left.height);
// printf("righteye x = %d\n", right.x);
开发者ID:k-n-k,项目名称:svm_eyetrack,代码行数:67,代码来源:bmpout.cpp
示例7: getHoGOpenMP
/*
 * getHoGOpenMP - compute a HoG (Histogram of Oriented Gradients) descriptor
 * for 'src' into 'feat' (length BLOCK_WIDTH*BLOCK_HEIGHT*BLOCK_DIM),
 * parallelised with OpenMP. The input is first resized to RESIZE_X x
 * RESIZE_Y, 8-bit grayscale, so cell/block geometry is constant.
 */
void getHoGOpenMP(IplImage* src, double* feat) {
	IplImage* img = cvCreateImage(cvSize(RESIZE_X,RESIZE_Y), IPL_DEPTH_8U, 1);
	cvResize(src, img);
	const int width = RESIZE_X;
	const int height = RESIZE_Y;

	/* per-cell orientation histograms */
	double hist[CELL_WIDTH][CELL_HEIGHT][CELL_BIN];
	memset(hist, 0, CELL_WIDTH*CELL_HEIGHT*CELL_BIN*sizeof(double));

	/* pass 1: accumulate gradient magnitude into orientation bins */
	#pragma omp parallel for
	for(int y=0; y<height; y++){
		for(int x=0; x<width; x++){
			if(x==0 || y==0 || x==width-1 || y==height-1){
				continue; /* no centred gradient at the border */
			}
			/* BUGFIX: imageData is (signed) char; without the unsigned cast,
			 * pixel values above 127 read as negative and the gradients are
			 * wrong */
			double dx = (double)(unsigned char)img->imageData[y*img->widthStep+(x+1)]
			          - (double)(unsigned char)img->imageData[y*img->widthStep+(x-1)];
			double dy = (double)(unsigned char)img->imageData[(y+1)*img->widthStep+x]
			          - (double)(unsigned char)img->imageData[(y-1)*img->widthStep+x];
			double m = sqrt(dx*dx+dy*dy);
			/* orientation in [0,360) degrees, quantised to CELL_BIN bins */
			double deg = (atan2(dy, dx)+CV_PI) * 180.0 / CV_PI;
			int bin = CELL_BIN * deg/360.0;
			if(bin < 0) bin=0;
			if(bin >= CELL_BIN) bin = CELL_BIN-1;
			/* BUGFIX: rows handled by different threads can map into the same
			 * cell, so the accumulation must be atomic (data race in the
			 * original) */
			#pragma omp atomic
			hist[(int)(x/CELL_X)][(int)(y/CELL_Y)][bin] += m;
		}
	}

	/* pass 2: L2-normalise each block of cells and emit the feature vector;
	 * each (x,y) block writes a disjoint slice of 'feat', so this loop is
	 * safely parallel */
	#pragma omp parallel for
	for(int y=0; y<BLOCK_HEIGHT; y++){
		for(int x=0; x<BLOCK_WIDTH; x++){
			/* gather the block's cell histograms into one vector */
			double vec[BLOCK_DIM];
			memset(vec, 0, BLOCK_DIM*sizeof(double));
			for(int j=0; j<BLOCK_Y; j++){
				for(int i=0; i<BLOCK_X; i++){
					for(int d=0; d<CELL_BIN; d++){
						int index = j*(BLOCK_X*CELL_BIN) + i*CELL_BIN + d;
						vec[index] = hist[x+i][y+j][d];
					}
				}
			}
			/* L2 norm with +1 regularisation */
			double norm = 0.0;
			for(int i=0; i<BLOCK_DIM; i++){
				norm += vec[i]*vec[i];
			}
			for(int i=0; i<BLOCK_DIM; i++){
				vec[i] /= sqrt(norm + 1.0);
			}
			for(int i=0; i<BLOCK_DIM; i++){
				int index = y*BLOCK_WIDTH*BLOCK_DIM + x*BLOCK_DIM + i;
				feat[index] = vec[i];
			}
		}
	}
	cvReleaseImage(&img);
	return;
}
开发者ID:IngenicC,项目名称:HandDetectorOpenMP,代码行数:63,代码来源:HandDetector.cpp
示例8: assert
/*
 * ComputeEyeLocations - locate left/right eyes inside the face bounding box
 * 'bb' of the grayscale image 'img' using ASEF correlation filters.
 * The face crop is resized to the filter size (nCols x nRows), illumination-
 * normalised, correlated in the frequency domain with both eye filters, and
 * the peak of each (masked) response gives the eye position, which is mapped
 * back to image coordinates in this->pEyeL / this->pEyeR.
 */
void ASEF::ComputeEyeLocations(IplImage *img, CvRect bb){
assert(img->nChannels==1);
/* resize the face region to the filter resolution */
IplImage *roi = cvCreateImage(cvSize(this->nCols, this->nRows), img->depth, 1);
cvSetImageROI(img, bb);
cvResize(img, roi, CV_INTER_LINEAR);
cvResetImageROI(img);
/* convert to [0,1] doubles and apply retina illumination normalisation */
IplImage *roi_64 = cvCreateImage(cvGetSize(roi), IPL_DEPTH_64F, 1);
cvConvertScale(roi, roi_64, 1./255., 0.);
FaceNormIllu::do_NormIlluRETINA(roi_64, this->face_real, 5.0);
/* build a complex image (imaginary part = this->face_im) for the DFT */
cvMerge(this->face_real, this->face_im, 0, 0, this->complex_data);
// do DFT
cvDFT(this->complex_data, this->F, CV_DXT_FORWARD, this->complex_data->height);
// G left: correlate with the left-eye filter and go back to spatial domain
cvMulSpectrums(this->F, this->LeftEyeDetector, this->Gl, CV_DXT_ROWS);
cvDFT(this->Gl, this->Gl, CV_DXT_INV_SCALE, this->Gl->height);
cvSplit(this->Gl, this->gl, 0, 0, 0);
// G right: same with the right-eye filter
cvMulSpectrums(this->F, this->RightEyeDetector, this->Gr, CV_DXT_ROWS);
/* NOTE(review): passes this->Gl->height here instead of Gr->height --
 * harmless only if both are the same size; confirm. */
cvDFT(this->Gr, this->Gr, CV_DXT_INV_SCALE, this->Gl->height);
cvSplit(this->Gr, this->gr,0,0,0);
// add both responses (each first normalised to [0,1])
double minV, maxV;
cvMinMaxLoc(this->gl, &minV, &maxV);
cvConvertScale(this->gl, this->gl, 1./(maxV-minV), -minV/(maxV-minV));
cvMinMaxLoc(this->gr, &minV, &maxV);
cvConvertScale(this->gr, this->gr, 1./(maxV-minV), -minV/(maxV-minV));
cvAdd(this->gl, this->gr, this->g);
/* restrict each combined response to its eye's expected region */
cvMul(this->g, this->LeftEyeMask, this->gl);
cvMul(this->g, this->RightEyeMask, this->gr);
///////////////////////////////////////////////////
// Compute Eye Locations
///////////////////////////////////////////////////
float scale;
/* left eye: peak in the top-left quadrant, mapped back to image coords */
cvSetImageROI(this->gl, cvRect(0,0, this->nCols>>1, this->nRows>>1));
cvMinMaxLoc(this->gl, 0,0,0, &this->pEyeL);
cvResetImageROI(this->gl);
scale = (float)bb.width/(float)this->nCols;
this->pEyeL.x=cvRound((float)this->pEyeL.x * scale + bb.x);
this->pEyeL.y=cvRound((float)this->pEyeL.y * scale + bb.y);
/* right eye: peak in the top-right quadrant, mapped back to image coords */
cvSetImageROI(this->gr, cvRect(this->nCols>>1, 0, this->nCols>>1, this->nRows>>1));
cvMinMaxLoc(this->gr, 0,0,0, &this->pEyeR);
cvResetImageROI(this->gr);
/* NOTE(review): x is scaled by bb.height/nRows here (not width/nCols) --
 * harmless only for square filters/boxes; confirm. */
scale = (float)bb.height/(float)this->nRows;
this->pEyeR.x=cvRound((float)(this->pEyeR.x + this->nCols*0.5)* scale + bb.x);
this->pEyeR.y=cvRound((float)this->pEyeR.y * scale + bb.y);
cvReleaseImage(&roi);
cvReleaseImage(&roi_64);
}
开发者ID:ILoveFree2,项目名称:ASEF,代码行数:80,代码来源:asef.cpp
示例9: cvPoint
/*
 * runFaceDetector - run the Haar cascade on 'input' and publish the largest
 * detected face (in full-resolution coordinates) in the faceInformation
 * global. When nothing is detected, faceInformation stays zeroed.
 */
void faceDetector::runFaceDetector(IplImage *input)
{
    double t = (double)cvGetTickCount();   /* kept for the profiling printf below */
    static tracker faceTracker;
    static CvPoint fp1,fp2;

    /* reset the published result before every run */
    faceInformation.LT= cvPoint(0,0);
    faceInformation.RB= cvPoint(0,0);
    faceInformation.Width=0;
    faceInformation.Height=0;
    if (input==0)
        return;

    IplImage *gray, *small_img;
    int i;
    int scale=1;   /* detection runs at 1:scale resolution */

    gray = cvCreateImage( cvSize(input->width,input->height), 8, 1 );
    small_img = cvCreateImage( cvSize( cvRound (input->width/scale),
                                       cvRound (input->height/scale)), 8, 1 );
    cvCvtColor( input, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvClearMemStorage( storage );

    if ( cascade )
    {
        CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                                            1.4, 2, 0
                                            // |CV_HAAR_FIND_BIGGEST_OBJECT
                                            // |CV_HAAR_DO_ROUGH_SEARCH
                                            |CV_HAAR_DO_CANNY_PRUNING
                                            //|CV_HAAR_SCALE_IMAGE
                                            ,
                                            cvSize(80/scale, 80/scale) );
        /* pick the detection with the largest area.
         * BUGFIX: the original fetched element 'maxI' (initially -1) instead
         * of 'i', and a stray ';' after the if() made the body run for every
         * face, so the "largest" face was really just the last one. */
        int maxI=-1;
        int max0=0;
        for ( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
            if (max0 < (r->width*r->height))
            {
                max0 = (r->width*r->height);
                maxI = i;
            }
        }
        if (maxI!=-1)
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, maxI);
            /* map the detection back to full resolution */
            faceInformation.LT.x=(r->x)*scale;
            faceInformation.LT.y=(r->y)*scale;
            faceInformation.RB.x=(r->x+ r->width)*scale;
            faceInformation.RB.y=(r->y+ r->height)*scale;
            faceInformation.Width=(r->width)*scale;
            faceInformation.Height=(r->height)*scale;
            /* NOTE(review): 'in' is never released and setModel() is
             * commented out -- this looks like a per-call leak; confirm who
             * owns clipDetectedFace()'s return value. */
            IplImage *in=clipDetectedFace(input);
            //faceTracker.setModel(in);
            fp1=faceInformation.LT;
            fp2=faceInformation.RB;
            // cvRectangle( input, faceInformation.LT, faceInformation.RB, CV_RGB(255,0,0), 3, 8, 0 );
        }
        // else
        // cvRectangle( input, faceInformation.LT, faceInformation.RB, CV_RGB(0,255,0), 3, 8, 0 );
    }
    cvReleaseImage(&gray);
    cvReleaseImage(&small_img);
    double t1 = (double)cvGetTickCount();
    //printf( "detection time = %gms\n",(t1-t)/((double)cvGetTickFrequency()*1000.));
}
开发者ID:Nolaan,项目名称:pam-face-authentication,代码行数:82,代码来源:faceDetector.cpp
示例10: cos
//......... fragment: start of this function omitted by the example site .........
/* Fragment of a laser-scan legs detector: finishes drawing the scan, marks
 * edge extremes, extracts U/P/O leg patterns from the vertical edges, fills
 * the _target list with distance/bearing per detection, and renders debug
 * output when _debug is set. */
{
CvPoint end = cvPoint(METER2PIXEL(laser[i].y) + DEBUG_WINDOW_WIDTH/2,
METER2PIXEL(laser[i].x));
/* connect consecutive readings only when both are in the same range class */
if (laser[i].range == MAXIMUM_RANGE && laser[i-1].range == MAXIMUM_RANGE)
cvLine(_tmpImg, start, end, cvScalar(0,0,0));
if (laser[i].range < MAXIMUM_RANGE && laser[i-1].range < MAXIMUM_RANGE)
cvLine(_tmpImg, start, end, cvScalar(0,0,0));
start = end;
}
// draw the extremes
for (unsigned int i = 0; i < vEdge.size(); i++)
{
/* colour encodes the edge type (R/L = strong, r/l = weak) */
CvScalar color;
switch (vEdge[i].type)
{
case 'R':
color = cvScalar(0,0,255); // red
break;
case 'L':
color = cvScalar(255,0,0); // blue
break;
case 'r':
color = cvScalar(0,196,255); // yellow
break;
case 'l':
color = cvScalar(64,255,0); // green
break;
}
// draw min extremes
CvPoint center = cvPoint(METER2PIXEL(vEdge[i].p1.y) + DEBUG_WINDOW_WIDTH/2,
METER2PIXEL(vEdge[i].p1.x));
cvCircle(_tmpImg, center, 2, color);
// draw max extremes
CvPoint c1 = cvPoint(METER2PIXEL(vEdge[i].p2.y) - 3 + DEBUG_WINDOW_WIDTH/2,
METER2PIXEL(vEdge[i].p2.x) - 3);
CvPoint c2 = cvPoint(METER2PIXEL(vEdge[i].p2.y) + 3 + DEBUG_WINDOW_WIDTH/2,
METER2PIXEL(vEdge[i].p2.x) + 3);
cvRectangle(_tmpImg, c1, c2, color);
}
}
// extract the horizontal lines of interest
/* pattern extraction is repeated until no more matches; _selectivity gates
 * the weaker P and O patterns */
vector< edge_t<point_t> > hEdge;
int temp = 1;
while ( temp > 0 ) { temp = getUpattern(vEdge, hEdge); }
temp = 1;
while ( _selectivity < 2 && temp > 0 ) { temp = getPpattern(vEdge, hEdge);}
temp = 1;
while ( _selectivity < 1 && temp > 0 ) { temp = getOpattern(vEdge, hEdge);}
// finally calculate distance and direction of each horizontal line
_target.clear();
vector< edge_t<point_t> >::iterator itend = hEdge.end();
for (vector< edge_t<point_t> >::iterator it = hEdge.begin(); it < itend; it++)
{
target_t t;
// the distance is an average between the two points
double xm = ((it->p1).x + (it->p2).x) / 2;
double ym = ((it->p1).y + (it->p2).y) / 2;
t.distance = sqrt(sqr(xm) + sqr(ym));
// left PI/2, right -PI/2
t.bearing = atan2(ym, xm);
// no height information of course...
t.pattern = it->type;
_target.push_back(t);
}
// final number of detected people
_howMany = _target.size();
// draw the last things for debugging
if (_debug)
{
// draw horizontal edges
for (unsigned int i = 0; i < hEdge.size(); i++)
{
CvPoint p1 = cvPoint(METER2PIXEL(hEdge[i].p1.y) + DEBUG_WINDOW_WIDTH/2,
METER2PIXEL(hEdge[i].p1.x));
CvPoint p2 = cvPoint(METER2PIXEL(hEdge[i].p2.y) + DEBUG_WINDOW_WIDTH/2,
METER2PIXEL(hEdge[i].p2.x));
// cvLine(_tmpImg, p1, p2, cvScalar(0,128,255), 2);
CvPoint pm = cvPoint((p1.x + p2.x) / 2, (p1.y + p2.y) / 2);
/* line thickness encodes pattern confidence: U > P > O */
int thick;
if (hEdge[i].type == 'U')
thick = 3;
else if (hEdge[i].type == 'P')
thick = 2;
else
thick = 1;
cvLine(_tmpImg, cvPoint(DEBUG_WINDOW_WIDTH/2, 0), pm, cvScalar(0,128,255), thick);
}
cvFlip(_tmpImg, NULL, -1);
cvResize(_tmpImg, _debugImage, CV_INTER_NN);
cvShowImage("Legs detector", _debugImage);
if (_delay)
cvWaitKey(_delay); // handles event processing of HIGHGUI library
}
return;
}
开发者ID:HVisionSensing,项目名称:lirec,代码行数:101,代码来源:LegsDetector.cpp
示例11: main
/*
 * AUV vision main loop: captures 160x120 frames from camera 1, thresholds
 * them in HSV, splits the frame into top/bottom regions of interest, and
 * (in the omitted tail) tracks coloured blobs in each region.
 * (Tail of main() omitted by the example site.)
 */
int main() {
CvPoint pt1b,pt2b, pt1t,pt2t,ptarry[4];
int tempwidth,tempheight;
/* top/bottom strips of the (downscaled) frame used for blob tracking */
CvRect regt,rectROIbot,rectROItop;
rectROItop=cvRect(0,0,80,10);
rectROIbot=cvRect(0,50,80,10);
CvPoint b_cir_center,t_cir_center;
CvPoint frame_center;
CvPoint A,B,C,D;
CvPoint temp;
double angle,spinsize;
int cir_radius=1;
int frame_width=160, frame_height=120;
IplImage* frame;
IplImage* threshframe;
IplImage* hsvframe;
IplImage* threshframebot;
IplImage* threshframetop;
IplImage* modframe;
IplImage* dilframetop;
IplImage* dilframebot;
int moddiv=2,seq=0,seqdiv=2;
int release=0, rmax=100;
int modfheight, modfwidth;
/* serial output to the microcontroller -- currently disabled */
unsigned char sendBuf;/*
int serial;
serial = openSerial("/dev/ttyACM0");
if (serial == -1)
serial = openSerial("/dev/ttyACM1");
if (serial == -1)
serial = openSerial("/dev/ttyACM2");
if (serial == -1)
serial = openSerial("/dev/ttyACM3");
if (serial == -1)
serial = openSerial("/dev/ttyACM4");
if (serial == -1)
serial = openSerial("/dev/ttyACM5");
if (serial == -1)
serial = openSerial("/dev/ttyACM6");
if (serial == -1)
serial = openSerial("/dev/ttyACM7");
if (serial == -1)
serial = openSerial("/dev/ttyACM8");
if( serial == -1 ) {
return -1;
}*/
//CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
CvCapture* capture = cvCaptureFromCAM( 1 );
if ( !capture ) {
fprintf(stderr, "ERROR: capture is NULL \n" );
getchar();
return -1;
}
cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,frame_width);// 120x160
cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,frame_height);
// cvSetCaptureProperty(capture, CV_CAP_PROP_FPS,10);
// cvSetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES,5);
// Create a window in which the captured images will be presented
cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
// Show the image captured from the camera in the window and repeat
while ( 1 ) {
// Get one frame
frame = cvQueryFrame( capture );
if ( !frame ) {
fprintf( stderr, "ERROR: frame is null...\n" );
getchar();
break;
}
/* work on a 1/moddiv downscaled copy of the frame */
modfheight = frame->height;
modfwidth = frame->width;
modframe = cvCreateImage(cvSize((int)(modfwidth/moddiv),(int)(modfheight/moddiv)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
cvResize(frame, modframe,CV_INTER_LINEAR);
// create HSV(Hue, Saturation, Value) frame
hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
/* binary mask of pixels inside the target HSV colour range */
threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
// cvInRangeS(hsvframe,cvScalar(0, 180, 140),cvScalar(15, 230, 235),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame) red
cvInRangeS(hsvframe,cvScalar(70, 180, 40),cvScalar(100, 230, 90),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
/* NOTE(review): the per-frame images created above are never released
 * inside the loop -- leaks every iteration; verify against upstream. */
threshframebot=cvCloneImage(threshframe);
cvSetImageROI(threshframebot,rectROIbot);
threshframetop=cvCloneImage(threshframe);
cvSetImageROI(threshframetop,rectROItop);
//////////////////////////////////////////////////////////////////////////////////////////
if (seq==0) {
threshframebot=cvCloneImage(threshframe);
cvSetImageROI(threshframebot,rectROIbot);
dilframebot = cvCreateImage(cvGetSize(threshframebot),8,1);
cvDilate(threshframebot,dilframebot,NULL,2); //cvDilate(input frame,
// tempwidth=cvGetSize(dilframebot).width;
// tempheight=cvGetSize(dilframebot).height;
// printf("dilframe: %d, %d \n",tempwidth,tempheight);
CBlobResult blobs_bot;
blobs_bot = CBlobResult(dilframebot,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
//......... rest of main() omitted by the example site .........
开发者ID:bhuneshwar21,项目名称:AUV,代码行数:101,代码来源:opt1.cpp
示例12: cvReleaseImage
/*
 * Face_Detection - find the largest frontal face (with valid landmarks and
 * width >= nMin_FaceSize) in pImgSrc. On success the aligned face crops
 * m_cutface_big / m_cutface_small are updated and a double-resolution
 * aligned thumbnail is written to ThumbnailImgFilename.
 * Returns true when such a face was found, false otherwise.
 */
bool CxFaceAnalyzer::Face_Detection(IplImage *pImgSrc,int nMin_FaceSize = 80, char *ThumbnailImgFilename=NULL)
{
	if(pImgSrc == NULL) return false;

	// (Re)build the grayscale working copy of the input frame.
	if( m_pImgGray != NULL )
		cvReleaseImage( &m_pImgGray );
	m_pImgGray = cvCreateImage(cvGetSize(pImgSrc), IPL_DEPTH_8U, 1);
	switch( pImgSrc->nChannels )
	{
	case 4:
		cvCvtColor( pImgSrc, m_pImgGray, CV_BGRA2GRAY );
		break;
	case 3:
		cvCvtColor( pImgSrc, m_pImgGray, CV_BGR2GRAY );
		break;
	case 1:
		cvCopy( pImgSrc, m_pImgGray );
		break;
	}

	// Restrict the detector to the requested face-size range, run it, and
	// mirror its output into the tracker-style rect array.
	SetFaceSizeRange(m_nFaceDetectorNo, nMin_FaceSize, pImgSrc->width*0.5);
	FdRect FaceArea[MAX_FACES];
	//m_face_num = m_Facedetector->detect( m_pImgGray, m_rects, MAX_FACES);
	//m_face_num = FrontalView_FaceDetection(m_nFaceDetectorNo, m_pImgGray, FaceArea);
	m_face_num = FrontalView_ColorImage_FaceDetection(m_nFaceDetectorNo, m_pImgGray, FaceArea, 0);
	for(int i=0;i<m_face_num;i++)
	{
		m_rects[i].rc.x = FaceArea[i].x;
		m_rects[i].rc.y = FaceArea[i].y;
		m_rects[i].rc.width = FaceArea[i].width;
		m_rects[i].rc.height = FaceArea[i].height;
		m_rects[i].angle = FaceArea[i].view;
	}
	ClearFaceSizeRange(m_nFaceDetectorNo);

	// Scan all detections and keep the widest one that passes the bounds,
	// size, and landmark checks.
	int bestWidth = -1;
	int bestIdx = -1;
	CvRect rect;
	CvPoint2D32f *landmark6;
	for( int i=0; i < m_face_num; i++ )
	{
		m_cutface_flag[i] = 0;

		rect = m_rects[i].rc;
		int angle = m_rects[i].angle;

		// Skip faces outside the image or below the minimum size.
		if(rect.x < 0 || rect.x+rect.width > m_pImgGray->width) continue;
		if(rect.y < 0 || rect.y+rect.height > m_pImgGray->height) continue;
		if(rect.width < nMin_FaceSize) continue;

		// Keep only faces whose 6-point landmarks can be located.
		landmark6 = m_ldmks[i];
		if( !m_plandmarkDetector->detect( m_pImgGray, &rect, landmark6, NULL, angle ) )
			continue;

		if(rect.width > bestWidth)
		{
			bestWidth = rect.width;
			bestIdx = i;
		}
	}

	if(bestIdx > -1)
	{
		landmark6 = m_ldmks[bestIdx];
		rect = m_rects[bestIdx].rc;

		// Aligned face crops at the two working resolutions.
		alignFace2(m_pImgGray, landmark6, &rect, m_cutface_big->width, m_cutface_big->height, false, m_age_sclxyud, m_cutface_big);
		cvResize(m_cutface_big, m_cutface_small);

		// Double-resolution aligned thumbnail written straight to disk.
		IplImage *lpTest = alignFace3(pImgSrc, landmark6, &rect, m_cutface_big->width * 2, m_cutface_big->height * 2, false, m_age_sclxyud, NULL);
		cvSaveImage(ThumbnailImgFilename,lpTest);
		cvReleaseImage(&lpTest);
	}

	cvReleaseImage(&m_pImgGray);
	m_pImgGray = NULL;

	return bestIdx > -1;
}
开发者ID:ruyiweicas,项目名称:FaceSmileAgeSex_Detection,代码行数:85,代码来源:cxfaceanalyzer.cpp
示例13: main
/*
 * Sparse Lucas-Kanade optical-flow demo: reads frames from a camera (no
 * args) or a video file (argv[1]), tracks good features between consecutive
 * frames via a CvGenericTracker, and draws the motion vectors.
 * (Tail of main() omitted by the example site.)
 */
int main(int argc, char** argv)
{
// GLOBAL SETTINGS
static int framecounter=0;
const CvSize imsize = cvSize(320,240);
int delay = 0;
const int win_size = 10;
/* pyramid buffers reused across frames by cvCalcOpticalFlowPyrLK */
CvSize pyr_sz = cvSize( imsize.width+8, imsize.height/3 );
IplImage * pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
IplImage * pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
IplImage * rawImage_resized = cvCreateImage( imsize, IPL_DEPTH_8U, 3);
cvNamedWindow("Test");
CvGenericTracker tracker;
// LOAD INPUT FILE
CvCapture * capture = NULL;
if (argc==1) {
capture = cvCreateCameraCapture(0);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, imsize.width);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, imsize.height);
}else{
capture = cvCreateFileCapture(argv[1]);
}
if (!capture) {fprintf(stderr, "Error: fail to open source video!\n");return 0;}
cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter);
// START ENDLESS LOOP
while(1)
{
// GET NEXT FRAME
/* explicit seek keeps file playback deterministic */
if (1){
cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter++);
}else{
framecounter++;
}
IplImage * rawImage = cvQueryFrame(capture);
/* NOTE(review): rawImage is resized before the NULL check on the next
 * line -- crashes at end of stream; check order looks inverted. */
cvResize(rawImage,rawImage_resized);
if (!rawImage) {fprintf(stderr, "Info: end of video!\n"); break;}
/* tracker keeps the previous frame (m_currImage) and current (m_nextImage) */
if (tracker.initialized()){
tracker.update(rawImage_resized);
}else{
tracker.initialize(rawImage_resized);
tracker.m_framecounter=framecounter;
}
// START PROCESSING HERE
{
// Initialize, load two images from the file system, and
// allocate the images and other structures we will need for
// results.
CvMat * imgA = tracker.m_currImage;
IplImage * imgB = tracker.m_nextImage;
IplImage * imgC = cvCloneImage(rawImage_resized);
// The first thing we need to do is get the features
// we want to track.
IplImage * eig_image = cvCreateImage( imsize, IPL_DEPTH_32F, 1 );
IplImage * tmp_image = cvCreateImage( imsize, IPL_DEPTH_32F, 1 );
int corner_count = MAX_CORNERS;
CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
cvGoodFeaturesToTrack(imgA,eig_image,tmp_image,cornersA,&corner_count,0.01,5.0,0,3,0,0.04);
/* refine the corners to sub-pixel accuracy before tracking */
cvFindCornerSubPix(imgA,cornersA,corner_count,cvSize(win_size,win_size),cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
// Call the Lucas Kanade algorithm
char features_found[ MAX_CORNERS ];
float feature_errors[ MAX_CORNERS ];
CvPoint2D32f * cornersB = new CvPoint2D32f[ MAX_CORNERS ];
/* CV_LKFLOW_PYR_B_READY reuses pyrB from the previous iteration */
cvCalcOpticalFlowPyrLK(imgA,imgB,pyrA,pyrB,
cornersA,cornersB,corner_count,cvSize( win_size,win_size ),
5,features_found,feature_errors,
cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
(framecounter<2)?0:CV_LKFLOW_PYR_B_READY);
// Now make some image of what we are looking at:
for( int i=0; i<corner_count; i++ ) {
/* skip lost features and features with a large tracking error */
if( features_found[i]==0|| feature_errors[i]>550 ) {
fprintf(stderr,"error=%f\n",feature_errors[i]);continue;
}
CvPoint p0 = cvPoint(cvRound( cornersA[i].x ),cvRound( cornersA[i].y ));
CvPoint p1 = cvPoint(cvRound( cornersB[i].x ),cvRound( cornersB[i].y ));
cvLine( imgC, p0, p1, CV_RGB(255,0,0), 1 );
}
cvShowImage("Test",imgC);
cvReleaseImage(&imgC);
cvReleaseImage(&eig_image);
cvReleaseImage(&tmp_image);
delete [] cornersA;
delete [] cornersB;
}
// DISPLAY PROCESSING RESULT
int key = cvWaitKey(delay)&0xff;
if (key==27){
break;
}else if (key==' '){
if (delay){ delay = 0; }else{ delay = 30; }
//......... rest of main() omitted by the example site .........
开发者ID:liangfu,项目名称:pwp,代码行数:101,代码来源:test043_optflow.cpp
示例14: detect_and_draw
/*
 * detect_and_draw - detect faces in 'temp', then detect eyes inside each
 * face region; rectangles are drawn on the grayscale copy, the last face is
 * resized to 100x100 and (for the first NUM_FACES faces) histogram-
 * equalised. Results are displayed via cvShowManyImages.
 */
void detect_and_draw( IplImage* temp )
{
    IplImage *grey = cvCreateImage(cvGetSize(temp), 8, 1);
    cvCvtColor(temp, grey, CV_RGB2GRAY);
    /* 100x100 normalised face crop plus its equalised variants */
    IplImage* face = cvCreateImage(cvSize(100,100), 8, 1);
    IplImage *faces_hist[NUM_FACES];
    int i,j;
    for(i=0;i<NUM_FACES;i++) {
        faces_hist[i] = cvCreateImage(cvSize(100,100), 8, 1);
        cvZero(faces_hist[i]);
    }
    cvZero(face);
    // Create two points to represent the face locations
    CvPoint pt1, pt2, e_pt1, e_pt2;
    // Clear the memory storage which was used before
    cvClearMemStorage( storage );
    // Find whether the cascade is loaded, to find the faces. If yes, then:
    if( cascade )
    {
        // There can be more than one face in an image. So create a growable sequence of faces.
        // Detect the objects and store them in the sequence
        CvSeq* faces = cvHaarDetectObjects( grey, cascade, storage,
                                            1.1, 2, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(40, 40) );
        if (faces)
            printf("Number of faces: %d\n", faces->total);
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
            pt1.x = r->x;
            pt2.x = (r->x+r->width);
            pt1.y = r->y;
            pt2.y = (r->y+r->height);
            /* search for eyes only inside the face rectangle */
            cvSetImageROI(grey, cvRect(pt1.x, pt1.y, r->width, r->height));
            CvSeq* eyes = cvHaarDetectObjects(grey, cascade_eyes, storage, 1.1, 5, 0, cvSize(25,15));
            /* BUGFIX: guard against a NULL sequence before dereferencing,
             * and cast to void* for %p (required by the C standard) */
            printf("Eyes: %p num: %d\n", (void *)eyes, eyes ? eyes->total : 0);
            for( j=0;j < (eyes ? eyes->total : 0); j++ ) {
                /* eye coordinates are relative to the face ROI */
                CvRect *e = (CvRect*)cvGetSeqElem(eyes, j);
                e_pt1.x = e->x;
                e_pt2.x = (e->x+e->width);
                e_pt1.y = e->y;
                e_pt2.y = (e->y+e->height);
                cvRectangle(grey, e_pt1, e_pt2, CV_RGB(255,255,255), 3, 8, 0);
            }
            /* normalise the face region to 100x100 (ROI is still active) */
            cvResize(grey, face, CV_INTER_LINEAR);
            cvResetImageROI(grey);
            if (i < NUM_FACES)
                cvEqualizeHist(face, faces_hist[i]);
            // Draw the rectangle in the input image
            cvRectangle( grey, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
        }
    }
    // Show the image in the window named "result"
    //cvShowImage( "result", temp );
    cvShowManyImages("result", 6, temp, grey, face, faces_hist[0], faces_hist[1], faces_hist[2]);
    // Release the temp image created.
    cvReleaseImage( &face );
    cvReleaseImage( &grey );
    for(i=0;i<NUM_FACES;i++)
        cvReleaseImage(&faces_hist[i]);
}
开发者ID:atiti,项目名称:OFR,代码行数:79,代码来源:facerecog.c
示例15: camera_control_query_frame
/**
 * Grab the next frame from the camera wrapped by `cc` and return it as a
 * 3-channel IplImage. The returned image is owned by the CameraControl
 * (either an internal buffer or the capture's frame) — the caller must
 * NOT release it, and it is only valid until the next call.
 *
 * Pipeline: acquire (CL-Eye driver or cvQueryFrame, chosen at compile
 * time) -> optional deinterlace (line doubling) -> optional undistortion
 * via the precomputed remap tables.
 */
IplImage *
camera_control_query_frame(CameraControl* cc)
{
IplImage* result;
#if defined(CAMERA_CONTROL_USE_CL_DRIVER)
// assign buffer-pointer to address of buffer
cvGetRawData(cc->frame4ch, &cc->pCapBuffer, 0, 0);
CLEyeCameraGetFrame(cc->camera, cc->pCapBuffer, 2000);
// convert 4ch image to 3ch image (channels 0..2 copied, alpha dropped)
const int from_to[] = { 0, 0, 1, 1, 2, 2 };
const CvArr** src = (const CvArr**) &cc->frame4ch;
CvArr** dst = (CvArr**) &cc->frame3ch;
cvMixChannels(src, 1, dst, 1, from_to, 3);
result = cc->frame3ch;
#else
long start = psmove_util_get_ticks();
result = cvQueryFrame(cc->capture);
psmove_DEBUG("cvQueryFrame: %ld ms\n", psmove_util_get_ticks() - start);
#endif
#if defined(PSMOVE_USE_DEINTERLACE)
/**
 * Dirty hack follows:
 *  - Clone image
 *  - Hack internal variables to make an image of all odd lines
 *
 * The clone's header fields are patched in place so that OpenCV sees a
 * half-height image whose rows are the odd scanlines of the original.
 **/
IplImage *tmp = cvCloneImage(result);
tmp->imageData += tmp->widthStep; // odd lines
tmp->widthStep *= 2;
tmp->height /= 2;
/**
 * Use nearest-neighbor to be faster. In my tests, this does not
 * cause a speed disadvantage, and tracking quality is still good.
 *
 * This will scale the half-height image "tmp" to the original frame
 * size by doubling lines (so we can still do normal circle tracking).
 **/
cvResize(tmp, result, CV_INTER_NN);
/**
 * Need to revert changes in tmp from above, otherwise the call
 * to cvReleaseImage would cause a crash.
 *
 * Order matters: widthStep must be restored to the original
 * (un-doubled) value BEFORE adjusting imageData, so the subtraction
 * below exactly undoes the `imageData += widthStep` done earlier.
 **/
tmp->height = result->height;
tmp->widthStep = result->widthStep;
tmp->imageData -= tmp->widthStep; // odd lines
cvReleaseImage(&tmp);
#endif
// undistort image using the calibration maps, if they were loaded
if (cc->mapx && cc->mapy) {
cvRemap(result, cc->frame3chUndistort,
cc->mapx, cc->mapy,
CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS,
cvScalarAll(0));
result = cc->frame3chUndistort;
}
return result;
}
开发者ID:fredoliveira,项目名称:psmoveapi,代码行数:65,代码来源:camera_control.c
示例16: process_and_show_images
//.........这里部分代码省略.........
cvPoint ((int) track_x_kalman (i, current_frame) + track_w_kalman (i, current_frame),
(int) track_y_kalman (i, current_frame) + track_h_kalman (i, current_frame)), CV_RGB (255,0,0), 2, 8, 0);
#endif
if (contcam[i] < 5) /* wait for having 5 values */
contcam[i]++;
if (contcam[i] == 5) {/* save stats */
/* get the current mean */
acumcam_x[i] = 0.0;
acumcam_y[i] = 0.0;
for (j = 0; j< 5; j++) {
acumcam_x[i] += track_px_global_kalman (i, current_frame - j);
acumcam_y[i] += track_py_global_kalman (i, current_frame - j);
}
px_global_mean_5 (i, current_frame) = acumcam_x[i] / (float) contcam[i];
py_global_mean_5 (i, current_frame) = acumcam_y[i] / (float) contcam[i];
for (j = 0; j < 5; j++) {
px_global_var_5 (i, current_frame) +=
|
请发表评论