本文整理汇总了C++中cvResetImageROI函数的典型用法代码示例。如果您正苦于以下问题:C++ cvResetImageROI函数的具体用法?C++ cvResetImageROI怎么用?C++ cvResetImageROI使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvResetImageROI函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: main
// Feature-matching demo: loads two still images, extracts SIFT(GPU)
// keypoints from both, matches them through a FLANN search tree, estimates
// a RANSAC homography and flags outliers, then draws the match pairs
// (green = inlier, red = outlier) on a side-by-side composite until the
// user presses 'q'/ESC ('s' saves a snapshot).
// Returns 0 on success, -1 when an input image cannot be loaded.
int main()
{
    // Load the input images and build grayscale versions for detection.
    IplImage* image1 = cvLoadImage(IMAGE_FILE_NAME_1);
    IplImage* image2 = cvLoadImage(IMAGE_FILE_NAME_2);
    if(image1 == NULL || image2 == NULL)
        return -1; // missing or unreadable input file
    IplImage* grayImage1 = cvCreateImage(cvGetSize(image1), IPL_DEPTH_8U, 1);
    IplImage* grayImage2 = cvCreateImage(cvGetSize(image2), IPL_DEPTH_8U, 1);
    IplImage* resultImage = cvCreateImage(cvSize(image1->width*2, image1->height), IPL_DEPTH_8U, 3);
    cvCvtColor(image1, grayImage1, CV_BGR2GRAY);
    cvCvtColor(image2, grayImage2, CV_BGR2GRAY);

    // Matching pipeline: detector -> search tree -> homography estimator -> outlier checker.
    windage::Algorithms::FeatureDetector* detector = new windage::Algorithms::SIFTGPUdetector();
    windage::Algorithms::SearchTree* tree = new windage::Algorithms::FLANNtree();
    windage::Algorithms::HomographyEstimator* estimator = new windage::Algorithms::RANSACestimator();
    windage::Algorithms::OutlierChecker* checker = new windage::Algorithms::OutlierChecker();

    std::vector<windage::FeaturePoint>* feature = NULL;
    std::vector<windage::FeaturePoint> feature1;
    std::vector<windage::FeaturePoint> feature2;
    std::vector<windage::FeaturePoint> matching1;
    std::vector<windage::FeaturePoint> matching2;

    tree->SetRatio(0.3);
    estimator->AttatchReferencePoint(&matching1);
    estimator->AttatchScenePoint(&matching2);
    estimator->SetReprojectionError(REPROJECTION_ERRPR);
    checker->AttatchEstimator(estimator);
    checker->SetReprojectionError(REPROJECTION_ERRPR);

    cvNamedWindow("result");
    bool processing = true;
    while(processing)
    {
        feature1.clear();
        feature2.clear();
        matching1.clear();
        matching2.clear();

        // Extract keypoints from both images. The detector exposes a single
        // output buffer, so copy the results out before the second extraction
        // overwrites them.
        detector->DoExtractKeypointsDescriptor(grayImage1);
        feature = detector->GetKeypoints();
        feature1.insert(feature1.end(), feature->begin(), feature->end());

        detector->DoExtractKeypointsDescriptor(grayImage2);
        feature = detector->GetKeypoints();
        feature2.insert(feature2.end(), feature->begin(), feature->end());

        Matching(tree, &feature1, &feature2, &matching1, &matching2);
        estimator->Calculate();
        checker->Calculate();

        // Compose the two inputs side by side into resultImage.
        cvSetImageROI(resultImage, cvRect(0, 0, image1->width, image1->height));
        cvCopyImage(image1, resultImage);
        cvSetImageROI(resultImage, cvRect(image1->width, 0, image1->width, image1->height));
        cvCopyImage(image2, resultImage);
        cvResetImageROI(resultImage);

        // Draw match lines: green for inliers, red for outliers.
        int count = (int)matching1.size();
        for(int i=0; i<count; i++)
        {
            if(matching1[i].IsOutlier() == false)
                cvLine(resultImage, cvPoint((int)matching1[i].GetPoint().x, (int)matching1[i].GetPoint().y), cvPoint(image1->width + (int)matching2[i].GetPoint().x, (int)matching2[i].GetPoint().y), CV_RGB(0, 255, 0));
            else
                cvLine(resultImage, cvPoint((int)matching1[i].GetPoint().x, (int)matching1[i].GetPoint().y), cvPoint(image1->width + (int)matching2[i].GetPoint().x, (int)matching2[i].GetPoint().y), CV_RGB(255, 0, 0));
        }

        cvShowImage("result", resultImage);

        char ch = cvWaitKey(1);
        switch(ch)
        {
        case 's':
        case 'S':
            cvSaveImage("FeaturePairMatching.png", resultImage);
            break;
        case 'q':
        case 'Q':
        case 27:
            processing = false;
            break;
        }
    }
    cvDestroyAllWindows();

    // Release every image and algorithm object (all were leaked in the
    // original example).
    cvReleaseImage(&image1);
    cvReleaseImage(&image2);
    cvReleaseImage(&grayImage1);
    cvReleaseImage(&grayImage2);
    cvReleaseImage(&resultImage);
    delete detector;
    delete tree;
    delete estimator;
    delete checker;
    return 0;
}
开发者ID:Barbakas, 项目名称:windage, 代码行数:97, 代码来源:main.cpp
示例2: recognize
// Template-based sign recognition.
// For every sub-image extracted from the processed input image, matches it
// against all TEMPLATES_NUM templates; when a sufficiently good match is
// found, draws a 30x30 resized copy of the winning template onto a clone of
// the original image, which is finally shown in the "result" window.
//
// original_images           - source frame used only for display
// processed_original_images - preprocessed frame the sub-images are cut from
// templates                 - array of TEMPLATES_NUM template images
void recognize(IplImage* original_images, IplImage* processed_original_images, IplImage** templates)
{
    IplImage* drawed_original_images = cvCloneImage(original_images);
    //extract the sub images first and do the matching image by image.
    sub_image* sub_images = extract_sub_image(processed_original_images,MIN_SUB_IMAGE_WIDTH,MIN_SUB_IMAGE_HEIGHT);
    int ori_flag = 0;
    while(sub_images != NULL)
    {
        // Reset the matching state for every sub-image; in the original code
        // these values were declared outside the loop and leaked from one
        // sub-image to the next, corrupting later decisions.
        double matching_result = 0;
        double matching_max = 0;
        double matching_first_max = 0;
        int template_flag = -1;

        //different original sub image to be matched.
        printf("ori_flag = %d\n",ori_flag++);
        for(int i = 0; i < TEMPLATES_NUM; i++)
        {
            //match the given image with the provided templates.
            //extract the template as well to cut the black edges which might lead to negative impacts on the matching.
            sub_image* temp_template = extract_sub_image(templates[i],MIN_SUB_IMAGE_WIDTH,MIN_SUB_IMAGE_HEIGHT);
            //match the original sub image with template.
            matching_result = template_original_match(sub_images->image, temp_template->image);
            printf("with template %d, result = %f\n",i,matching_result);
            //track the maximum score (template 0 just seeds the maximum)
            if(i == 0)
            {
                matching_first_max = matching_result;
                matching_max = matching_result;
            }
            else if(matching_result > matching_max)
            {
                matching_max = matching_result;
                template_flag = i;
            }
        }
        // If nothing beat template 0, template 0 is the winner.
        if(matching_first_max == matching_max)
        {
            template_flag = 0;
        }
        //if the object is not found or the match result is not reasonable, do nothing.
        if(template_flag == -1 || matching_max < MATCHING_FALIED_THRESHOLD)
        {
            printf("image not matched\n");
        }
        else
        {
            //draw the matched template on the original image.
            IplImage* drawed_templates_resized = cvCreateImage(cvSize(30,30),processed_original_images->depth,processed_original_images-> nChannels);
            sub_image* min_temp_template = extract_sub_image(templates[template_flag],MIN_SUB_IMAGE_WIDTH,MIN_SUB_IMAGE_HEIGHT);
            cvResize(min_temp_template->image,drawed_templates_resized);
            printf("draw left = %d, top = %d\n",sub_images->image_left,sub_images->image_top);
            //paste the resized template at the sub-image position.
            cvSetImageROI(drawed_original_images, cvRect(sub_images->image_left, sub_images->image_top, drawed_templates_resized->width, drawed_templates_resized->height));
            cvCopy(drawed_templates_resized, drawed_original_images);
            cvResetImageROI(drawed_original_images);
            cvReleaseImage(&drawed_templates_resized); // was leaked once per match
        }
        sub_images = sub_images->next_image;
    }
    cvShowImage("result",drawed_original_images);
    // cvShowImage copies the pixel data into the window buffer, so the clone
    // can be released here (fixes a per-call leak of a full frame).
    cvReleaseImage(&drawed_original_images);
    // NOTE(review): the sub_image nodes returned by extract_sub_image are
    // never freed here -- confirm ownership against extract_sub_image.
}
开发者ID:hahalaugh, 项目名称:ComputerVision, 代码行数:72, 代码来源:road_signs.cpp
示例3: cvLoadImage
//.........这里部分代码省略.........
}
}
for (ILR ci = line.begin();ci != line.end();ci++)
{
int rate = ((double)ci->width/(double)ci->height) /((double)avgw/(double)avgh*2);
rate++;
if (rate > 1)
{
int x = ci->x;
int y = ci->y;
int h = ci->height;
int w = ci->width;
ci = line.erase(ci);
for(int a = rate;a > 0 ;a--)
{
CvRect add = {x+w/rate*(rate-a),y,w/rate,h};
if (ci == line.end())
{
line.push_back(add);
ci = line.end();
}
else
{
line.insert(ci,add);
}
}
ci--;
}
}
int c = 0;
i = line.begin();
IplImage* imgNo = cvCreateImage(cvSize(i->width, i->height), IPL_DEPTH_8U, 1);
cvSetImageROI(img_check, *i);
cvCopyImage(img_check, imgNo);
cvResetImageROI(img_check);
char temp;
temp = classify(imgNo,0);
printf("%c",temp);
c = c*10 + classify(imgNo,0);
output += temp;
//cvDrawRect(imgSrc, cvPoint(i->x, i->y), cvPoint(i->x + i->width, i->y + i->height), CV_RGB(255, 0, 0));
int lastX = i->x+i->width;
lastY = i->y;
for (i++;i != line.end(); ++i)
{
buu++;
if (i->x - lastX > (countSpace / (line.size() - 1)))
{
/*
//cvDrawRect(imgSrc, cvPoint(lastX, lastY), cvPoint(lastX + avgw, lastY + avgh), CV_RGB(255, 0, 0));
CvRect space = {lastX, lastY, avgw, avgh};
imgNo = cvCreateImage(cvSize(avgw, avgh), IPL_DEPTH_8U, 1);
cvSetImageROI(img_check, space);
cvCopyImage(img_check, imgNo);
cvResetImageROI(img_check);
temp = classify(imgNo,0);
c = c*10 + classify(imgNo,0);
output += temp;
imgNo = cvCreateImage(cvSize(i->width, i->height), IPL_DEPTH_8U, 1);
cvSetImageROI(img_check, *i);
cvCopyImage(img_check, imgNo);
cvResetImageROI(img_check);
temp = classify(imgNo,0);
*/
printf(" ",temp);
}
lastX = i->x+i->width;
lastY = i->y;
imgNo = cvCreateImage(cvSize(i->width, i->height), IPL_DEPTH_8U, 1);
cvSetImageROI(img_check, *i);
cvCopyImage(img_check, imgNo);
cvResetImageROI(img_check);
temp = classify(imgNo,0);
printf("%c",temp);
c = c*10 + classify(imgNo,0);
output += temp;
//char szName[56] = {0};
//sprintf(szName, "%d", idx++);
//cvNamedWindow(szName);
//cvShowImage(szName, imgNo);
//cvDrawRect(imgSrc, cvPoint(i->x, i->y), cvPoint(i->x + i->width, i->y + i->height), CV_RGB(255, 0, 0));
}
output += "\n";
printf("\n");
//printf("%d\n",c);
}
printf("轮廓个数:%d",++buu);
/*
cvNamedWindow("src");
cvShowImage("src", imgSrc);
cvWaitKey(0);
cvReleaseMemStorage(&storage);
cvReleaseImage(&imgSrc);
cvReleaseImage(&img_gray);
cvDestroyAllWindows();
*/
return output;
}
开发者ID:kanhaolong, 项目名称:OCR-ASCII, 代码行数:101, 代码来源:basicOCR.cpp
示例4: cvGetSize
// TODO: revise later (translated from the original Korean note "추후 수정")
// Verifies and refines the key-button corner positions of the paper keyboard.
// Pipeline: threshold the bright keypad regions out of srcImage, isolate the
// enclosed button blobs via flood-fill + XOR, detect up to 316 sub-pixel
// corners, warp the region to a canonical 640x480 view to re-detect the
// corners there, then map them back into source-image coordinates with the
// inverted perspective matrix. Results end up in the keyButtonCorner member.
void FkPaperKeyboard_TypeA::cornerVerification(IplImage* srcImage){
CvSize size = cvGetSize(srcImage);
IplImage* eigImage = cvCreateImage(size, IPL_DEPTH_8U,1);
IplImage* tempImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* grayImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* veriImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* dstImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
IplImage* mask2 = cvCreateImage(size, IPL_DEPTH_8U, 1);
// Working area: the full 640x480 frame minus a 10px border on every side.
CvRect rect = cvRect(10, 10, 640 - 20, 480 - 20);
CvPoint2D32f srcQuad[4], dstQuad[4];
CvMat* warp_matrix = cvCreateMat(3,3, CV_32FC1);
CvMat* warp_matrix_invert = cvCreateMat(3,3, CV_32FC1);
CvMat* result = cvCreateMat(3, 1, CV_32FC1);
CvMat* dst = cvCreateMat(3, 1,CV_32FC1);
int keyButtonCornerCount = 316;
cvCvtColor(srcImage, grayImage, CV_BGR2GRAY);
cvSetImageROI(grayImage, rect);
cvSetImageROI(mask, rect);
cvSetImageROI(dstImage, rect);
cvSetImageROI(mask2, rect);
// Keep only bright pixels in the mask (original comment said 150~255,
// but the code actually thresholds at 100~255).
cvInRangeS(grayImage, cvScalar(100, 100, 100), cvScalar(255, 255, 255), mask);
cvCopy(mask, mask2);
//cvShowImage("mask", mask);
//cvShowImage("mask2", mask2);
// Flood-fill the connected background of `mask` to 0 starting near the
// corner, then XOR with the untouched copy (mask2) so that only the
// enclosed (key-button) regions below the fill survive in dstImage.
cvFloodFill(mask, cvPoint(10, 10), cvScalar(0, 0, 0));
cvXor(mask2, mask, dstImage);
//cvShowImage("mask3", mask);
//cvShowImage("mask4", mask2);
//cvShowImage("dstImage", dstImage);
// Extract the corners of each key button from the final binary image,
// then refine them to sub-pixel accuracy.
cvGoodFeaturesToTrack(dstImage, eigImage, tempImage, keyButtonCorner, &keyButtonCornerCount, 0.01, 7, NULL, 7, 0);
cvFindCornerSubPix (dstImage, keyButtonCorner, keyButtonCornerCount,cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
cvResetImageROI(dstImage);
// Convert ROI-relative corner coordinates back to full-image coordinates.
for(int i =0 ; i < 316 ; i++){
keyButtonCorner[i].x += rect.x;
keyButtonCorner[i].y += rect.y;
}
initKeyButtonCorner();
// Source quad: outermost detected corners expanded by a 10px margin,
// indexed clockwise. Specific indices (315/31/0/290) select the four
// extreme buttons of the grid.
srcQuad[CLOCKWISE_1].x = keyButtonCorner[315].x+10;
srcQuad[CLOCKWISE_1].y = keyButtonCorner[315].y-10;
srcQuad[CLOCKWISE_5].x = keyButtonCorner[31].x + 10;
srcQuad[CLOCKWISE_5].y = keyButtonCorner[31].y + 10;
srcQuad[CLOCKWISE_7].x = keyButtonCorner[0].x - 10;
srcQuad[CLOCKWISE_7].y = keyButtonCorner[0].y + 10;
srcQuad[CLOCKWISE_11].x = keyButtonCorner[290].x - 10;
srcQuad[CLOCKWISE_11].y = keyButtonCorner[290].y - 10;
// Destination quad: the full canonical 640x480 frame.
dstQuad[CLOCKWISE_1].x = 640;
dstQuad[CLOCKWISE_1].y = 0;
dstQuad[CLOCKWISE_5].x = 640;
dstQuad[CLOCKWISE_5].y = 480;
dstQuad[CLOCKWISE_7].x = 0;
dstQuad[CLOCKWISE_7].y = 480;
dstQuad[CLOCKWISE_11].x = 0;
dstQuad[CLOCKWISE_11].y = 0;
// Warp the binary image to the canonical view and re-detect corners there.
cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix);
cvWarpPerspective(dstImage, veriImage, warp_matrix);
detectKeyButtonCorner(veriImage);
// Map each re-detected corner back through the inverse homography,
// dividing by the homogeneous coordinate t.
cvInvert(warp_matrix, warp_matrix_invert);
for(int i = 0 ; i < 316 ; i++){
cvmSet(dst, 0, 0, keyButtonCorner[i].x);
cvmSet(dst, 1, 0, keyButtonCorner[i].y);
cvmSet(dst, 2, 0, 1);
cvMatMul(warp_matrix_invert, dst, result);
float t = cvmGet(result, 2,0);
keyButtonCorner[i].x = cvmGet(result, 0,0)/t ;
keyButtonCorner[i].y = cvmGet(result, 1,0)/t ;
}
cvResetImageROI(srcImage);
cvResetImageROI(mask);
cvReleaseImage(&eigImage);
cvReleaseImage(&tempImage);
cvReleaseImage(&grayImage);
cvReleaseImage(&veriImage);
cvReleaseImage(&dstImage);
cvReleaseImage(&mask);
cvReleaseImage(&mask2);
cvReleaseMat(&warp_matrix);
cvReleaseMat(&warp_matrix_invert);
cvReleaseMat(&result);
cvReleaseMat(&dst);
}
开发者ID:FingerKeyboard-jh-sm, 项目名称:FingerKeyboard, 代码行数:98, 代码来源:FkPaperKeyboard_TypeA.cpp
示例5: drawSquaresAndCrop
// the function draws all the squares in the image and crop and save images
// Draws every detected square onto a copy of imgFilter and, for each square
// that is not (nearly) the whole frame, crops the square region and saves it
// as "<basename>_<n>.<ext>" next to the input file.
//
// pFileName - path of the source image (used to derive the output names;
//             assumed to end in a 3-character extension, e.g. ".png")
// imgFilter - image the squares are drawn on and cropped from
// img       - original image, used only to reject full-frame squares
// squares   - flat CvSeq of CvPoint, 4 consecutive vertices per square
void drawSquaresAndCrop(char *pFileName, IplImage* imgFilter, IplImage* img, CvSeq* squares )
{
    CvSeqReader reader;
    IplImage* cpy = cvCloneImage( imgFilter );
    IplImage* cpyc = cvCloneImage( imgFilter );
    char sFileNameCroped[255];

    // initialize reader of the sequence
    cvStartReadSeq( squares, &reader, 0 );

    // read 4 sequence elements at a time (all vertices of a square)
    for(int iCnt=0, i = 0; i < squares->total; i += 4,iCnt++ )
    {
        CvPoint pt[4], *rect = pt;
        int count = 4;

        // read 4 vertices
        CV_READ_SEQ_ELEM( pt[0], reader );
        CV_READ_SEQ_ELEM( pt[1], reader );
        CV_READ_SEQ_ELEM( pt[2], reader );
        CV_READ_SEQ_ELEM( pt[3], reader );

        // draw the square as a closed polyline
        cvPolyLine( cpy, &rect, &count, 1, 1, CV_RGB(0,255,0), 3, CV_AA, 0 );

        // get the area to crop
        CvRect rc = GetRect(pt);

        // skip squares that cover (almost) the whole image
        if (abs(rc.width-img->width)>POINTS_NEAR ||
            abs(rc.height-img->height)>POINTS_NEAR){
            // draw the crop area
            CvPoint pt1, pt2;
            pt1.x = rc.x;
            pt1.y = rc.y;
            pt2.x = pt1.x+rc.width;
            pt2.y = pt1.y+rc.height;
            cvRectangle(cpy, pt1, pt2, CV_RGB(0,0,255),2);

            // set the Region of Interest
            // Note that the rectangle area has to be __INSIDE__ the image
            cvSetImageROI(cpyc, rc);

            // create destination image
            // (cvGetSize returns the width/height of the ROI)
            IplImage *img1 = cvCreateImage(cvGetSize(cpyc),
                                           cpyc->depth,
                                           cpyc->nChannels);
            // copy subimage
            cvCopy(cpyc, img1, NULL);
            // always reset the ROI on the image it was set on
            // (the original reset img1, which never had a ROI)
            cvResetImageROI(cpyc);

            // build "<basename>_<iCnt>.<ext>" and save the crop
            char stype[32];
            char sFile[255];
            strcpy(sFile, pFileName);
            strcpy(stype, &(pFileName[strlen(pFileName)-3]));
            sFile[strlen(pFileName)-4] = '\0'; // was `= NULL` (pointer constant) in the original
            sprintf(sFileNameCroped, "%s_%d.%s", sFile,iCnt,stype);
            cvSaveImage(sFileNameCroped, img1);
            cvReleaseImage(&img1); // fixes a per-square leak
        }
    }
    // show the resultant image
    cvShowImage( wndname, cpy );
    cvReleaseImage( &cpy );
    cvReleaseImage( &cpyc );
}
开发者ID:AnthonyNystrom, 项目名称:Pikling, 代码行数:73, 代码来源:Croper.cpp
示例6: cvSetImageROI
// Locates the vertical extent (winTop/winBottom) of the windscreen inside
// the previously determined horizontal window [winLeft, winRight].
// Strategy: build a smoothed horizontal-gradient histogram over the window,
// then derive winBottom either from the detected licence plate geometry or,
// when no plate is visible, from the reviser's prediction; winTop is set as
// a fixed proportion of the window width above winBottom. All results are
// clamped to the image bounds before returning.
void WindscreenLocator::locateBottomTop()
{
// Accumulate the horizontal-gradient histogram restricted to the
// horizontal window, then smooth it three times.
cvSetImageROI(imgGradH, cvRect(winLeft, 0, winRight - winLeft, imgGradH->height));
histoStat(imgGradH, NULL, horizon2Y, thresholdGrad, 0);
cvResetImageROI(imgGradH);
histoSmooth(horizon2Y, imgGrad->height, 5);
histoSmooth(horizon2Y, imgGrad->height, 10);
histoSmooth(horizon2Y, imgGrad->height, 10);
int yCrest[20];
int yCrestNr = 20;
histoCrest(horizon2Y, imgGrad->height, yCrest, yCrestNr);
int crest = 0;
bool usePrediction = false;
winTop = 0;
winBottom = imgRGB->height;
// if(plate->isValid() && reviser != NULL){
// Case 1: a licence plate was found inside the horizontal window --
// derive the windscreen bottom from the plate position and width.
if(plate->isValid()
&& (plate->getMiddlePoint().x > winLeft
&& plate->getMiddlePoint().x < winRight)){
int plateWidth = plate->getPlateWidth();
if(reviser && reviser->getPlateWidth() > 0)
plateWidth = reviser->getPlateWidth();
CvPoint mid = plate->getMiddlePoint();
// Wider windows get a larger plate-to-windscreen offset factor.
if(winRight - winLeft > 4.5 * plateWidth)
winBottom = mid.y - 80 / 35.0 * plateWidth;
else
winBottom = mid.y - 60.0/35.0 * plateWidth;
// crest = findLikelytCrest(horizon2Y, imgGrad->height, winBottom, winBottom - plateWidth * 0.3, winBottom + plateWidth * 0.3);
pdebug("winBottom=%d, crest=%d\n", winBottom, crest);
// crest is currently always 0 here (the refinement above is disabled),
// so this branch is effectively a no-op -- kept for when it is re-enabled.
if(crest > 0 && crest < imgRGB->height)
winBottom = crest;
if(reviser)
reviser->statLowerBorder(plate, winLeft, winRight, winBottom);
}else if(reviser != NULL){
// Case 2: no usable plate -- fall back to the reviser's prediction.
int plateWidth = reviser->getPlateWidth();
usePrediction = true;
pdebug("License not Found, Use Prediction.\n");
winBottom = reviser->predictLowerBorder(plate, winLeft, winRight);
// crest = findLikelytCrest(horizon2Y, imgGrad->height, winBottom, winBottom - plateWidth * 0.3, winBottom + plateWidth * 0.3);
crest = reviser->predictLowerBorder(plate, winLeft, winRight); //
// No prediction available either: fall back to fixed image fractions.
if(crest == 0){
crest = imgRGB->height - 1;
winTop = imgRGB->height * 0.25;
winBottom = imgRGB->height * 0.9;
winBottom = std::max(0, winBottom);
winBottom = std::min(imgRGB->height - 1, winBottom);
winTop = std::min(winTop, winBottom - 1);
winTop = std::max(0, winTop);
return;
}
if(crest <= winBottom)
crest += 0.5 * plateWidth; // widen the predicted range
if(crest > 0 && crest < imgRGB->height)
winBottom = crest;
}
// winTop: a fixed proportion of the window width above winBottom;
// predictions get a more generous (taller) window.
if(!usePrediction){
if(plate->isValid())
winTop = winBottom - (winRight - winLeft) * 0.65;
else
winTop = 0;
}else{
// winTop = winBottom - (winRight - winLeft) * 0.60; // widened range for prediction
winTop = winBottom - (winRight - winLeft) * 0.75; // widened range for prediction
}
// Clamp everything into the image and keep winTop strictly above winBottom.
winBottom = std::max(0, winBottom);
winBottom = std::min(imgRGB->height - 1, winBottom);
winTop = std::min(winTop, winBottom - 1);
winTop = std::max(0, winTop);
}
开发者ID:dalinhuang, 项目名称:my-anquandai, 代码行数:80, 代码来源:windscreen_locator.cpp
示例7: main
//.........这里部分代码省略.........
fatigueState = 1;
cvWaitKey(0);
}
continue;
}
else{
// 统计连续未检测到人脸的次数中的最大数值
(failFaceDuration > maxFailFaceDuration) ? maxFailFaceDuration = failFaceDuration : maxFailFaceDuration;
failFaceDuration = 0;
// 找到检测到的最大的人脸矩形区域
temp = 0;
for(i = 0; i < (objectsTemp ? objectsTemp->total : 0); i ++) {
CvRect* rect = (CvRect*) cvGetSeqElem(objectsTemp, i);
if ( (rect->height * rect->width) > temp ){
largestFaceRect = rect;
temp = rect->height * rect->width;
}
}
// 根据人脸的先验知识分割出大致的人眼区域
temp = largestFaceRect->width / 8;
largestFaceRect->x = largestFaceRect->x + temp;
largestFaceRect->width = largestFaceRect->width - 3*temp/2;
largestFaceRect->height = largestFaceRect->height / 2;
largestFaceRect->y = largestFaceRect->y + largestFaceRect->height / 2;
largestFaceRect->height = largestFaceRect->height / 2;
cvSetImageROI(img, *largestFaceRect); // 设置ROI为检测到的最大的人脸区域
faceImg = cvCreateImage(cvSize(largestFaceRect->width, largestFaceRect->height), IPL_DEPTH_8U, 1);
cvCopy(img, faceImg, NULL);
cvResetImageROI(img); // 释放ROI
cvShowImage("分割后的人脸", faceImg);
eyeRectTemp = *largestFaceRect;
// 根据人脸的先验知识分割出大致的左眼区域
largestFaceRect->width /= 2;
cvSetImageROI(img, *largestFaceRect); // 设置ROI为检测到的最大的人脸区域
lEyeImg = cvCreateImage(cvSize(largestFaceRect->width, largestFaceRect->height), IPL_DEPTH_8U, 1);
cvCopy(img, lEyeImg, NULL);
cvResetImageROI(img); // 释放ROI
cvShowImage("大致的左眼区域", lEyeImg);
// 根据人脸的先验知识分割出大致的右眼区域
eyeRectTemp.x += eyeRectTemp.width / 2;
eyeRectTemp.width /= 2;
cvSetImageROI(img, eyeRectTemp); // 设置ROI为检测到的最大的人脸区域
rEyeImg = cvCreateImage(cvSize(eyeRectTemp.width, eyeRectTemp.height), IPL_DEPTH_8U, 1);
cvCopy(img, rEyeImg, NULL);
cvResetImageROI(img); // 释放ROI
cvShowImage("大致的右眼区域", rEyeImg);
/********************************** 二值化处理 ***********************************/
// 图像增强:直方图均衡化在detectFace中实现了一次;可尝试非线性点运算
/*** 二值化左眼大致区域的图像 ***/
//lineTrans(lEyeImg, lEyeImg, 1.5, 0); // 线性点运算
cvSmooth(lEyeImg, lEyeImg, CV_MEDIAN); // 中值滤波 默认窗口大小为3*3
nonlineTrans(lEyeImg, lEyeImg, 0.8); // 非线性点运算
memset(hist, 0, sizeof(hist)); // 初始化直方图的数组为0
histogram(lEyeImg, hist); // 计算图片直方图
// 计算最佳阈值
pixelSum = lEyeImg->width * lEyeImg->height;
threshold = ostuThreshold(hist, pixelSum, 45);
cvThreshold(lEyeImg, lEyeImg, threshold, 255, CV_THRESH_BINARY);// 对图像二值化
开发者ID:9reyson, 项目名称:FatigueDrivingReco, 代码行数:67, 代码来源:main.cpp
示例8: camKalTrack
//=========================================
CvRect camKalTrack(IplImage* frame, camshift_kalman_tracker& camKalTrk) {
//=========================================
if (!frame)
printf("Input frame empty!\n");
cvCopy(frame, camKalTrk.image, 0);
cvCvtColor(camKalTrk.image, camKalTrk.hsv, CV_BGR2HSV); // BGR to HSV
if (camKalTrk.trackObject) {
int _vmin = vmin, _vmax = vmax;
cvInRangeS(camKalTrk.hsv, cvScalar(0, smin, MIN(_vmin,_vmax), 0), cvScalar(180, 256, MAX(_vmin,_vmax), 0), camKalTrk.mask); // MASK
cvSplit(camKalTrk.hsv, camKalTrk.hue, 0, 0, 0); // HUE
if (camKalTrk.trackObject < 0) {
float max_val = 0.f;
boundaryCheck(camKalTrk.originBox, frame->width, frame->height);
cvSetImageROI(camKalTrk.hue, camKalTrk.originBox); // for ROI
cvSetImageROI(camKalTrk.mask, camKalTrk.originBox); // for camKalTrk.mask
cvCalcHist(&camKalTrk.hue, camKalTrk.hist, 0, camKalTrk.mask); //
cvGetMinMaxHistValue(camKalTrk.hist, 0, &max_val, 0, 0);
cvConvertScale(camKalTrk.hist->bins, camKalTrk.hist->bins, max_val ? 255. / max_val : 0., 0); // bin [0,255]
cvResetImageROI(camKalTrk.hue); // remove ROI
cvResetImageROI(camKalTrk.mask);
camKalTrk.trackWindow = camKalTrk.originBox;
camKalTrk.trackObject = 1;
camKalTrk.lastpoint = camKalTrk.predictpoint = cvPoint(camKalTrk.trackWindow.x + camKalTrk.trackWindow.width / 2,
camKalTrk.trackWindow.y + camKalTrk.trackWindow.height / 2);
getCurrState(camKalTrk.kalman, camKalTrk.lastpoint, camKalTrk.predictpoint);//input curent state
}
//(x,y,vx,vy),
camKalTrk.prediction = cvKalmanPredict(camKalTrk.kalman, 0);//predicton=kalman->state_post
camKalTrk.predictpoint = cvPoint(cvRound(camKalTrk.prediction->data.fl[0]), cvRound(camKalTrk.prediction->data.fl[1]));
camKalTrk.trackWindow = cvRect(camKalTrk.predictpoint.x - camKalTrk.trackWindow.width / 2, camKalTrk.predictpoint.y
- camKalTrk.trackWindow.height / 2, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);
camKalTrk.trackWindow = checkRectBoundary(cvRect(0, 0, frame->width, frame->height), camKalTrk.trackWindow);
camKalTrk.searchWindow = cvRect(camKalTrk.trackWindow.x - region, camKalTrk.trackWindow.y - region, camKalTrk.trackWindow.width + 2
* region, camKalTrk.trackWindow.height + 2 * region);
camKalTrk.searchWindow = checkRectBoundary(cvRect(0, 0, frame->width, frame->height), camKalTrk.searchWindow);
cvSetImageROI(camKalTrk.hue, camKalTrk.searchWindow);
cvSetImageROI(camKalTrk.mask, camKalTrk.searchWindow);
cvSetImageROI(camKalTrk.backproject, camKalTrk.searchWindow);
cvCalcBackProject( &camKalTrk.hue, camKalTrk.backproject, camKalTrk.hist ); // back project
cvAnd(camKalTrk.backproject, camKalTrk.mask, camKalTrk.backproject, 0);
camKalTrk.trackWindow = cvRect(region, region, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);
if (camKalTrk.trackWindow.height > 5 && camKalTrk.trackWindow.width > 5) {
// calling CAMSHIFT
cvCamShift(camKalTrk.backproject, camKalTrk.trackWindow, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
&camKalTrk.trackComp, &camKalTrk.trackBox);
/*cvMeanShift( camKalTrk.backproject, camKalTrk.trackWindow,
cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
&camKalTrk.trackComp);*/
}
else {
camKalTrk.trackComp.rect.x = 0;
camKalTrk.trackComp.rect.y = 0;
camKalTrk.trackComp.rect.width = 0;
camKalTrk.trackComp.rect.height = 0;
}
cvResetImageROI(camKalTrk.hue);
cvResetImageROI(camKalTrk.mask);
cvResetImageROI(camKalTrk.backproject);
camKalTrk.trackWindow = camKalTrk.trackComp.rect;
camKalTrk.trackWindow = cvRect(camKalTrk.trackWindow.x + camKalTrk.searchWindow.x, camKalTrk.trackWindow.y
+ camKalTrk.searchWindow.y, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);
camKalTrk.measurepoint = cvPoint(camKalTrk.trackWindow.x + camKalTrk.trackWindow.width / 2, camKalTrk.trackWindow.y
+ camKalTrk.trackWindow.height / 2);
camKalTrk.realposition->data.fl[0] = camKalTrk.measurepoint.x;
camKalTrk.realposition->data.fl[1] = camKalTrk.measurepoint.y;
camKalTrk.realposition->data.fl[2] = camKalTrk.measurepoint.x - camKalTrk.lastpoint.x;
camKalTrk.realposition->data.fl[3] = camKalTrk.measurepoint.y - camKalTrk.lastpoint.y;
camKalTrk.lastpoint = camKalTrk.measurepoint;//keep the current real position
//measurement x,y
cvMatMulAdd( camKalTrk.kalman->measurement_matrix/*2x4*/, camKalTrk.realposition/*4x1*/,/*measurementstate*/0, camKalTrk.measurement );
cvKalmanCorrect(camKalTrk.kalman, camKalTrk.measurement);
cvRectangle(frame, cvPoint(camKalTrk.trackWindow.x, camKalTrk.trackWindow.y), cvPoint(camKalTrk.trackWindow.x
+ camKalTrk.trackWindow.width, camKalTrk.trackWindow.y + camKalTrk.trackWindow.height), CV_RGB(255,128,0), 4, 8, 0);
}
// set new selection if it exists
if (camKalTrk.selectObject && camKalTrk.selection.width > 0 && camKalTrk.selection.height > 0) {
cvSetImageROI(camKalTrk.image, camKalTrk.selection);
cvXorS(camKalTrk.image, cvScalarAll(255), camKalTrk.image, 0);
cvResetImageROI(camKalTrk.image);
}
return camKalTrk.trackWindow;
//.........这里部分代码省略.........
开发者ID:miguelao, 项目名称:gst_plugins_tsunami, 代码行数:101, 代码来源:camshift.cpp
示例9: detect_and_draw
// Runs the (global) Haar cascade on img. If at least one object is found,
// copies the first detection's region out of img, draws a red rectangle
// around it on img, sets the global HAND flag to 1 and returns the copy.
// When nothing is detected, HAND is cleared and a blank 100x100 placeholder
// image is returned instead. The caller owns the returned image and must
// release it with cvReleaseImage.
IplImage* detect_and_draw(IplImage* img, double scale = 1.3)
{
    IplImage* img1;
    static CvScalar colors[] = {
        {{0,0,255}}, {{0,128,255}},{{0,255,255}},{{0,255,0}},
        {{255,128,0}},{{255,255,0}},{{255,0,0}}, {{255,0,255}}
    }; //Just some pretty colors to draw with

    // IMAGE PREPARATION: grayscale, downscale by `scale`, equalize histogram.
    IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
    IplImage* small_img = cvCreateImage(
        cvSize( cvRound(img->width/scale), cvRound(img->height/scale)), 8, 1);
    cvCvtColor( img, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvEqualizeHist( small_img, small_img );

    // DETECT OBJECTS IF ANY
    cvClearMemStorage( storage );
    fprintf(stderr,"size: %d %d\n",cvGetSize(small_img).width,cvGetSize(small_img).height);
    CvSeq* objects = cvHaarDetectObjects(
        small_img,
        cascade,
        storage,
        1.1,
        2,
        0 ,
        cvSize(35, 35)
    );

    fprintf(stderr,"size: %d %d\n",cvGetSize(small_img).width,cvGetSize(small_img).height);
    if( 0<(objects ? objects->total : 0))
    {
        // Only the first detection is used.
        // NOTE(review): the rectangle is in small_img (downscaled) coordinates
        // but is applied to the full-size img without rescaling by `scale` --
        // confirm whether this offset is intentional.
        CvRect* r = (CvRect*)cvGetSeqElem( objects, 0 );
        cvSetImageROI(img,*r);
        img1=cvCreateImage(cvSize(r->width,r->height),img->depth,img->nChannels);
        cvCopy(img,img1,NULL);
        cvRectangle(
            img,
            cvPoint(r->x,r->y),
            cvPoint(r->x+r->width,r->y+r->height),
            colors[0]
        );
        cvResetImageROI(img);
        HAND=1;
    }
    else
    {
        HAND=0;
        img1=cvCreateImage(cvSize(100,100),img->depth,img->nChannels);
        cvZero(img1); // the original returned uninitialized pixel memory here
    }
    cvReleaseImage( &gray);
    cvReleaseImage( &small_img );
    return img1;
}
开发者ID:RokIrt, 项目名称:HandGesture, 代码行数:64, 代码来源:cameraDSRimgMaha.cpp
示例10: GetFrame
// Reads one frame from the currently connected QHY camera into `data` and
// reports its dimensions and bit depth through pW/pH/pBpp.
// The acquisition path depends on the camera model (qhyusb->QCam.CAMERA):
// QHY5II-family frames are read over USB, optionally byte-swapped, then
// cropped to the visible window via an IplImage ROI copy; CCD models are
// read packet-wise and de-interleaved according to the binning mode.
// If software 2x2 binning is active, the frame is binned and the reported
// width/height are halved.
// NOTE(review): lvlStat/lvlstatR/lvlstatG/lvlstatB are never written in this
// function -- confirm whether callers expect level statistics here.
void GetFrame(void *data,int *pW,int *pH,int *pBpp,int *lvlStat,
int *lvlstatR,int *lvlstatG,int *lvlstatB)
{
int ret = 0;
*pW = qhyusb->QCam.cameraW;
*pH = qhyusb->QCam.cameraH;
*pBpp = qhyusb->QCam.transferBit;
switch(qhyusb->QCam.CAMERA)
{
case DEVICETYPE_QHY5LII:
case DEVICETYPE_QHY5II:
{
// Keep reading until a complete frame (W*H payload + 5 status bytes)
// arrives or live capture is aborted.
while((ret != (qhyusb->QCam.cameraW * qhyusb->QCam.cameraH + 5)) && (qhyusb->liveabort == 0))
{
ret = qhyusb->qhyccd_readUSB2B(qhyusb->QCam.ccd_handle,(unsigned char *)data,qhyusb->QCam.cameraW * qhyusb->QCam.cameraH + 5,1,&qhyusb->QCam.pos);
#ifdef QHYCCD_DEBUG
printf("%d\n",ret);
#endif
}
// 16-bit QHY5LII frames arrive byte-swapped; fix the byte order in place.
if(qhyusb->QCam.transferBit == 16 && qhyusb->QCam.CAMERA == DEVICETYPE_QHY5LII)
{
q5lii->SWIFT_MSBLSBQHY5LII((unsigned char *)data);
}
// Crop the visible window out of the raw sensor frame: wrap the raw
// buffer in an IplImage header (no pixel copy), ROI-copy the window,
// then write the cropped pixels back into `data`.
IplImage *cvImg, *cropImg;
cvImg = cvCreateImage(cvSize(qhyusb->QCam.ImgX, qhyusb->QCam.ImgY), qhyusb->QCam.transferBit, 1);
cropImg = cvCreateImage(cvSize(qhyusb->QCam.ShowImgX, qhyusb->QCam.ShowImgY), qhyusb->QCam.transferBit, 1);
cvImg->imageData = (char *)data;
cvSetImageROI(cvImg, cvRect(qhyusb->QCam.ShowImgX_Start, qhyusb->QCam.ShowImgY_Start, qhyusb->QCam.ShowImgX,qhyusb->QCam.ShowImgY));
cvCopy(cvImg, cropImg, NULL);
cvResetImageROI(cvImg);
memcpy(data,cropImg->imageData,cropImg->imageSize);
cvReleaseImage(&cvImg);
cvReleaseImage(&cropImg);
break;
}
case DEVICETYPE_QHY16000:
{
// Single bulk read of exactly W*H bytes; no post-processing.
qhyusb->qhyccd_readUSB2B(qhyusb->QCam.ccd_handle,(unsigned char *)data,qhyusb->QCam.cameraW * qhyusb->QCam.cameraH,1,&qhyusb->QCam.pos);
break;
}
case DEVICETYPE_QHY9:
case DEVICETYPE_IC8300:
case DEVICETYPE_QHY11:
case DEVICETYPE_QHY21:
case DEVICETYPE_QHY22:
case DEVICETYPE_QHY23:
case DEVICETYPE_QHY6:
{
// CCD models: packet-based read, then model/binning-specific
// de-interleaving of the raw data.
qhyusb->qhyccd_readUSB2B(qhyusb->QCam.ccd_handle,(unsigned char *)data,qhyusb->QCam.P_Size,qhyusb->QCam.Total_P,&qhyusb->QCam.pos);
if(qhyusb->QCam.CAMERA == DEVICETYPE_IC8300 || qhyusb->QCam.CAMERA == DEVICETYPE_QHY22 || qhyusb->QCam.CAMERA == DEVICETYPE_QHY21 ||
qhyusb->QCam.CAMERA == DEVICETYPE_QHY23)
{
if(qhyusb->ccdreg.VBIN == 1)
{
ic8300->ConvertIC8300DataBIN11((unsigned char *)data,qhyusb->QCam.cameraW,qhyusb->QCam.cameraH,qhyusb->ccdreg.TopSkipPix);
}
else if(qhyusb->ccdreg.VBIN == 2)
{
ic8300->ConvertIC8300DataBIN22((unsigned char *)data,qhyusb->QCam.cameraW,qhyusb->QCam.cameraH,qhyusb->ccdreg.TopSkipPix);
}
else if(qhyusb->ccdreg.VBIN == 4)
{
ic8300->ConvertIC8300DataBIN44((unsigned char *)data,qhyusb->QCam.cameraW,qhyusb->QCam.cameraH,qhyusb->ccdreg.TopSkipPix);
}
}
else if(qhyusb->QCam.CAMERA == DEVICETYPE_QHY6)
{
if(qhyusb->ccdreg.VBIN == 1)
{
qhy6->ConvertQHY6PRODataBIN11((unsigned char *)data);
}
else if(qhyusb->ccdreg.VBIN == 2)
{
qhy6->ConvertQHY6PRODataBIN22((unsigned char *)data);
}
}
break;
}
}
// Software 2x2 binning: bin the buffer in place and report halved size.
if(qhyusb->QCam.bin == 22)
{
Bin2x2((unsigned char *)data,qhyusb->QCam.cameraW,qhyusb->QCam.cameraH);
*pW /= 2;
*pH /= 2;
}
}
开发者ID:ceterumnet, 项目名称:QHYCCD_Linux, 代码行数:92, 代码来源:common.cpp
示例11: cvReleaseImage
//.........这里部分代码省略.........
int rect = 0;
//만일을 대비하여 준비한 시퀸스 배열의 크기를 초과하지 않도록 조절
//일단 한곳에서만 영상이 나오도록 조절..
if(m_nBlobs_out > 1)
{
m_nBlobs_out = 1;
}
//레이블링 영역 내의 처리 시작
for( int i=0; i < m_nBlobs_out; i++ )
{
//사각형 그리기에 필요한 두점 저장
CvPoint pt1 = cvPoint( m_rec_out[i].x, m_rec_out[i].y );
CvPoint pt2 = cvPoint( pt1.x + m_rec_out[i].width,pt1.y + m_rec_out[i].height );
// 컬러값 설정
CvScalar color = cvScalar( 0, 0, 255 );
//레이블 사각형 그리기 - 확인용
//cvDrawRect( m_image_dest, pt1, pt2, color);
//레이블을 관심영역으로 지정할 이미지 생성
temp_mask = cvCreateImage(cvSize(m_rec_out[i].width, m_rec_out[i].height),8,1);
temp_mask2 = cvCreateImage(cvSize(m_rec_out[i].width, m_rec_out[i].height),8,1);
//관심영역 지정
cvSetImageROI(m_image_th, m_rec_out[i]);
//관심영역 추출
cvCopy(m_image_th, temp_mask, 0);
//관심영역 해제
cvResetImageROI(m_image_th);
//관심영역 내의 오브젝트 처리를 위한 시퀸스 생성
seq[i] = cvCreateSeq(CV_SEQ_KIND_GENERIC | CV_32SC2,sizeof(CvContour),sizeof(CvPoint), storage1);
//관심영역에서 추출한이미지의 흰색 픽셀값으로 시퀸스 생성
for(int j =0; j < temp_mask ->height ; j++)
{
for(int k = 0; k < temp_mask ->width; k++)
{
if((unsigned char)temp_mask->imageData[j*temp_mask->widthStep+k] == 255)
{
point.x = k; //흰색 픽셀 x좌표 저장
point.y = j; //흰색 픽셀 y좌표 저장
cvSeqPush(seq[i], &point); //시퀸스 구조체에 해당 좌표 삽입
temp_x += point.x; //좌표 누적
temp_y += point.y; //좌표 누적
num++; //픽셀 수 카운트
}
}
}
//좌표 초기화
point.x = 0;
point.y = 0;
end_pt.x = 0;
end_pt.y = 0;
center.x = 0;
center.y = 0;
CvPoint dist_pt; //중심점과의 최대거리를 찾을 컨백스헐 저장
开发者ID:OPRoS, 项目名称:Component, 代码行数:67, 代码来源:HandsMotionTracking.cpp
示例12: FindArenaObjects
// Interactive arena setup: shows `Image` in an "Arena" window and walks the
// user through selecting, with the mouse, the rectangular regions for the
// ramp, platform, right pit, left pit, robot, and robot patch (in that
// order). Each completed selection (tracked by the global SelectionNumber,
// advanced by the OnMouse callback) is stored in the corresponding output
// struct; centers are derived from the selection rectangles. Pressing ESC
// after the last selection (or at any prompt) ends the loop.
// Any of the output pointers may be NULL to skip storing that object.
void FindArenaObjects(IplImage* Image, CvFont Font, _ArenaObject *pRamp, _ArenaObject* pPlatform, _ArenaObject* pRightPit, _ArenaObject* pLeftPit, _Robot* pRobot)
{
IplImage* ImageCopy = cvCloneImage(Image);
IplImage* ImageCopy2 = cvCloneImage(Image);
// Globals shared with the OnMouse callback: current step and drag state.
SelectionNumber = 0;
Select_Object = 0;
int PrevSelectionNumber = -1;
cvNamedWindow("Arena");
cvShowImage("Arena", ImageCopy);
cvSetMouseCallback("Arena", OnMouse);
// Loop until all 6 selections are made or the user presses ESC (27).
while(SelectionNumber < 6 && cvWaitKey(10) != 27)
{
// Only redraw the prompt when OnMouse has advanced to the next step;
// each case stores the selection completed in the PREVIOUS step and
// prompts for the next one.
if(SelectionNumber - PrevSelectionNumber > 0)
{
PrevSelectionNumber = SelectionNumber;
cvCopyImage(Image, ImageCopy);
switch(SelectionNumber)
{
case 0:
cvPutText(ImageCopy, "Select Temp Ramp", cvPoint(0, 20), &Font, cvScalarAll(255));
break;
case 1:
if(pRamp)
{
pRamp->BoundingRect = Selection;
pRamp->Center = cvPoint(pRamp->BoundingRect.x + pRamp->BoundingRect.width/2, pRamp->BoundingRect.y + pRamp->BoundingRect.height/2);
}
cvPutText(ImageCopy, "Select Temp Platform", cvPoint(0, 20), &Font, cvScalarAll(255));
break;
case 2:
if(pPlatform)
{
pPlatform->BoundingRect = Selection;
pPlatform->Center = cvPoint(pPlatform->BoundingRect.x + pPlatform->BoundingRect.width/2, pPlatform->BoundingRect.y + pPlatform->BoundingRect.height/2);
}
cvPutText(ImageCopy, "Select Right Pit", cvPoint(0, 20), &Font, cvScalarAll(255));
break;
case 3:
if(pRightPit)
{
pRightPit->BoundingRect = Selection;
pRightPit->Center = cvPoint(pRightPit->BoundingRect.x + pRightPit->BoundingRect.width/2, pRightPit->BoundingRect.y + pRightPit->BoundingRect.height/2);
}
cvPutText(ImageCopy, "Select Left Pit", cvPoint(0, 20), &Font, cvScalarAll(255));
break;
case 4:
if(pLeftPit)
{
pLeftPit->BoundingRect = Selection;
pLeftPit->Center = cvPoint(pLeftPit->BoundingRect.x + pLeftPit->BoundingRect.width/2, pLeftPit->BoundingRect.y + pLeftPit->BoundingRect.height/2);
}
cvPutText(ImageCopy, "Select Robot", cvPoint(0, 20), &Font, cvScalarAll(255));
break;
case 5:
if(pRobot)
{
pRobot->BoundingRect = Selection;
}
cvPutText(ImageCopy, "Select Robot Patch", cvPoint(0, 20), &Font, cvScalarAll(255));
break;
case 6:
if(pRobot)
{
pRobot->Patch = Selection;
pRobot->PatchCenter = cvPoint(pRobot->Patch.x + pRobot->Patch.width/2, pRobot->Patch.y + pRobot->Patch.height/2);
pRobot->Updated = 1;
}
cvPutText(ImageCopy, "Press Escape to Continue...", cvPoint(0, 20), &Font, cvScalarAll(255));
break;
default:
break;
}
cvShowImage("Arena", ImageCopy);
}
// While a drag is in progress, show the live selection rectangle by
// inverting its pixels on a scratch copy of the prompt image.
if(Select_Object && Selection.width > 0 && Selection.height > 0 )
{
cvCopyImage(ImageCopy, ImageCopy2);
cvSetImageROI(ImageCopy2, Selection);
cvXorS(ImageCopy2, cvScalarAll(255), ImageCopy2);
cvResetImageROI(ImageCopy2);
cvShowImage("Arena", ImageCopy2);
}
}
cvReleaseImage(&ImageCopy);
cvReleaseImage(&ImageCopy2);
cvDestroyWindow("Arena");
}
开发者ID:gauravahuja, 项目名称:Avalanche, 代码行数:91, 代码来源:ArenaObjects.cpp
示例13: catcierge_haar_matcher_match
//.........这里部分代码省略.........
roi = result->match_rects[0];
// If we're saving steps, include the original haar cascade
// match rectangle image.
if (save_steps)
{
cvSetImageROI(img_eq, roi);
catcierge_haar_matcher_save_step_image(ctx,
img_eq, result, "haar_roi", "Haar match", save_steps);
}
catcierge_haar_matcher_calculate_roi(ctx, &roi);
cvSetImageROI(img_eq, roi);
catcierge_haar_matcher_save_step_image(ctx,
img_eq, result, "roi", "Cropped region of interest", save_steps);
if (args->prey_method == PREY_METHOD_ADAPTIVE)
{
inverted = 1;
flags = CV_THRESH_BINARY_INV | CV_THRESH_OTSU;
find_prey = catcierge_haar_matcher_find_prey_adaptive;
}
else
{
inverted = 0;
flags = CV_THRESH_BINARY | CV_THRESH_OTSU;
find_prey = catcierge_haar_matcher_find_prey;
}
// Both "find prey" and "guess direction" needs
// a thresholded image, so perform it before calling those.
thr_img = cvCreateImage(cvGetSize(img_eq), 8, 1);
cvThreshold(img_eq, thr_img, 0, 255, flags);
if (ctx->super.debug) cvShowImage("Haar image binary", thr_img);
catcierge_haar_matcher_save_step_image(ctx,
thr_img, result, "thresh", "Global thresholded binary image", save_steps);
result->direction = catcierge_haar_guess_direction(ctx, thr_img, inverted);
if (ctx->super.debug) printf("Direction: %s\n", catcierge_get_direction_str(result->direction));
// Don't bother looking for prey when the cat is going outside.
if ((result->direction) == MATCH_DIR_OUT)
{
if (ctx->super.debug) printf("Skipping prey detection!\n");
snprintf(result->description, sizeof(result->description) - 1,
"Skipped prey detection when going out");
goto done;
}
// Note that thr_img will be modified.
if (find_prey(ctx, img_eq, thr_img, result, save_steps))
{
if (ctx->super.debug) printf("Found prey!\n");
ret = HAAR_FAIL;
snprintf(result->description, sizeof(result->description) - 1,
"Prey detected");
}
else
{
ret = HAAR_SUCCESS;
snprintf(result->description, sizeof(result->description) - 1,
"No prey detected");
}
}
else
{
snprintf(result->description, sizeof(result->description) - 1,
"%sNo cat head detected",
(ret == HAAR_SUCCESS_NO_HEAD_IS_FAIL) ? "Fail ": "");
}
done:
fail:
cvResetImageROI(img);
if (args->eq_histogram)
{
cvReleaseImage(&img_eq);
}
if (tmp)
{
cvReleaseImage(&tmp);
}
if (thr_img)
{
cvReleaseImage(&thr_img);
}
result->result = ret;
result->success = (result->result > 0.0);
return ret;
}
开发者ID:JoakimSoderberg, 项目名称:catcierge, 代码行数:101, 代码来源:catcierge_haar_matcher.c
示例14: catcierge_haar_matcher_find_prey_adaptive
/*
 * Adaptive prey detection on a cropped cat-profile image.
 *
 * The caller supplies an inverted, globally thresholded image
 * (inv_thr_img) holding the rough cat silhouette. This routine adds an
 * inverted *adaptive* threshold of the original grayscale image on top
 * of it, which preserves small, low-contrast details (such as a mouse
 * tail) that a global threshold washes out. The merged binary image is
 * then cleaned up (open + dilate), inverted back to a white background,
 * and its contours are counted: more than one contour is treated as
 * evidence of prey.
 *
 * ctx         - matcher context (kernels, contour storage, args).
 * img         - grayscale input image (typically ROI-cropped by caller).
 * inv_thr_img - inverted global-threshold binary of the same region.
 * result      - match result; used for step-image bookkeeping.
 * save_steps  - when non-zero, each intermediate image is saved and a
 *               final annotated color image is produced.
 *
 * Returns non-zero (prey suspected) when more than one contour is found.
 */
int catcierge_haar_matcher_find_prey_adaptive(catcierge_haar_matcher_t *ctx,
	IplImage *img, IplImage *inv_thr_img,
	match_result_t *result, int save_steps)
{
	IplImage *adaptive_bin = NULL;	/* inverted adaptive threshold of img */
	IplImage *merged_bin = NULL;	/* global + adaptive thresholds combined */
	IplImage *opened_bin = NULL;	/* merged image after morphological open */
	IplImage *dilated_bin = NULL;	/* opened image after dilation */
	CvSeq *contour_list = NULL;
	size_t num_contours = 0;
	CvSize size;

	assert(ctx);
	assert(img);
	assert(ctx->args);

	size = cvGetSize(img);

	/* Inverted adaptive threshold: brings out fine structure that fades
	 * into the background under a single global threshold. */
	adaptive_bin = cvCreateImage(size, 8, 1);
	cvAdaptiveThreshold(img, adaptive_bin, 255,
		CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY_INV, 11, 5);

	catcierge_haar_matcher_save_step_image(ctx,
		adaptive_bin, result, "adp_thresh", "Inverted adaptive threshold", save_steps);

	/* Merge the caller's global threshold with the adaptive one. */
	merged_bin = cvCreateImage(size, 8, 1);
	cvAdd(inv_thr_img, adaptive_bin, merged_bin, NULL);

	catcierge_haar_matcher_save_step_image(ctx,
		merged_bin, result, "inv_combined", "Combined global and adaptive threshold", save_steps);

	/* Morphological open with a small kernel removes adaptive-threshold
	 * speckle noise. */
	opened_bin = cvCreateImage(size, 8, 1);
	cvMorphologyEx(merged_bin, opened_bin, NULL, ctx->kernel2x2, CV_MOP_OPEN, 2);

	catcierge_haar_matcher_save_step_image(ctx,
		opened_bin, result, "opened", "Opened image", save_steps);

	/* Dilate to reconnect features thinned by the open step. */
	dilated_bin = cvCreateImage(size, 8, 1);
	cvDilate(opened_bin, dilated_bin, ctx->kernel3x3, 3);

	catcierge_haar_matcher_save_step_image(ctx,
		dilated_bin, result, "dilated", "Dilated image", save_steps);

	/* Flip back so the background is white before contour extraction. */
	cvNot(dilated_bin, dilated_bin);

	catcierge_haar_matcher_save_step_image(ctx,
		dilated_bin, result, "combined", "Combined binary image", save_steps);

	cvFindContours(dilated_bin, ctx->storage, &contour_list,
		sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0));

	/* A single contour is the cat profile itself; anything beyond that
	 * counts as prey. */
	num_contours = catcierge_haar_matcher_count_contours(ctx, contour_list);

	if (save_steps)
	{
		IplImage *contour_vis = cvCloneImage(img);
		IplImage *final_vis = NULL;
		CvScalar rect_color;

		cvDrawContours(contour_vis, contour_list, cvScalarAll(255), cvScalarAll(0), 1, 1, 8, cvPoint(0, 0));

		catcierge_haar_matcher_save_step_image(ctx,
			contour_vis, result, "contours", "Background contours", save_steps);

		/* Final color image: Haar detection rectangle over the contours,
		 * red on suspected prey, green otherwise. */
		cvResetImageROI(contour_vis);
		final_vis = cvCreateImage(cvGetSize(contour_vis), 8, 3);
		cvCvtColor(contour_vis, final_vis, CV_GRAY2BGR);

		if (num_contours > 1)
		{
			rect_color = CV_RGB(255, 0, 0);
		}
		else
		{
			rect_color = CV_RGB(0, 255, 0);
		}

		cvRectangleR(final_vis, result->match_rects[0], rect_color, 2, 8, 0);

		catcierge_haar_matcher_save_step_image(ctx,
			final_vis, result, "final", "Final image", save_steps);

		cvReleaseImage(&contour_vis);
		cvReleaseImage(&final_vis);
	}

	cvReleaseImage(&adaptive_bin);
	cvReleaseImage(&merged_bin);
	cvReleaseImage(&opened_bin);
	cvReleaseImage(&dilated_bin);

	return (num_contours > 1);
}
开发者ID:JoakimSoderberg, 项目名称:catcierge, 代码行数:90, 代码来源:catcierge_haar_matcher.c
六六分期app的软件客服如何联系?不知道吗?加qq群【895510560】即可!标题:六六分期
阅读:18088| 2023-10-27
今天小编告诉大家如何处理win10系统火狐flash插件总是崩溃的问题,可能很多用户都不知
阅读:9616| 2022-11-06
今天小编告诉大家如何对win10系统删除桌面回收站图标进行设置,可能很多用户都不知道
阅读:8149| 2022-11-06
今天小编告诉大家如何对win10系统电脑设置节能降温的设置方法,想必大家都遇到过需要
阅读:8530| 2022-11-06
我们在使用xp系统的过程中,经常需要对xp系统无线网络安装向导设置进行设置,可能很多
阅读:8432| 2022-11-06
今天小编告诉大家如何处理win7系统玩cf老是与主机连接不稳定的问题,可能很多用户都不
阅读:9345| 2022-11-06
电脑对日常生活的重要性小编就不多说了,可是一旦碰到win7系统设置cf烟雾头的问题,很
阅读:8397| 2022-11-06
我们在日常使用电脑的时候,有的小伙伴们可能在打开应用的时候会遇见提示应用程序无法
阅读:7833| 2022-11-06
今天小编告诉大家如何对win7系统打开vcf文件进行设置,可能很多用户都不知道怎么对win
阅读:8387| 2022-11-06
今天小编告诉大家如何对win10系统s4开启USB调试模式进行设置,可能很多用户都不知道怎
阅读:7380| 2022-11-06
请发表评论