本文整理汇总了C++中cvAbsDiff函数的典型用法代码示例。如果您正苦于以下问题:C++ cvAbsDiff函数的具体用法?C++ cvAbsDiff怎么用?C++ cvAbsDiff使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvAbsDiff函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: CheckImage
// Compares `image` against the image stored at `file`.
// Returns 0 on a pixel-perfect match, 1 if the file cannot be read,
// otherwise the number of pixels that differ in at least one channel.
static int CheckImage(IplImage* image, char* file, char* /*funcname*/)
{
    // Load the reference image from disk (forced to 3-channel color).
    IplImage* reference = cvLoadImage( file, 1 );
    if( !reference )
    {
        trsWrite( ATS_CON | ATS_LST, "can't read image\n" );
        return 1;
    }

#if 0
    // Disabled: interactive visual inspection of the differences.
    {
        IplImage* diffVis = cvCloneImage( reference );
        cvAbsDiff( image, reference, diffVis );
        cvThreshold( diffVis, diffVis, 0, 255, CV_THRESH_BINARY );
        cvvNamedWindow( "Original", 0 );
        cvvNamedWindow( "Diff", 0 );
        cvvShowImage( "Original", reference );
        cvvShowImage( "Diff", diffVis );
        cvvWaitKey(0);
        cvvDestroyWindow( "Original" );
        cvvDestroyWindow( "Diff" );
    }
#endif

    // Per-channel absolute difference, then clamp every nonzero difference
    // to 1 so the L1 norm counts differing channel samples; dividing by 3
    // converts channel counts into a per-pixel error count.
    cvAbsDiff( image, reference, reference );
    cvThreshold( reference, reference, 0, 1, CV_THRESH_BINARY );
    int mismatched = cvRound( cvNorm( reference, 0, CV_L1 )) / 3;

    cvReleaseImage( &reference );
    return mismatched;
}
开发者ID:Avatarchik,项目名称:EmguCV-Unity,代码行数:35,代码来源:adrawing_regress.cpp
示例2: cvAbsDiff
// Three-frame differencing: a pixel is marked as motion when it changed
// between frames 1-2 AND between frames 2-3. Emits the binary mask and
// returns it, or NULL until three frames have been accumulated.
IplImage* CMotionDetector::calculus() {
    // Need three buffered frames before any computation is possible.
    if (!m_pImageTrois)
        return NULL;

    // Lazily allocate the scratch buffers with the first frame's geometry.
    if (!m_pImageUnDeux)
        m_pImageUnDeux = cvCloneImage(m_pImageUn);
    if (!m_pImageDeuxTrois)
        m_pImageDeuxTrois = cvCloneImage(m_pImageUn);
    if (!m_pImageResult)
        m_pImageResult = cvCloneImage(m_pImageUn);

    // Differences of the two successive frame pairs, AND-ed together.
    cvAbsDiff(m_pImageUn, m_pImageDeux, m_pImageUnDeux);
    cvAbsDiff(m_pImageDeux, m_pImageTrois, m_pImageDeuxTrois);
    cvAnd(m_pImageUnDeux, m_pImageDeuxTrois, m_pImageResult);

    // Binarize (threshold 50) and thicken the motion mask (4 dilations).
    cvThreshold(m_pImageResult, m_pImageResult, 50, 255, CV_THRESH_BINARY);
    cvDilate(m_pImageResult, m_pImageResult, 0, 4);

    emit calculusNewImage(m_pImageResult);
    return m_pImageResult;
}
开发者ID:Ahziel,项目名称:TrafficDetector,代码行数:15,代码来源:MotionDetector.cpp
示例3: cvZero
/* Standard Deviation */
// Computes the per-pixel sample standard deviation of the mFrameNumber
// buffered frames with respect to the background model and returns it as
// the 8-bit member image m_imgStandardDeviation.
// Assumes mFrameNumber > 1 (sample variance divides by mFrameNumber - 1).
IplImage* motionDetection::getStandardDeviationFrame(void) {

    // Accumulator for the squared differences.
    cvZero(mSum);

    for (int i = 0; i < mFrameNumber; ++i) {
        // mTmp8U <= | frame[i] - background model |
        cvAbsDiff(mpFrame[i], m_imgBackgroundModel, mTmp8U);
        // uchar -> float
        cvConvert(mTmp8U, mTmp);
        // mTmp = mTmp * mTmp
        cvPow(mTmp, mTmp, 2.0);
        // mSum += mTmp
        cvAdd(mSum, mTmp, mSum);
    }

    // Sample variance: mTmp <= mSum / (mFrameNumber - 1).
    // cvConvertScale replaces the original hand-rolled per-pixel loop,
    // which did the same division element by element.
    cvConvertScale(mSum, mTmp, 1.0 / (mFrameNumber - 1), 0);

    // Standard deviation: element-wise square root.
    cvPow(mTmp, mTmp, 0.5);

    // float -> uchar for the returned image.
    cvConvert(mTmp, m_imgStandardDeviation);

    return m_imgStandardDeviation;
}
开发者ID:KevinGuo0211,项目名称:EarlyFireDetection,代码行数:31,代码来源:motionDetection.cpp
示例4: detect_object
// One step of background-subtraction object detection: converts the frame
// to grayscale, smooths it, differences it against the background model,
// binarizes the foreground with thre_limit, cleans the mask morphologically,
// and updates the background with a slow running average.
void detect_object(IplImage *image, IplImage *pBkImg, IplImage *pFrImg, CvMat *pFrameMat, CvMat *pBkMat, CvMat *pFrMat,int thre_limit)
{
    nFrmNum++;

    // Grayscale copy of the current frame, then into a float matrix.
    cvCvtColor(image, pFrImg, CV_BGR2GRAY);
    cvConvert(pFrImg, pFrameMat);

    // Gaussian smoothing to suppress sensor noise.
    cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);

    // Absolute difference between the current frame and the background.
    cvAbsDiff(pFrameMat, pBkMat, pFrMat);

    // Binarize the foreground mask.
    cvThreshold(pFrMat, pFrImg, thre_limit, 255.0, CV_THRESH_BINARY);

    // Morphological clean-up with the default 3x3 kernel:
    // erosion removes isolated specks, dilation restores blob size.
    // (An explicit 2x2 structuring element was tried earlier and abandoned.)
    cvErode(pFrImg, pFrImg, 0, 1);
    cvDilate(pFrImg, pFrImg, 0, 1);

    // Update the background with a running average (learning rate 0.004).
    cvRunningAvg(pFrameMat, pBkMat, 0.004, 0);

    // Convert the background matrix back to image form for display.
    cvConvert(pBkMat, pBkImg);

    cvShowImage("background", pFrImg);
}
开发者ID:JiayuanHu,项目名称:Target-Tracking,代码行数:31,代码来源:tracking.cpp
示例5: ofLogError
//--------------------------------------------------------------------------------
// Stores the per-pixel absolute difference |mom - dad| into this image.
// Both sources must be allocated; this image is allocated on demand to
// mom's dimensions. All three ROIs must have matching width and height.
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom,
                                   ofxCvGrayscaleImage& dad ) {

    if( !mom.bAllocated ){
        ofLogError("ofxCvGrayscaleImage") << "absDiff(): first source image (mom) not allocated";
        return;
    }
    if( !dad.bAllocated ){
        ofLogError("ofxCvGrayscaleImage") << "absDiff(): second source image (dad) not allocated";
        return;
    }
    if( !bAllocated ){
        ofLogNotice("ofxCvGrayscaleImage") << "absDiff(): allocating to match dimensions: "
                                           << mom.getWidth() << " " << mom.getHeight();
        allocate(mom.getWidth(), mom.getHeight());
    }

    ofRectangle myRoi  = getROI();
    ofRectangle momRoi = mom.getROI();
    ofRectangle dadRoi = dad.getROI();

    bool momMatches = (momRoi.width == myRoi.width) && (momRoi.height == myRoi.height);
    bool dadMatches = (dadRoi.width == myRoi.width) && (dadRoi.height == myRoi.height);

    if( momMatches && dadMatches ){
        cvAbsDiff( mom.getCvImage(), dad.getCvImage(), cvImage );
        flagImageChanged();
    } else {
        ofLogError("ofxCvGrayscaleImage") << "absDiff(): source image size mismatch between first (mom) & second (dad) image";
    }
}
开发者ID:omarieclaire,项目名称:closer,代码行数:30,代码来源:ofxCvGrayscaleImage.cpp
示例6: frame_dif
// Two-frame differencing merged with a previously computed foreground
// mask: a pixel of pFrame is set to 255 when either the thresholded frame
// difference (res) or the existing mask (pFrImg) flags it. Finally stores
// the current frame into image_pass as the "previous" frame for next call.
void frame_dif(IplImage* image, IplImage* image_pass, IplImage* res,IplImage* res0, IplImage* pFrImg,IplImage* pFrame,int thre_limit)
{
    cvZero(pFrame);

    // |current - previous|, reduced to grayscale and binarized.
    cvAbsDiff(image, image_pass, res0);
    cvCvtColor(res0, res, CV_RGB2GRAY);
    cvThreshold(res, res, thre_limit, 255, CV_THRESH_BINARY);

    int width = pFrame->width;
    int height = pFrame->height;

    // BUGFIX: rows must be addressed through widthStep, not width.
    // IplImage rows are padded to a 4-byte boundary, so the original
    // "i * width + j" indexing read and wrote the wrong pixels whenever
    // the image width was not a multiple of 4.
    for (int i = 0; i < height; i++)
    {
        const unsigned char* resRow  = (const unsigned char*)(res->imageData + i * res->widthStep);
        const unsigned char* maskRow = (const unsigned char*)(pFrImg->imageData + i * pFrImg->widthStep);
        char* outRow = pFrame->imageData + i * pFrame->widthStep;
        for (int j = 0; j < width; j++)
        {
            if (resRow[j] == 255 || maskRow[j] == 255)
            {
                outRow[j] = (char)255;
            }
        }
    }

    // The current frame becomes the previous frame for the next call.
    cvCopy(image, image_pass, NULL);
}
开发者ID:JiayuanHu,项目名称:Target-Tracking,代码行数:27,代码来源:tracking.cpp
示例7: x
// Displays the absolute per-pixel difference between the HUE channels of
// img1 and img2 in the "video" window. imgsize only supplies the buffer
// geometry. (cvAbsDiff is symmetric, so operand order does not matter.)
void x(IplImage *img1, IplImage *img2, IplImage *imgsize)
{
    CvSize sz = cvSize( imgsize->width, imgsize->height );

    // single-channel hue buffers plus the difference buffer
    IplImage *hue1 = cvCreateImage( sz, IPL_DEPTH_8U, 1);
    IplImage *hue2 = cvCreateImage( sz, IPL_DEPTH_8U, 1);
    IplImage *diff = cvCreateImage( sz, IPL_DEPTH_8U, 1);

    IplImage *hsv1 = cvCloneImage(img1);
    IplImage *hsv2 = cvCloneImage(img2);

    // Extract the hue plane (channel 1) of each input via the COI.
    // NOTE: the original version also ran cvCvtColor(..., CV_RGB2GRAY)
    // into each hue buffer, but that result was immediately overwritten
    // by the channel copy below — that dead work is removed. It also
    // stored img1's hue in the buffer named for img2 and vice versa;
    // the output is unchanged because the final difference is symmetric.
    cvCvtColor(img1, hsv1, CV_BGR2HSV);
    cvSetImageCOI(hsv1, 1);
    cvCopy(hsv1, hue1, 0);

    cvCvtColor(img2, hsv2, CV_BGR2HSV);
    cvSetImageCOI(hsv2, 1);
    cvCopy(hsv2, hue2, 0);

    // compute and show the difference
    cvAbsDiff( hue1, hue2, diff );
    cvShowImage( "video", diff );

    cvReleaseImage(&hue1);
    cvReleaseImage(&hue2);
    cvReleaseImage(&diff);
    cvReleaseImage(&hsv1);
    cvReleaseImage(&hsv2);
}
开发者ID:sumitsrv,项目名称:vk,代码行数:32,代码来源:motion_detect.c
示例8: gst_motiondetect_apply
/* Returns TRUE when any pixel inside cvMaskImage differs between the
 * reference and current images by more than the noise threshold.
 * NOTE: the difference is computed in place, clobbering cvReferenceImage. */
static gboolean gst_motiondetect_apply (
    IplImage * cvReferenceImage, const IplImage * cvCurrentImage,
    const IplImage * cvMaskImage, float noiseThreshold)
{
  /* Map noiseThreshold (0..1) onto an 8-bit binarization threshold:
   * a higher noise threshold tolerates smaller pixel differences. */
  int threshold = (int)((1 - noiseThreshold) * 255);

  /* In-place absolute difference and binarization. */
  IplImage *cvAbsDiffImage = cvReferenceImage;
  cvAbsDiff( cvReferenceImage, cvCurrentImage, cvAbsDiffImage );
  cvThreshold (cvAbsDiffImage, cvAbsDiffImage, threshold, 255,
      CV_THRESH_BINARY);

  /* Erode with a 3x3 ellipse so isolated noise pixels are discarded. */
  IplConvKernel *kernel = cvCreateStructuringElementEx (3, 3, 1, 1,
      CV_SHAPE_ELLIPSE, NULL);
  cvErode (cvAbsDiffImage, cvAbsDiffImage, kernel, 1);
  cvReleaseStructuringElement(&kernel);

  /* Any surviving nonzero pixel within the mask means motion. */
  double maxVal = -1.0;
  cvMinMaxLoc(cvAbsDiffImage, NULL, &maxVal, NULL, NULL, cvMaskImage );

  return (maxVal > 0) ? TRUE : FALSE;
}
开发者ID:ekelly30,项目名称:stb-tester,代码行数:25,代码来源:gstmotiondetect.c
示例9: main
/* Usage: diff <image1> <image2> <output>
 * Writes the grayscale absolute difference of the two input images to the
 * output path. Returns 0 on success, -1 on bad arguments or load failure. */
int main ( int argc, char **argv )
{
    /* ROBUSTNESS: the original indexed argv[1..3] without checking argc
     * and never verified that cvLoadImage succeeded. */
    if( argc < 4 )
        return -1;

    /* load image one */
    IplImage *img1 = cvLoadImage( argv[1] );
    if( !img1 )
        return -1;

    /* grayscale buffers sized like the first image */
    IplImage *imggray1 = cvCreateImage( cvGetSize( img1 ), IPL_DEPTH_8U, 1);
    IplImage *imggray2 = cvCreateImage( cvGetSize( img1 ), IPL_DEPTH_8U, 1);
    IplImage *imggray3 = cvCreateImage( cvGetSize( img1 ), IPL_DEPTH_8U, 1);

    /* convert rgb to grayscale */
    cvCvtColor( img1, imggray1, CV_RGB2GRAY );

    /* load image two */
    IplImage *img2 = cvLoadImage( argv[2] );
    if( !img2 ) {
        cvReleaseImage( &imggray1 );
        cvReleaseImage( &imggray2 );
        cvReleaseImage( &imggray3 );
        cvReleaseImage( &img1 );
        return -1;
    }

    /* convert rgb to grayscale */
    cvCvtColor( img2, imggray2, CV_RGB2GRAY );

    /* compute difference and save it */
    cvAbsDiff( imggray1, imggray2, imggray3 );
    cvSaveImage( argv[3], imggray3 );

    /* FIX: release all images (the original leaked every allocation). */
    cvReleaseImage( &imggray1 );
    cvReleaseImage( &imggray2 );
    cvReleaseImage( &imggray3 );
    cvReleaseImage( &img1 );
    cvReleaseImage( &img2 );
    return 0;
}
开发者ID:kthakore,项目名称:simcam,代码行数:34,代码来源:diff.c
示例10: ofLog
//--------------------------------------------------------------------------------
// Stores the per-pixel absolute difference |mom - dad| into this image.
// Both sources must already be allocated; this image is allocated on
// demand to mom's dimensions. The three ROIs must match in size.
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom,
                                   ofxCvGrayscaleImage& dad ) {

    if( !mom.bAllocated ){
        ofLog(OF_LOG_ERROR, "in absDiff, mom needs to be allocated");
        return;
    }
    if( !dad.bAllocated ){
        ofLog(OF_LOG_ERROR, "in absDiff, dad needs to be allocated");
        return;
    }
    if( !bAllocated ){
        ofLog(OF_LOG_NOTICE, "in absDiff, allocating to match dimensions");
        allocate(mom.getWidth(), mom.getHeight());
    }

    ofRectangle myRoi  = getROI();
    ofRectangle momRoi = mom.getROI();
    ofRectangle dadRoi = dad.getROI();

    bool momMatches = (momRoi.width == myRoi.width) && (momRoi.height == myRoi.height);
    bool dadMatches = (dadRoi.width == myRoi.width) && (dadRoi.height == myRoi.height);

    if( momMatches && dadMatches ){
        cvAbsDiff( mom.getCvImage(), dad.getCvImage(), cvImage );
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in absDiff, images are different sizes");
    }
}
开发者ID:3snail,项目名称:openFrameworks,代码行数:30,代码来源:ofxCvGrayscaleImage.cpp
示例11: detect_motion
/* Returns 1 when more than 1% of the pixels changed (above the camera's
 * threshold) between this frame and the previous one, else 0.
 * Swaps md->cur/md->prev so the converted frame becomes the new current. */
static int detect_motion(struct motion_detection *md, AVFrame *frame) {
    AVPicture pict;

    /* Rotate the buffers: last call's "current" becomes "previous". */
    IplImage *swap = md->cur;
    md->cur = md->prev;
    md->prev = swap;

    /* Convert the decoded frame to 8-bit grayscale into md->cur. */
    avpicture_fill(&pict, md->buffer, PIX_FMT_GRAY8, md->cam->codec->width, md->cam->codec->height);
    sws_scale(md->img_convert_ctx, (const uint8_t* const*)frame->data, frame->linesize, 0, md->cam->codec->height, (uint8_t* const*)pict.data, pict.linesize);
    memcpy(md->cur->imageData, pict.data[0], md->cur->imageSize);
    md->cur->widthStep = pict.linesize[0];

    /* Binary silhouette of what changed between the two frames. */
    cvAbsDiff(md->cur, md->prev, md->silh);
    cvThreshold(md->silh, md->silh, md->cam->threshold, 250, CV_THRESH_BINARY);

    /* Count the changed pixels, row by row via widthStep. */
    int density = 0;
    for(int row = 0; row < md->silh->height; row++) {
        uint8_t* p = (uint8_t*)md->silh->imageData + row * md->silh->widthStep;
        for(int col = 0; col < md->silh->width; col++) {
            if(*(p + col) > 0)
                density += 1;
        }
    }

    /* Motion is reported when the changed fraction exceeds 1%. */
    return ((float)density / (float)(md->silh->height * md->silh->width) > 0.01) ? 1 : 0;
}
开发者ID:sanek701,项目名称:CCTV-linux-msiu,代码行数:30,代码来源:recorder_thread.c
示例12: main
int main( int argc, char** argv )
{
char* filename = argc == 2 ? argv[1] : (char*)"1-small.jpg";
if( (image = cvLoadImage( filename, CV_LOAD_IMAGE_COLOR)) == 0 )
return -1;
//cvNamedWindow("orig", CV_WINDOW_AUTOSIZE);
//cvShowImage("orig", image);
// Extract red channel of image
red = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
green = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
cvSplit(image, NULL, green, red, NULL);
red_edge = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
green_edge = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
cvCanny(red, red_edge, low, high, 3);
cvCanny(green, green_edge, low, high, 3);
edge = cvCreateImage(cvSize(image->width,image->height), IPL_DEPTH_8U, 1);
cvAbsDiff(red_edge, green_edge, edge);
final = cvCreateImage(cvSize((image->width&-2)/2,(image->height&-2)/2), IPL_DEPTH_8U, 1);
开发者ID:roofilin,项目名称:roofilin,代码行数:26,代码来源:chalk.c
示例13: cvCreateImage
/// ****************************************************
///
/// CARTOON FILTER
///
/// ****************************************************
// Applies a cartoon-style effect to src and writes it into dst:
// mean-shift-posterized colors with Canny edges subtracted (darkened).
// w/h give the working resolution. Always returns true.
bool testApp::cvFilterCartoon(ofxCvColorImage &src, ofxCvColorImage &dst, int w, int h)
{
    // Temporary storage.
    IplImage* pyr = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 3 );
    IplImage* edges = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 1 );
    IplImage* edgesRgb = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 3 );

    // Grayscale copy of the source for edge detection
    // (RAII: ofxCvGrayscaleImage frees itself on scope exit).
    ofxCvGrayscaleImage tempGrayImg;
    tempGrayImg.allocate(w, h);
    tempGrayImg.setFromColorImage(src);

    //------------------------------
    // Posterize colors, detect edges, and subtract the edges so edge
    // pixels come out dark — the "cartoon outline" look.
    cvPyrMeanShiftFiltering(src.getCvImage(), pyr, 10, 10);
    cvCanny(tempGrayImg.getCvImage(), edges, 150,150);
    cvCvtColor(edges, edgesRgb, CV_GRAY2RGB);
    cvAbsDiff(pyr, edgesRgb, pyr);

    dst.setFromPixels((unsigned char *)pyr->imageData, w, h);

    // BUGFIX: these temporaries were previously leaked on every call.
    cvReleaseImage(&pyr);
    cvReleaseImage(&edges);
    cvReleaseImage(&edgesRgb);

    return true;
}
开发者ID:dasaki,项目名称:cvcinema,代码行数:39,代码来源:testApp.cpp
示例14: ObtenerMaximo
// Finds the maximum normalized distance to the background model:
// max over pixels of |I(p) - u(p)| / sigma(p), restricted to the
// foreground mask FrameData->FG. Returns a heap-allocated double that the
// CALLER must delete.
// BUGFIX: the original passed a NULL double* to cvMinMaxLoc (which writes
// through it — a crash) and passed it in the *minimum* output slot even
// though the code looks for the maximum. It also recreated IDif/peso on
// every call (the local pointers were always NULL, making the "reuse"
// check dead code) and never released them — a leak per call.
double* ObtenerMaximo(IplImage* Imagen, STFrame* FrameData, CvRect Roi) {

    if (SHOW_VALIDATION_DATA == 1)
        printf(" \n\n Busqueda del máximo umbral...");

    CvSize modelSize = cvSize(FrameData->BGModel->width,
                              FrameData->BGModel->height);

    // Difference image |I(p)-u(p)| and the weight image w = |I-u|/sigma.
    IplImage* IDif = cvCreateImage(modelSize, IPL_DEPTH_8U, 1);
    IplImage* peso = cvCreateImage(modelSize, IPL_DEPTH_32F, 1);
    cvZero(IDif);
    cvZero(peso);

    // |I(p)-u(p)| / sigma(p)
    cvAbsDiff(Imagen, FrameData->BGModel, IDif);
    // NOTE(review): cvDiv expects same-type operands; IDif is 8U while
    // peso is 32F — confirm IDesvf's depth matches this usage.
    cvDiv(IDif, FrameData->IDesvf, peso);

    // Locate the maximum weight inside the foreground mask.
    double* Maximo = new double(0.0);
    cvMinMaxLoc(peso, 0, Maximo, 0, 0, FrameData->FG);

    cvReleaseImage(&IDif);
    cvReleaseImage(&peso);
    return Maximo;
}
开发者ID:beetecu,项目名称:trackingdrosophila,代码行数:28,代码来源:validacion2.cpp
示例15: cvReleaseCapture
// Runs a background-subtraction loop on camera 0. Each frame is converted
// to grayscale, smoothed, differenced against a running-average background
// model, and binarized with lowThreshold; the background is updated with
// learning rate alpha. The loop displays every frame via Display() and
// exits when the user presses ESC (key code 27).
// NOTE(review): pCapture, pFrame, the image/matrix buffers, nFrameNum,
// lowThreshold and alpha are class members — presumably set up elsewhere;
// verify against the class declaration.
void MainWindow::BackgroundDiff()
{
    ui->alpha_slider->setEnabled(true);
    cvReleaseCapture(&pCapture);
    pCapture=cvCaptureFromCAM(0);
    // IplImage* pFrame=NULL;
    nFrameNum=0;
    while(pFrame = cvQueryFrame( pCapture ))
    {
        nFrameNum++;
        // First frame: allocate the buffers and initialize them.
        if(nFrameNum == 1)
        {
            pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height),IPL_DEPTH_8U,1);
            pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U,1);
            pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
            // Convert to single-channel grayscale before processing;
            // the first frame seeds the background model.
            cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
            cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
            cvConvert(pFrImg, pFrameMat);
            cvConvert(pFrImg, pFrMat);
            cvConvert(pFrImg, pBkMat);
        }
        else
        {
            cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
            cvConvert(pFrImg, pFrameMat);
            // Gaussian smoothing first, to reduce noise.
            cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);
            // Subtract the background from the current frame.
            cvAbsDiff(pFrameMat, pBkMat, pFrMat);
            // Morphological close (dilate then erode), then binarize
            // the foreground image.
            cvDilate(pFrMat,pFrMat);
            cvErode(pFrMat,pFrMat);
            cvThreshold(pFrMat, pFrImg, lowThreshold, 255.0, CV_THRESH_BINARY);
            // Update the background (running average, learning rate alpha).
            cvRunningAvg(pFrameMat, pBkMat, alpha,0);
            // Convert the background matrix to image form for display.
            cvConvert(pBkMat, pBkImg);
            pFrame->origin = IPL_ORIGIN_BL;
            pFrImg->origin = IPL_ORIGIN_BL;
            pBkImg->origin = IPL_ORIGIN_BL;
        }
        // ESC quits; cvWaitKey(33) also paces the loop at ~30 fps.
        if(27==cvWaitKey(33))
            break;
        MainWindow::Display(pFrame,pBkImg,pFrImg);
    }
}
开发者ID:lkpjj,项目名称:qt_demo,代码行数:59,代码来源:mainwindow.cpp
示例16: cvAbsDiff
//--------------------------------------------------------------------------------
// Stores the per-pixel absolute difference between this image and mom back
// into this image (via the temp-swap idiom). Both ROIs must match.
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom ) {
    if( matchingROI(getROI(), mom.getROI()) ) {
        cvAbsDiff( cvImage, mom.getCvImage(), cvImageTemp );
        swapTemp();
        flagImageChanged();
    } else {
        // BUGFIX: the message previously read "in *=, ROI mismatch" — a
        // copy-paste leftover from operator*= that misidentified the
        // failing call in the log.
        ofLog(OF_LOG_ERROR, "in absDiff, ROI mismatch");
    }
}
开发者ID:madshobye,项目名称:moodCam,代码行数:10,代码来源:ofxCvGrayscaleImage.cpp
示例17: get_frame_difference
/* Writes a binary motion mask of |in - inprev| into output.
 * NOTE: both inputs are smoothed IN PLACE before differencing.
 * Always returns TRUE. */
gboolean get_frame_difference( IplImage* in, IplImage* inprev, IplImage* output)
{
  /* Suppress pixel noise with a 5x5 Gaussian on both frames. */
  cvSmooth(in, in, CV_GAUSSIAN, 5);
  cvSmooth(inprev, inprev, CV_GAUSSIAN, 5);

  /* Pixels that changed by more than 5 levels become 255, the rest 0;
   * a morphological close (NULL = default 3x3 kernel) fills small holes. */
  cvAbsDiff( in, inprev, output);
  cvThreshold( output, output, 5, 255, CV_THRESH_BINARY);
  cvMorphologyEx( output, output, 0, 0, CV_MOP_CLOSE, 1 );

  return TRUE;
}
开发者ID:miguelao,项目名称:gst_plugins_tsunami,代码行数:10,代码来源:gstgcs.c
示例18: update_mhi
// parameters:
//  img  - input video frame
//  dst  - resultant motion picture (blue channel holds the scaled MHI)
//  diff_threshold - binarization threshold for the frame difference
//  frameCount - kept for interface compatibility (not used here)
// Maintains the ring buffer of grayscale frames (buf/last, globals),
// updates the motion-history image (mhi) from the thresholded difference
// of the two most recent frames, and renders it into dst.
static void update_mhi( IplImage* img, IplImage* dst, int diff_threshold, int frameCount){
    if(DEBUG){
        std::cout << "- UPDATING_MHI" << std::endl;
    }

    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height);      // get current frame size
    int i, idx1 = last, idx2;

    // Allocate images at the beginning or reallocate them if the frame size is changed.
    // NOTE: the unused locals of the original (seq, comp_rect, roi, count,
    // angle, center, magnitude, color) — leftovers from a removed
    // motion-segmentation stage — have been deleted.
    if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
        if( buf == 0 ) {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ ) {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames
    cvThreshold( silh, silh, diff_threshold, 255, CV_THRESH_BINARY); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );
}
开发者ID:sabs231,项目名称:hand-gesture-recon,代码行数:59,代码来源:trainAndClassify.cpp
示例19: AddError
// Processes one frame of the pipeline: validates the input color image,
// maintains an adaptive background model (first frame seeds it; later
// frames are blended in with weight mUpdateProportion), optionally
// equalizes the frame's mean brightness to the background's, subtracts
// background and frame in the configured direction (mMode), publishes the
// result as the pipeline's color image, and updates the display.
void THISCLASS::OnStep() {
    // Get and check input image
    IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
    if (! inputimage) {
        AddError(wxT("No input image."));
        return;
    }
    if (inputimage->nChannels != 3) {
        AddError(wxT("The input image is not a color image."));
        return;
    }

    // Check and update the background
    if (! mOutputImage) {
        mOutputImage = cvCloneImage(inputimage);
    } else {
        cvCopyImage(inputimage, mOutputImage);
    }
    if (! mBackgroundImage) {
        // First frame seen becomes the initial background model.
        mBackgroundImage = cvCloneImage(mOutputImage);
    } else if (mUpdateProportion > 0) {
        if ((cvGetSize(mOutputImage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(mOutputImage).width != cvGetSize(mBackgroundImage).width)) {
            AddError(wxT("Input and background images do not have the same size."));
            return;
        }
        // Running average: background <- p*frame + (1-p)*background.
        cvAddWeighted(mOutputImage, mUpdateProportion, mBackgroundImage, 1.0 - mUpdateProportion, 0, mBackgroundImage);
    }

    try {
        // Correct the tmpImage with the difference in image mean:
        // shift each channel of the frame so its mean matches the
        // background's mean (compensates global illumination changes).
        if (mCorrectMean) {
            mBackgroundImageMean = cvAvg(mBackgroundImage);
            CvScalar tmpScalar = cvAvg(mOutputImage);
            cvAddS(mOutputImage, cvScalar(mBackgroundImageMean.val[0] - tmpScalar.val[0], mBackgroundImageMean.val[1] - tmpScalar.val[1], mBackgroundImageMean.val[2] - tmpScalar.val[2]), mOutputImage);
        }

        // Background subtraction, direction chosen by mMode; the default
        // is the absolute (direction-independent) difference.
        if (mMode == sMode_SubImageBackground) {
            cvSub(mOutputImage, mBackgroundImage, mOutputImage);
        } else if (mMode == sMode_SubBackgroundImage) {
            cvSub(mBackgroundImage, mOutputImage, mOutputImage);
        } else {
            cvAbsDiff(mOutputImage, mBackgroundImage, mOutputImage);
        }
    } catch (...) {
        // Report failures through the component error channel instead of
        // letting an exception escape the processing step.
        AddError(wxT("Background subtraction failed."));
    }

    mCore->mDataStructureImageColor.mImage = mOutputImage;

    // Set the display
    DisplayEditor de(&mDisplayOutput);
    if (de.IsActive()) {
        de.SetMainImage(mOutputImage);
    }
}
开发者ID:dtbinh,项目名称:swistrackplus,代码行数:55,代码来源:ComponentAdaptiveBackgroundSubtractionColor.cpp
示例20: main
/* Continuously captures pairs of frames from the first attached camera,
 * half a second apart, and displays their grayscale absolute difference
 * in the "video" window. Press 'q' to quit. */
int main ( int argc, char **argv )
{
    /* use first camera attached to computer */
    CvCapture *capture = cvCaptureFromCAM( 0 );
    assert( capture );

    /* get the camera image size */
    IplImage *imgsize = cvQueryFrame( capture );
    if( !imgsize ) return -1;

    /* grayscale buffers */
    IplImage *imggray1 = cvCreateImage( cvGetSize( imgsize ), IPL_DEPTH_8U, 1);
    IplImage *imggray2 = cvCreateImage( cvGetSize( imgsize ), IPL_DEPTH_8U, 1);
    IplImage *imggray3 = cvCreateImage( cvGetSize( imgsize ), IPL_DEPTH_8U, 1);

    /* FIX: create the display window once, not on every loop iteration
     * as the original did. */
    cvNamedWindow( "video", 1 );

    int key = 0;
    while ( key != 'q' ) {
        /* first frame of the pair */
        IplImage *img1 = cvQueryFrame( capture );
        if( !img1 ) break;   /* ROBUSTNESS: camera unplugged / stream ended */
        cvCvtColor( img1, imggray1, CV_RGB2GRAY );

        /* quit if user presses 'q'; also paces the pair half a second apart */
        key = cvWaitKey( 500 );

        /* second frame of the pair */
        IplImage *img2 = cvQueryFrame( capture );
        if( !img2 ) break;
        cvCvtColor( img2, imggray2, CV_RGB2GRAY );

        /* compute and display the difference */
        cvAbsDiff( imggray1, imggray2, imggray3 );
        cvShowImage( "video", imggray3 );
    }

    /* FIX: release the grayscale buffers too (previously leaked), then the
     * camera and the window. Frames returned by cvQueryFrame are owned by
     * the capture and must NOT be released individually. */
    cvReleaseImage( &imggray1 );
    cvReleaseImage( &imggray2 );
    cvReleaseImage( &imggray3 );
    cvReleaseCapture( &capture );
    cvDestroyWindow( "video" );
    return 0;
}
开发者ID:squidforce,项目名称:spot_hustle,代码行数:54,代码来源:image-diff.cpp
注:本文中的cvAbsDiff函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论