本文整理汇总了C++中cvReleaseImage函数的典型用法代码示例。如果您正苦于以下问题:C++ cvReleaseImage函数的具体用法?C++ cvReleaseImage怎么用?C++ cvReleaseImage使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvReleaseImage函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: cvReleaseImage
// Free the image buffer owned by this CvvImage.
// cvReleaseImage is NULL-safe and resets m_img to NULL after freeing,
// so calling Destroy() more than once is harmless.
CV_INLINE void CvvImage::Destroy()
{
    cvReleaseImage( &m_img );
}
开发者ID:awesomeleo,项目名称:homeServiceRobot,代码行数:4,代码来源:CvvImage.cpp
示例2: main
//.........这里部分代码省略.........
//! [Re-init tracker]
// Restart the initialization to detect new keypoints
if (reader.getFrameIndex() == 25) {
std::cout << "Re initialize the tracker" << std::endl;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020408)
// Save of previous features
std::vector<cv::Point2f> prev_features = tracker.getFeatures();
// Start a new feature detection
tracker.initTracking(cvI);
std::vector<cv::Point2f> new_features = tracker.getFeatures();
// Add previous features if they are not to close to detected one
double distance, minDistance_ = tracker.getMinDistance();
bool is_redundant;
for (size_t i=0; i < prev_features.size(); i++) {
// Test if a previous feature is not redundant with one of the newly detected
is_redundant = false;
for (size_t j=0; j < new_features.size(); j++){
distance = sqrt(vpMath::sqr(new_features[j].x-prev_features[i].x) + vpMath::sqr(new_features[j].y-prev_features[i].y));
if(distance < minDistance_){
is_redundant = true;
break;
}
}
if(is_redundant){
continue;
}
//std::cout << "Add previous feature with index " << i << std::endl;
tracker.addFeature(prev_features[i]);
}
#else
// Save of previous features
int prev_nfeatures = tracker.getNbFeatures();
float x,y;
long id;
int j=0;
CvPoint2D32f *prev_features = (CvPoint2D32f*)cvAlloc(prev_nfeatures*sizeof(CvPoint2D32f));
for (int i=0; i <prev_nfeatures ; i ++) {
tracker.getFeature(i, id, x, y);
prev_features[i].x=x;
prev_features[i].y=y;
//printf("prev feature %d: id %d coord: %g %g\n", i, id, x, y);
}
// Start a new feature detection
tracker.initTracking(cvI);
std::cout << "Detection of " << tracker.getNbFeatures() << " new features" << std::endl;
// Add previous features if they are not to close to detected one
double distance, minDistance_ = tracker.getMinDistance();
for(int i = tracker.getNbFeatures() ;
j<prev_nfeatures && i<tracker.getMaxFeatures() ;
j++){
// Test if a previous feature is not redundant with new the one that are newly detected
bool is_redundant = false;
for(int k=0; k<tracker.getNbFeatures(); k++){
tracker.getFeature(k,id,x,y);
//printf("curr feature %d: id %d coord: %g %g\n", k, id, x, y);
distance = sqrt(vpMath::sqr(x-prev_features[j].x) + vpMath::sqr(y-prev_features[j].y));
if(distance < minDistance_){
is_redundant = true;
break;
}
}
if(is_redundant){
continue;
}
//std::cout << "Add previous feature with index " << i << std::endl;
tracker.addFeature(i, prev_features[j].x, prev_features[j].y);
i++;
}
cvFree(&prev_features);
#endif
}
// Track the features
tracker.track(cvI);
//! [Re-init tracker]
std::cout << "tracking of " << tracker.getNbFeatures() << " features" << std::endl;
tracker.display(I, vpColor::red);
vpDisplay::flush(I);
}
vpDisplay::getClick(I);
#if (VISP_HAVE_OPENCV_VERSION < 0x020408)
cvReleaseImage(&cvI);
#endif
return 0;
}
catch(vpException &e) {
std::cout << "Catch an exception: " << e << std::endl;
}
#endif
}
开发者ID:ricsp,项目名称:visp,代码行数:101,代码来源:tutorial-klt-tracker-with-reinit.cpp
示例3: cvMat
// Parse a Videre calibration text blob ("info") and build the left/right
// undistort-rectify maps into ml[0..1] / mr[0..1].
//
// info  - calibration file contents with "[left_camera]" / "[right_camera]"
//         sections containing "f ", "fy", "Cx", "Cy", kappa/tau coefficients.
// ml/mr - pairs of pre-allocated map images filled via cvConvertMaps.
// Returns true only when BOTH camera sections were found and fully parsed.
bool CvCaptureCAM_DC1394_v2_CPP::initVidereRectifyMaps( const char* info,
                                                        IplImage* ml[2], IplImage* mr[2] )
{
    float identity_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    CvMat l_rect = cvMat(3, 3, CV_32F, identity_data), r_rect = l_rect;
    float l_intrinsic_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    float r_intrinsic_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    CvMat l_intrinsic = cvMat(3, 3, CV_32F, l_intrinsic_data);
    CvMat r_intrinsic = cvMat(3, 3, CV_32F, r_intrinsic_data);
    float l_distortion_data[] = {0,0,0,0,0}, r_distortion_data[] = {0,0,0,0,0};
    CvMat l_distortion = cvMat(1, 5, CV_32F, l_distortion_data);
    CvMat r_distortion = cvMat(1, 5, CV_32F, r_distortion_data);
    IplImage* mx = cvCreateImage(cvGetSize(ml[0]), IPL_DEPTH_32F, 1);
    IplImage* my = cvCreateImage(cvGetSize(ml[0]), IPL_DEPTH_32F, 1);
    int k, j;
    for( k = 0; k < 2; k++ )
    {
        const char* section_name = k == 0 ? "[left_camera]" : "[right_camera]";
        // "f " keeps its trailing space so strstr does not match "fy".
        // BUGFIX: the original array read  "Cx", "Cy" "kappa1", ...  (missing
        // comma), which string-concatenated into the never-matching token
        // "Cykappa1" and made the parameter loop fail for every input.
        static const char* param_names[] = { "f ", "fy", "Cx", "Cy", "kappa1", "kappa2", "tau1", "tau2", "kappa3", 0 };
        const char* section_start = strstr( info, section_name );
        CvMat* intrinsic = k == 0 ? &l_intrinsic : &r_intrinsic;
        CvMat* distortion = k == 0 ? &l_distortion : &r_distortion;
        CvMat* rectification = k == 0 ? &l_rect : &r_rect;
        IplImage** dst = k == 0 ? ml : mr;
        if( !section_start )
            break;
        section_start += strlen(section_name);
        for( j = 0; param_names[j] != 0; j++ )
        {
            const char* param_value_start = strstr(section_start, param_names[j]);
            float val = 0;
            if( !param_value_start )
                break;
            sscanf(param_value_start + strlen(param_names[j]), "%f", &val);
            if( j < 4 )
                // fx, fy, cx, cy -> elements 0, 4, 2, 5 of the row-major 3x3
                intrinsic->data.fl[j == 0 ? 0 : j == 1 ? 4 : j == 2 ? 2 : 5] = val;
            else
                distortion->data.fl[j - 4] = val;
        }
        if( param_names[j] != 0 )
            break; // at least one parameter was missing -> give up on this camera
        // some sanity check for the principal point: if it sits far from the
        // image center, the calibration was probably done at 2x the current
        // resolution, so try halving the intrinsics.
        if( fabs(mx->width*0.5 - intrinsic->data.fl[2]) > mx->width*0.1 ||
            fabs(my->height*0.5 - intrinsic->data.fl[5]) > my->height*0.1 )
        {
            // BUGFIX: the original passed &intrinsic (a CvMat**) to cvScale;
            // cvScale expects a CvArr* (the CvMat header itself), so the call
            // operated on garbage. Pass the CvMat* directly.
            cvScale( intrinsic, intrinsic, 0.5 ); // try the corrected intrinsic matrix for 2x lower resolution
            if( fabs(mx->width*0.5 - intrinsic->data.fl[2]) > mx->width*0.05 ||
                fabs(my->height*0.5 - intrinsic->data.fl[5]) > my->height*0.05 )
                cvScale( intrinsic, intrinsic, 2 ); // revert it back if the new variant is not much better
            intrinsic->data.fl[8] = 1; // restore the homogeneous 1 clobbered by the scaling
        }
        cvInitUndistortRectifyMap( intrinsic, distortion,
                                   rectification, intrinsic, mx, my );
        cvConvertMaps( mx, my, dst[0], dst[1] );
    }
    cvReleaseImage( &mx );
    cvReleaseImage( &my );
    return k >= 2; // true only if neither section broke out early
}
开发者ID:112000,项目名称:opencv,代码行数:63,代码来源:cap_dc1394_v2.cpp
示例4: update_mhi
// parameters:
// img - input video frame
// dst - resultant motion picture
// args - optional parameters
void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
CvSize size = cvSize(img->width,img->height); // get current frame size
int i, idx1 = last, idx2;
IplImage* silh;
CvSeq* seq;
CvRect comp_rect;
double count;
double angle;
CvPoint center;
double magnitude;
CvScalar color;
// allocate images at the beginning or
// reallocate them if the frame size is changed
if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
if( buf == 0 ) {
buf = (IplImage**)malloc(N*sizeof(buf[0]));
memset( buf, 0, N*sizeof(buf[0]));
}
for( i = 0; i < N; i++ ) {
cvReleaseImage( &buf[i] );
buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
cvZero( buf[i] );
}
cvReleaseImage( &mhi );
cvReleaseImage( &orient );
cvReleaseImage( &segmask );
cvReleaseImage( &mask );
mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
cvZero( mhi ); // clear MHI at the beginning
orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
}
cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale
idx2 = (last + 1) % N; // index of (last - (N-1))th frame
last = idx2;
silh = buf[idx2];
cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames
cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI
// convert MHI to blue 8u image
cvCvtScale( mhi, mask, 255./MHI_DURATION,
(MHI_DURATION - timestamp)*255./MHI_DURATION );
cvZero( dst );
cvMerge( mask, 0, 0, 0, dst );
// calculate motion gradient orientation and valid orientation mask
cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
printf("Nonzero count %d\n", cvCountNonZero(mask));
if( !storage )
storage = cvCreateMemStorage(0);
else
cvClearMemStorage(storage);
// segment motion: get sequence of motion components
// segmask is marked motion components map. It is not used further
seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );
// iterate through the motion components,
// One more iteration (i == -1) corresponds to the whole image (global motion)
for( i = -1; i < seq->total; i++ ) {
if( i < 0 ) { // case of the whole image
comp_rect = cvRect( 0, 0, size.width, size.height );
color = CV_RGB(255,255,255);
magnitude = 100;
}
else { // i-th motion component
comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
continue;
color = CV_RGB(255,0,0);
magnitude = 30;
}
// select component ROI
cvSetImageROI( silh, comp_rect );
cvSetImageROI( mhi, comp_rect );
cvSetImageROI( orient, comp_rect );
cvSetImageROI( mask, comp_rect );
// calculate orientation
angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
angle = 360.0 - angle; // adjust for images with top-left origin
//.........这里部分代码省略.........
开发者ID:ericperko,项目名称:cwrucam,代码行数:101,代码来源:motempl.c
示例5: getFeatureMaps
//.........这里部分代码省略.........
}
}
}
alfa[j * width * 2 + i * 2 ] = maxi % NUM_SECTOR;
alfa[j * width * 2 + i * 2 + 1] = maxi;
}/*for(i = 0; i < width; i++)*/
}/*for(j = 0; j < height; j++)*/
//подсчет весов и смещений
nearest = (int *)malloc(sizeof(int ) * k);
w = (float*)malloc(sizeof(float) * (k * 2));
for(i = 0; i < k / 2; i++)
{
nearest[i] = -1;
}/*for(i = 0; i < k / 2; i++)*/
for(i = k / 2; i < k; i++)
{
nearest[i] = 1;
}/*for(i = k / 2; i < k; i++)*/
for(j = 0; j < k / 2; j++)
{
b_x = k / 2 + j + 0.5f;
a_x = k / 2 - j - 0.5f;
w[j * 2 ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));
w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));
}/*for(j = 0; j < k / 2; j++)*/
for(j = k / 2; j < k; j++)
{
a_x = j - k / 2 + 0.5f;
b_x =-j + k / 2 - 0.5f + k;
w[j * 2 ] = 1.0f/a_x * ((a_x * b_x) / ( a_x + b_x));
w[j * 2 + 1] = 1.0f/b_x * ((a_x * b_x) / ( a_x + b_x));
}/*for(j = k / 2; j < k; j++)*/
//интерполяция
for(i = 0; i < sizeY; i++)
{
for(j = 0; j < sizeX; j++)
{
for(ii = 0; ii < k; ii++)
{
for(jj = 0; jj < k; jj++)
{
if ((i * k + ii > 0) &&
(i * k + ii < height - 1) &&
(j * k + jj > 0) &&
(j * k + jj < width - 1))
{
d = (k * i + ii) * width + (j * k + jj);
(*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 ]] +=
r[d] * w[ii * 2] * w[jj * 2];
(*map)->map[ i * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
r[d] * w[ii * 2] * w[jj * 2];
if ((i + nearest[ii] >= 0) &&
(i + nearest[ii] <= sizeY - 1))
{
(*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 ] ] +=
r[d] * w[ii * 2 + 1] * w[jj * 2 ];
(*map)->map[(i + nearest[ii]) * stringSize + j * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
r[d] * w[ii * 2 + 1] * w[jj * 2 ];
}
if ((j + nearest[jj] >= 0) &&
(j + nearest[jj] <= sizeX - 1))
{
(*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 ] ] +=
r[d] * w[ii * 2] * w[jj * 2 + 1];
(*map)->map[i * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
r[d] * w[ii * 2] * w[jj * 2 + 1];
}
if ((i + nearest[ii] >= 0) &&
(i + nearest[ii] <= sizeY - 1) &&
(j + nearest[jj] >= 0) &&
(j + nearest[jj] <= sizeX - 1))
{
(*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 ] ] +=
r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
(*map)->map[(i + nearest[ii]) * stringSize + (j + nearest[jj]) * (*map)->numFeatures + alfa[d * 2 + 1] + NUM_SECTOR] +=
r[d] * w[ii * 2 + 1] * w[jj * 2 + 1];
}
}
}/*for(jj = 0; jj < k; jj++)*/
}/*for(ii = 0; ii < k; ii++)*/
}/*for(j = 1; j < sizeX - 1; j++)*/
}/*for(i = 1; i < sizeY - 1; i++)*/
cvReleaseImage(&dx);
cvReleaseImage(&dy);
free(w);
free(nearest);
free(r);
free(alfa);
return LATENT_SVM_OK;
}
开发者ID:Ashwini7,项目名称:smart-python-programs,代码行数:101,代码来源:featurepyramid.cpp
示例6: run
//.........这里部分代码省略.........
break; //End of GRAB_DOTS
case SELECT_TRANSFORM:
//Falling through here. Poor man's multi-case clause. Not putting this in default as we might
//want to do different things in these two some day.
case SELECT_MASK:
snprintf( strbuf, sizeof( strbuf ), "Select %s point", pointTranslationTable[clickParams.currentPoint]);
cvDisplayOverlay( imagewindowname, strbuf, 5 );
break; //End of SELECT_MASK and SELECT_TRANSFORM
}
// Paint the corners of the detecting area and the calibration area
paintOverlayPoints( grabbedImage, &DD_transform );
//Print some statistics to the image
if( show ) {
snprintf( strbuf, sizeof( strbuf ), "Dots: %i", detected_dots ); //Print number of detected dots to the screen
cvPutText( grabbedImage, strbuf, cvPoint( 10, 20 ), &font, cvScalar( WHITE ));
snprintf( strbuf, sizeof( strbuf ), "FPS: %.1f", lastKnownFPS );
cvPutText( grabbedImage, strbuf, cvPoint( 10, 40 ), &font, cvScalar( WHITE ));
cvCircle( grabbedImage, cvPoint( 15, 55 ), minDotRadius, cvScalar( min.blue, min.green, min.red, min.alpha ), -1, 8, 0 ); // Colors given in order BGR-A, Blue, Green, Red, Alpha
}
//Show images
PROFILING_PRO_STAMP();
if( show ) {
cvShowImage( configwindowname, imgThreshold );
cvShowImage( imagewindowname, grabbedImage );
if( warp ) cvShowImage( warpwindowname, coloredMask );
}
PROFILING_POST_STAMP("Showing images");
//Release the temporary images
cvReleaseImage( &imgThreshold );
cvReleaseImage( &mask );
cvReleaseImage( &coloredMask );
/* Update exposure if needed */
updateAbsoluteExposure( captureControl, currentExposure );
cvSetTrackbarPos( exposure_lable, configwindowname, currentExposure );
//If ESC key pressed, Key=0x10001B under OpenCV 0.9.7( linux version ),
//remove higher bits using AND operator
i = ( cvWaitKey( 10 ) & 0xff );
switch( i ) {
case 'g':
makeCalibrate( &DD_transform, &DD_transform_to, transMat, capture, captureControl, 20 );
updateAbsoluteExposure( captureControl, currentExposure+1 );
break;
case 'e':
toggleCalibrationMode( &calibrate_exposure, ¤tExposure );
break; /* Toggles calibration mode */
case 'c':
openCamera( &capture, &captureControl );
break;
case 's':
show = ~show;
break; //Toggles updating of the image. Can be useful for performance of slower machines... Or as frame freeze
case 'm':
state = SELECT_MASK;
clickParams.currentPoint = TOP_LEFT;
clickParams.DD_box = &DD_mask;
开发者ID:Zazcallabah,项目名称:dotdetector,代码行数:67,代码来源:main.c
示例7: main
//.........这里部分代码省略.........
printf("Start processing frames...\n\n");
start = clock(); // Start timer
bgr_frame=cvQueryFrame(capture); // Grab first frame
previous_frame = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels); // Create the previous frame
current_frame = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels); // Create the current frame
cvCopy(bgr_frame,previous_frame,NULL); // Save the copy
// Grab frames from the video until NULL
while((bgr_frame=cvQueryFrame(capture)) != NULL) {
/* When entering this loop, we have already grabbed a frame
* so the frame counter starts from 2
*/
frame = cvGetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES); // Get the current frame number
cvCopy(bgr_frame,current_frame,NULL); // Save the copy
/**** START PROCESSING ****/
ecrdiff_v2(current_frame, previous_frame, size, frame, fp, &index);
/**** END PROCESSING ****/
cvCopy(bgr_frame,previous_frame,NULL); // Save the copy
if(index==1) {
check_frames[frame]=1; // It means that the specific frame is marked
}
printf("Processing frame %d...\r",frame);
fflush(stdout);
}
cvReleaseImage(&bgr_frame); // Release bgr_frame
cvReleaseImage(&previous_frame); // Release previous_frame
cvReleaseImage(¤t_frame); // Release current_frame
cvReleaseCapture(&capture); // Release capture
stop = clock(); // Stop timer
diff = stop - start; // Get difference between start time and current time;
printf("\n\nTotal time processing frames : %f minutes\t%f seconds\n", (((float)diff)/CLOCKS_PER_SEC)/60, ((float)diff)/CLOCKS_PER_SEC);
printf("Processing completed!\n");
fprintf(fp,"\n\n\n\nMarked frames\n\n");
printf("\n\n\n\nMarked frames\n\n");
for(i=0;i<total_frames;i++) {
if(check_frames[i]==1) {
list_of_frames[i]=i;
fprintf(fp,"frame %d\n",i); // Write to file only marked frames
printf("frame %d\n",i); // Write to file only marked frames
marked_frames++;
}
}
fprintf(fp,"\n\nTotal marked frames\t:\t%d\n",marked_frames);
printf("\n\nTotal marked frames\t:\t%d\n\n",marked_frames);
//If there is no markeed frames, exit
if(marked_frames == 0) {
return EXIT_SUCCESS;
}
/**** STAGE 2: WRITE VIDEO ****/
开发者ID:nlabiris,项目名称:marking_video_frames_OpenCV,代码行数:67,代码来源:ecrdiff_v2_write_video.c
示例8: cvCreateImage
////////////////////////////////////////////////////////////////////////////////////
// Display the tensor information of every scale level as a color image.
// For each level, the three tensor components stored in m_pImageTensorRGB[i]
// are independently min/max-normalized to [0,255] and shown in a BGR window.
////////////////////////////////////////////////////////////////////////////////////
void Tensor::ShowTensorByColorImage()
{
    int x, y, i;
    // One 3-channel display image per level.
    IplImage **pImg = new IplImage *[m_levels];
    for (i = 0; i < m_levels; i++)
    {
        pImg[i] = cvCreateImage(cvGetSize(m_img), m_img->depth, 3);
        cvZero(pImg[i]);
    }
    CString *ptitle = new CString[m_levels];
    for (i = 0; i < m_levels; i++)
    {
        // Per-channel extrema for THIS level. BUGFIX: the original declared
        // these once before the level loop and never reset them, so every
        // level after the first was normalized with stale extrema carried
        // over from earlier levels.
        double ret_minr = 0.0, ret_maxr = 0.0;
        double ret_ming = 0.0, ret_maxg = 0.0;
        double ret_minb = 0.0, ret_maxb = 0.0;
        // Pass 1: find the min/max of each channel.
        for (y = 0; y < m_h; y++)
        {
            for (x = 0; x < m_w; x++)
            {
                double r = (*m_pImageTensorRGB[i])(x, y).r;
                double g = (*m_pImageTensorRGB[i])(x, y).g;
                double b = (*m_pImageTensorRGB[i])(x, y).b;
                if (r > ret_maxr) ret_maxr = r;
                if (r < ret_minr) ret_minr = r;
                if (g > ret_maxg) ret_maxg = g;
                if (g < ret_ming) ret_ming = g;
                if (b > ret_maxb) ret_maxb = b;
                if (b < ret_minb) ret_minb = b;
            }
        }
        // Guard against a constant channel (max == min) which would divide
        // by zero below.
        double ranger = (ret_maxr > ret_minr) ? (ret_maxr - ret_minr) : 1.0;
        double rangeg = (ret_maxg > ret_ming) ? (ret_maxg - ret_ming) : 1.0;
        double rangeb = (ret_maxb > ret_minb) ? (ret_maxb - ret_minb) : 1.0;
        // Pass 2: write the normalized values into the BGR image.
        uchar *dst = (uchar *)pImg[i]->imageData;
        for (y = 0; y < m_h; y++)
        {
            for (x = 0; x < m_w; x++)
            {
                int temp = y * (pImg[i]->widthStep) + 3 * x;
                // Map to [0,255]. BUGFIX: the original multiplied by 256,
                // which wraps the channel maximum to 0 after the uchar cast.
                dst[temp + 2] = (uchar)(((*m_pImageTensorRGB[i])(x, y).r - ret_minr) / ranger * 255.0);
                dst[temp + 1] = (uchar)(((*m_pImageTensorRGB[i])(x, y).g - ret_ming) / rangeg * 255.0);
                dst[temp + 0] = (uchar)(((*m_pImageTensorRGB[i])(x, y).b - ret_minb) / rangeb * 255.0);
            }
        }
        ptitle[i].Format(_T("Image Texture of Level %d"), i);
        cvNamedWindow((char *)(LPCTSTR)ptitle[i], CV_WINDOW_AUTOSIZE);
        cvShowImage((char *)(LPCTSTR)ptitle[i], pImg[i]);
    }
    if (pImg != NULL)
    {
        for (i = 0; i < m_levels; i++)
        {
            cvReleaseImage(&pImg[i]);
        }
        delete [] pImg;
    }
    // BUGFIX: ptitle was leaked in the original (allocated with new[] and
    // never freed).
    delete [] ptitle;
}
开发者ID:xwlaina,项目名称:GrabCut,代码行数:82,代码来源:Tensor.cpp
示例9: main
//.........这里部分代码省略.........
cvNamedWindow( "VideoDisplay1", 1 );
cvNamedWindow( "VideoDisplay2", 1 );
cvNamedWindow( "VideoDisplay3", 1 );
cvNamedWindow( "VideoDisplay4", 1 );
// Capture
m_pCapture = cvCreateFileCapture("MVI_8833.AVI");
contour = cvCreateSeq(CV_SEQ_ELTYPE_POINT,sizeof(CvSeq),sizeof(CvPoint),storage);
if( !m_pCapture )
{
fprintf(stderr,"Could not initialize capturing! \n");
return -1;
}
// Display
while ( (m_pPreImage = cvQueryFrame(m_pCapture)))
{
imgSize = cvSize(m_pPreImage->width, m_pPreImage->height);
if(!m_pGrayImage)
m_pGrayImage = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
if(!pCurr)
pCurr = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
if(!m_pSmoothImage)
m_pSmoothImage = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
//图像预处理
cvCvtColor(m_pPreImage, m_pGrayImage, CV_BGR2GRAY);//转化为灰度图像
cvSmooth(m_pGrayImage,m_pSmoothImage,CV_GAUSSIAN,3,0,0,0 );//GAUSSIAN平滑去噪声
cvEqualizeHist(m_pSmoothImage,pCurr );//直方图均衡
if(!pPrevF)
pPrevF = cvCreateMat(m_pGrayImage->width,m_pPreImage->height, CV_32FC1);
if(!pCurrF)
pCurrF = cvCreateMat(m_pGrayImage->width,m_pPreImage->height, CV_32FC1);
if(!pPrev)
pPrev = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
if(!pMask)
pMask = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
if(!pMaskDest)
pMaskDest = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
if(!dst)
dst = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
if(!pDest)
{
pDest = cvCreateImage(imgSize, IPL_DEPTH_8U, 1);
}
cvAbsDiff(pPrev, pCurr, pDest); //帧差
cvCopy(pCurr, pPrev, NULL); // 当前帧存入前一帧
cvThreshold(pDest, pMask, 80, 255, CV_THRESH_BINARY); // 二值化
element = cvCreateStructuringElementEx( 9, 9, 3, 3, CV_SHAPE_RECT, NULL);
cvMorphologyEx( pMask, pMaskDest, NULL, element, CV_MOP_CLOSE, 1);//形态学处理
//查找并且画出团块轮廓
cvFindContours( pMaskDest, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
//画出包含目标的最小矩形
for(;contour;contour=contour->h_next)
{
r=((CvContour*)contour)->rect;
if(r.height*r.width>100)
{
cvRectangle(m_pPreImage,cvPoint(r.x,r.y),cvPoint(r.x+r.width,r.y+r.height),CV_RGB(255,0,0),1,CV_AA,0);
}
}
cvShowImage( "VideoDisplay1", m_pPreImage );
cvShowImage( "VideoDisplay2", pMask);
cvShowImage( "VideoDisplay3", pMaskDest );
cvShowImage( "VideoDisplay4", pPrev );
if(cvWaitKey(50)>0)
return 0;
}
// Realease
cvReleaseImage( &m_pPreImage );
cvReleaseImage( &m_pGrayImage );
cvReleaseImage( &m_pSmoothImage );
cvReleaseImage( &pCurr );
cvReleaseImage( &pDest );
cvReleaseImage( &pMask );
cvReleaseImage( &pMaskDest );
cvReleaseImage( &dst );
cvReleaseMemStorage( &storage );
cvDestroyWindow("VideoDisplay1");
cvDestroyWindow("VideoDisplay2");
cvDestroyWindow("VideoDisplay3");
cvDestroyWindow("VideoDisplay4");
cvReleaseStructuringElement( &element );
return 0;
}
开发者ID:RogerFederer03,项目名称:Tennis_System,代码行数:101,代码来源:frame_difference.cpp
示例10: cvCopyImage
//.........这里部分代码省略.........
//将dx,dy分别赋给cv_dx,cv_dy
CvMat cv_dx = cvMat(m_h, m_w, CV_64FC1, dx.GetData());
CvMat cv_dy = cvMat(m_h, m_w, CV_64FC1, dy.GetData());
//初始化cv_tensor0,cv_tensor1,cv_tensor2,此时m_tensor[0],m_tensor[1],m_tensor[2]均初始化0
CvMat cv_tensor0 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[0])->GetData());
CvMat cv_tensor1 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[1])->GetData());
CvMat cv_tensor2 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[2])->GetData());
//计算图像的梯度,保存在cv_gradX,cv_gradY中,并赋值给m_gradient[0],m_gradient[1]
if (isComputeGradient)
{
//cv_gradX,cv_gradY初始化并计算
CvMat cv_gradX = cvMat(m_h, m_w, CV_64FC1, (m_gradient[0])->GetData());
CvMat cv_gradY = cvMat(m_h, m_w, CV_64FC1, (m_gradient[1])->GetData());
cvAdd(&cv_gradX, &cv_dx, &cv_gradX);//对于三个通道进行累加
cvAdd(&cv_gradY, &cv_dy, &cv_gradY);
}
//计算结构张量,cv_tensor0=dx*dx,cv_tensor1=dy*dy,cv_tensor2=dx*dy
cvMul(&cv_dx, &cv_dx, &cv_dx2);
cvAdd(&cv_tensor0, &cv_dx2, &cv_tensor0);
cvMul(&cv_dy, &cv_dy, &cv_dy2);
cvAdd(&cv_tensor1, &cv_dy2, &cv_tensor1);
cvMul(&cv_dx, &cv_dy, &cv_dxdy);
cvAdd(&cv_tensor2, &cv_dxdy, &cv_tensor2);
//单尺度计算完毕,以下为多尺度非线性结构张量的计算方法
if (m_levels > 1)
{
unsigned int wavelet_levels = m_levels - 1; //-1的原因是因为之前没有if (m_levels==1)的判断语句
double dMaxValue,dMinValue;
cvMinMaxLoc(cv_channels[n], &dMinValue, &dMaxValue);//Finds global minimum, maximum
//将图像的像素值归一化到[0,1]
Wavelet *wave = new Wavelet(&image, dMinValue, dMaxValue, wavelet_levels); //调用Wavelet的构造函数
//新建WaveletDetailImages结构体的数组
WaveletDetailImages *D_images = new WaveletDetailImages[wavelet_levels];
for (i = 0; i < wavelet_levels; i++)
{
D_images[i].Detail_1 = new CMatrix(m_h, m_w);
D_images[i].Detail_2 = new CMatrix(m_h, m_w);
}
wave->execute(D_images);//得到D(s,x),D(s,y)
for (i = 0; i < wavelet_levels; i++)
{
//默认多尺度结构张量的比例因子a=2
double scale = pow((float)0.25, (int)(i + 1)); //见公式(2-15)
CvMat cv_dx = cvMat(m_h, m_w, CV_64FC1, D_images[i].Detail_1->GetData());
CvMat cv_dy = cvMat(m_h, m_w, CV_64FC1, D_images[i].Detail_2->GetData());
CvMat cv_tensor0 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[(i+1) * SiNGLE_TENSOR_DIM])->GetData());
CvMat cv_tensor1 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[(i+1) * SiNGLE_TENSOR_DIM + 1])->GetData());
CvMat cv_tensor2 = cvMat(m_h, m_w, CV_64FC1, (m_tensor[(i+1) * SiNGLE_TENSOR_DIM + 2])->GetData());
//计算梯度
if (isComputeGradient)
{
CvMat cv_gradX = cvMat(m_h, m_w, CV_64FC1, (m_gradient[(i+1) * m_axes_cnt])->GetData());
CvMat cv_gradY = cvMat(m_h, m_w, CV_64FC1, (m_gradient[(i+1) * m_axes_cnt + 1])->GetData());
cvAdd(&cv_gradX, &cv_dx, &cv_gradX);
cvAdd(&cv_gradY, &cv_dy, &cv_gradY);
}
//计算张量
cvMul(&cv_dx, &cv_dx, &cv_dx2, scale);
cvAdd(&cv_tensor0, &cv_dx2, &cv_tensor0);
cvMul(&cv_dy, &cv_dy, &cv_dy2, scale);
cvAdd(&cv_tensor1, &cv_dy2, &cv_tensor1);
cvMul(&cv_dx, &cv_dy, &cv_dxdy, scale);
cvAdd(&cv_tensor2, &cv_dxdy, &cv_tensor2);
}
for (i = 0; i < wavelet_levels; i++)
{
delete D_images[i].Detail_1;
delete D_images[i].Detail_2;
}
delete [] D_images;
delete wave;
}
cvReleaseImage(&cv_channels[n]);
}
//将每一尺度的结构张量转换为彩色图像存储起来
for (i=0;i<m_levels;i++)
{
for (y=0;y<m_h;y++)
{
for (x=0;x<m_w;x++)
{
(*m_pImageTensorRGB[i])(x,y).r=(m_tensor[i*SiNGLE_TENSOR_DIM])->GetElement(y,x);
(*m_pImageTensorRGB[i])(x,y).g=(m_tensor[i*SiNGLE_TENSOR_DIM+1])->GetElement(y,x);
(*m_pImageTensorRGB[i])(x,y).b=(m_tensor[i*SiNGLE_TENSOR_DIM+2])->GetElement(y,x);
}
}
}
m_tensors = NULL;
}
开发者ID:xwlaina,项目名称:GrabCut,代码行数:101,代码来源:Tensor.cpp
示例11: delete
// Tensor destructor: release every heap object the tensor owns — the
// component matrices, the per-level RGB tensor images, the gradient
// matrices, the per-pixel tensor table, and the source IplImage.
// Each pointer is reset to NULL after being freed.
Tensor::~Tensor()
{
    // Component matrices. delete on a NULL element is a no-op, so no
    // per-element check is needed.
    if (m_tensor != NULL)
    {
        for (int idx = 0; idx < m_dim; ++idx)
        {
            delete m_tensor[idx];
            m_tensor[idx] = NULL;
        }
        delete [] m_tensor;
        m_tensor = NULL;
    }
    // Per-level RGB tensor images.
    if (m_pImageTensorRGB != NULL)
    {
        for (int lvl = 0; lvl < m_levels; ++lvl)
        {
            delete m_pImageTensorRGB[lvl];
            m_pImageTensorRGB[lvl] = NULL;
        }
        delete [] m_pImageTensorRGB;
        m_pImageTensorRGB = NULL;
    }
    // Gradient matrices.
    if (m_gradient != NULL)
    {
        for (int g = 0; g < m_grad_dim; ++g)
        {
            delete m_gradient[g];
            m_gradient[g] = NULL;
        }
        delete [] m_gradient;
        m_gradient = NULL;
    }
    // Per-pixel tensor table: free each cell, then the table itself.
    if (m_tensors != NULL)
    {
        for (unsigned int row = 0; row < m_h; ++row)
        {
            for (unsigned int col = 0; col < m_w; ++col)
            {
                delete (*m_tensors)(col, row);
                (*m_tensors)(col, row) = NULL;
            }
        }
        delete m_tensors;
        m_tensors = NULL;
    }
    // Source image.
    if (m_img != NULL)
    {
        cvReleaseImage(&m_img);
        m_img = NULL;
    }
}
开发者ID:xwlaina,项目名称:GrabCut,代码行数:68,代码来源:Tensor.cpp
示例12: cvReleaseImage
// Document destructor: free the image loaded into this document, if any.
// (cvReleaseImage is itself NULL-safe, but the explicit check keeps the
// intent obvious.)
COpenCVMFCDoc::~COpenCVMFCDoc()
{
    if (pImg != NULL)
    {
        cvReleaseImage(&pImg);
    }
}
开发者ID:huihui891,项目名称:OpenCVMFC,代码行数:6,代码来源:OpenCVMFCDoc.cpp
示例13: cvCreateImage
// Convert a BGR input image to a binary image (ink = 255 on a black
// background) using mean-adaptive thresholding, then strip isolated
// noise specks. The caller owns the returned image and must release it
// with cvReleaseImage.
static IplImage *_threshold(IplImage *in) {
    IplImage *img = cvCreateImage(cvGetSize(in), 8, 1);

    // convert to grayscale
    cvCvtColor(in, img, CV_BGR2GRAY);

    // compute the mean intensity. This is used to adjust constant_reduction value below.
    // NOTE(review): per-pixel cvGet2D is slow; fine for small images, but a
    // direct imageData walk (or cvAvg) would be much faster on large ones.
    long total = 0;
    for (int x = 0; x < img->width; ++x) {
        for (int y = 0; y < img->height; ++y) {
            CvScalar s = cvGet2D(img, y, x);
            total += s.val[0];
        }
    }
    int mean_intensity = (int)(total / (img->width * img->height));

    // apply thresholding (converts it to a binary image)
    // block_size observations: higher value does better for images with variable lighting (e.g.
    // shadows).
    // may eventually need to paramaterize this, to some extent, because the different callers
    // seem to do better with different values (e.g. contour location is better with smaller numbers,
    // but cage location is better with larger...) but for now, have been able to settle on value
    // which works pretty well for most cases.
    int block_size = (int)(img->width / 9);
    if ((block_size % 2) == 0) {
        // must be odd — cvAdaptiveThreshold requires an odd block size >= 3
        block_size += 1;
    }
    // constant_reduction observations: magic, but adapting this value to the mean intensity of the
    // image as a whole seems to help.
    int constant_reduction = (int)(mean_intensity / 3.6 + 0.5);
    IplImage *threshold_image = cvCreateImage(cvGetSize(img), 8, 1);
    // THRESH_BINARY_INV: dark strokes become white (255) foreground.
    cvAdaptiveThreshold(img, threshold_image, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV,
        block_size, constant_reduction);
    cvReleaseImage(&img);

    // try to get rid of "noise" spots: clear any ink pixel with at most
    // min_blob_size ink neighbors in its 8-neighborhood.
    // NOTE(review): this pass modifies threshold_image while scanning it, so a
    // pixel's neighbor count can see neighbors already cleared earlier in the
    // same scan — the result is scan-order dependent. Presumably acceptable
    // for speck removal, but confirm before relying on exact output.
    int min_blob_size = 2;
    for (int x = 0; x < threshold_image->width; ++x) {
        for (int y = 0; y < threshold_image->height; ++y) {
            CvScalar s = cvGet2D(threshold_image, y, x);
            int ink_neighbors = 0;
            if (s.val[0] == 255) {
                // count ink pixels in the 8-neighborhood, stopping early once
                // the blob is provably big enough to keep
                for (int dx = -1; dx <= 1; ++dx) {
                    if ((x + dx >= 0) && (x + dx < threshold_image->width)) {
                        for (int dy = -1; dy <= 1; ++dy) {
                            if ((y + dy >= 0) && (y + dy < threshold_image->height)) {
                                if (! ((dy == 0) && (dx == 0))) {
                                    CvScalar m = cvGet2D(threshold_image, y + dy, x + dx);
                                    if (m.val[0] == 255) {
                                        ++ink_neighbors;
                                        if (ink_neighbors > min_blob_size) {
                                            break;
                                        }
                                    }
                                }
                            }
                        }
                        if (ink_neighbors > min_blob_size) {
                            break;
                        }
                    }
                }
                // too few ink neighbors -> treat as noise and clear the pixel
                if (ink_neighbors <= min_blob_size) {
                    s.val[0] = 0;
                    cvSet2D(threshold_image, y, x, s);
                }
            }
        }
    }
    return threshold_image;
}
开发者ID:dlowe,项目名称:kenken,代码行数:74,代码来源:kenken.c
示例14: locate_puzzle
const CvPoint2D32f* locate_puzzle(IplImage *in, IplImage **annotated) {
IplImage *grid_image = _grid(in);
*annotated = cvCloneImage(in);
// find lines using Hough transform
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* lines = 0;
double distance_resolution = 1;
double angle_resolution = CV_PI / 60;
int threshold = 60;
int minimum_line_length = in->width / 2;
int maximum_join_gap = in->width / 10;
lines = cvHoughLines2(grid_image, storage, CV_HOUGH_PROBABILISTIC, distance_resolution, angle_resolution, threshold, minimum_line_length, maximum_join_gap);
cvCvtColor(grid_image, *annotated, CV_GRAY2RGB);
cvReleaseImage(&grid_image);
double most_horizontal = INFINITY;
for (int i = 0; i < lines->total; ++i) {
CvPoint *line = (CvPoint*)cvGetSeqElem(lines,i);
double dx = abs(line[1].x - line[0].x);
double dy = abs(line[1].y - line[0].y);
double slope = INFINITY;
if (dx != 0) {
slope = dy / dx;
}
if (slope != INFINITY) {
if (slope < most_horizontal) {
//printf("most horizontal seen: %0.2f\n", slope);
most_horizontal = slope;
}
}
}
int top = -1;
int left = -1;
int bottom = -1;
int right = -1;
for (int i = 0; i < lines->total; i++) {
CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
double dx = abs(line[1].x - line[0].x);
double dy = abs(line[1].y - line[0].y);
double slope = INFINITY;
if (dx) {
slope = dy / dx;
}
cvLine(*annotated, line[0], line[1], CV_RGB(255, 0, 0), 1, 8, 0);
if (abs(slope - most_horizontal) <= 1) {
if ((top == -1) || (line[1].y < ((CvPoint*)cvGetSeqElem(lines,top))[1].y)) {
top = i;
}
if ((bottom == -1) || (line[1].y > ((CvPoint*)cvGetSeqElem(lines,bottom))[1].y)) {
bottom = i;
}
} else {
if ((left == -1) || (line[1].x < ((CvPoint*)cvGetSeqElem(lines,left))[1].x)) {
left = i;
}
if ((right == -1) || (line[1].x > ((CvPoint*)cvGetSeqElem(lines,right))[1].x)) {
right = i;
}
}
}
//printf("number of lines: %d\n", lines->total);
if ((top == -1) || (left == -1) || (bottom == -1) || (right == -1)) {
return NULL;
}
CvPoint *top_line = (CvPoint*)cvGetSeqElem(lines,top);
cvLine(*annotated, top_line[0], top_line[1], CV_RGB(0, 0, 255), 6, 8, 0);
CvPoint *bottom_line = (CvPoint*)cvGetSeqElem(lines,bottom);
cvLine(*annotated, bottom_line[0], bottom_line[1], CV_RGB(0, 255, 255), 6, 8, 0);
CvPoint *left_line = (CvPoint*)cvGetSeqElem(lines,left);
cvLine(*annotated, left_line[0], left_line[1], CV_RGB(0, 255, 0), 6, 8, 0);
CvPoint *right_line = (CvPoint*)cvGetSeqElem(lines,right);
cvLine(*annotated, right_line[0], right_line[1], CV_RGB(255, 255, 0), 6, 8, 0);
CvPoint2D32f *coordinates;
coordinates = malloc(sizeof(CvPoint2D32f) * 4);
// top left
intersect(top_line, left_line, &(coordinates[0]));
cvLine(*annotated, cvPointFrom32f(coordinates[0]), cvPointFrom32f(coordinates[0]), CV_RGB(255, 255, 0), 10, 8, 0);
//printf("top_left: %.0f, %.0f\n", coordinates[0].x, coordinates[0].y);
// top right
intersect(top_line, right_line, &(coordinates[1]));
cvLine(*annotated, cvPointFrom32f(coordinates[1]), cvPointFrom32f(coordinates[1]), CV_RGB(255, 255, 0), 10, 8, 0);
//printf("top_right: %.0f, %.0f\n", coordinates[1].x, coordinates[1].y);
//.........这里部分代码省略.........
开发者ID:dlowe,项目名称:kenken,代码行数:101,代码来源:kenken.c
示例15: setPreviousFrame
// Take ownership of a new previous-frame image, releasing whichever frame
// was held before. Passing NULL simply clears the stored frame.
void Capture::setPreviousFrame(IplImage* image) {
    IplImage* old = previousFrame;
    previousFrame = image;
    if (old != NULL) {
        cvReleaseImage(&old);
    }
}
开发者ID:Vorago,项目名称:iwb,代码行数:4,代码来源:capture.cpp
示例16: int
//.........这里部分代码省略.........
int xSt = new_width,xEn = 0;
int ySt = new_height,yEn = 0;
for(int i = 2 ;i < throld->width -2 ;i++)
{
for(int j = 2;j < throld->height - 2;j++)
{
if((uchar)throld->imageData[j*throld->widthStep + i] == 0)
{
if(i < xSt)
xSt = i;
if(i > xEn)
xEn = i;
if(j < ySt)
ySt = j;
if(j > yEn)
yEn = j;
}
}
}
/*for(int i = xSt;i < xEn ;i++)
{
for(int j = ySt; j< yEn ;j++)
{
newSrc->imageData[j*newSrc->widthStep + i] = 0;
}
}*/
struct OutLine tempoutline;
tempoutline.xSt = xSt + left;
tempoutline.xEnd = xEn + left;
tempoutline.ySt = ySt + up;
tempoutline.yEnd = yEn + up;
//判断在哪一行
int cent = (ySt + yEn)/2 + up;
int temp = 1000;
int mark;
for(int i = 0;i<lines.size();i++)
{
if(abs(cent - lines[i]) < temp)
{
temp = abs(cent - lines[i]);
mark = i;
}
}
//插入改行
bool st = false;
int tempCode = 0;
for(int i = mark;i < outlineSs.size();i++)
{
for(int j = 0; j< outlineSs.at(i).size();j++)
{
if(outlineSs.at(i).at(j).Code > tempCode)
tempCode = outlineSs.at(i).at(j).Code;
if(!st && i== mark && j == outlineSs.at(i).size()-1)
{
tempoutline.Code = tempCode;
outlineSs.at(i).push_back(tempoutline);
st = true;
j++;
}
if(outlineSs.at(i).at(j).Code == -1)
continue;
if(!st)
{
if(outlineSs.at(i).at(j).xSt > tempoutline.xSt)
{
tempoutline.Code = outlineSs.at(i).at(j).Code;
//outlineSs.at(i).at(j).Code ++;
//插入该元素
outlineSs.at(i).insert(outlineSs.at(i).begin()+j,tempoutline);
st = true;
}
}
else
{
outlineSs.at(i).at(j).Code++;
}
}
}
cvReleaseImage(&newSrc);
cvReleaseImage(&edge);
cvReleaseImage(&throld);
OnPaint();
}
开发者ID:GigaLove,项目名称:WordRecognition,代码行数:101,代码来源:RecognitionTextView.cpp
示例17: profileCoord
//.........这里部分代码省略.........
ShowFeature(centerCoord);
ShowFeature(rightCoord);
}
Graphic FindLine;
for(int numStitch = 0; numStitch < 2;numStitch++)
{
for(int num = 0;num < 3;num++)
|
请发表评论