本文整理汇总了C++中cvCreateImage函数的典型用法代码示例。如果您正苦于以下问题:C++ cvCreateImage函数的具体用法?C++ cvCreateImage怎么用?C++ cvCreateImage使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvCreateImage函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: main
int main (int argc, const char * argv[]) {
char *imagefilename=(char*)malloc(sizeof(char)*16);
char *dscfilename=(char*)malloc(sizeof(char)*16);
if (argc<3) {
printf("Usage: ./dump-descr image-file-name descriptor-file-name");
strcpy(imagefilename, "savekkkk.jpg");
strcpy(dscfilename, "saveD.jpg.dsc");
}
else {
strcpy(imagefilename,argv[1]);
strcpy(dscfilename,argv[2]);
}
FILE* dscfile;
int w=1280,h=720;
int i=0;
int nkeypoints=0;
vl_bool render=1;
vl_bool first=1;
VlSiftFilt * myFilter=0;
VlSiftKeypoint const* keys;
char img2_file[] = "/Users/quake0day/ana2/MVI_0124.MOV";
//printf("sizeof(VlSiftKeypoint)=%d, filt=%d, pix=%d\n", sizeof(VlSiftKeypoint), sizeof(VlSiftFilt),sizeof(vl_sift_pix));
dscfile=fopen(dscfilename, "wb");
vl_sift_pix* fim;
int err=0;
int octave, nlevels, o_min;
//vl_sift_pix descr[128];
//CvCapture * camera = cvCreateCameraCapture (CV_CAP_ANY);
CvCapture * camera = cvCreateFileCapture(img2_file);
cvNamedWindow("Hello", 1);
IplImage *myCVImage=cvQueryFrame(camera);//cvLoadImage(imagefilename, 0);
IplImage *afterCVImage=cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
IplImage *resizingImg=cvCreateImage(cvSize(w, h), myCVImage->depth, myCVImage->nChannels);
octave=3;
nlevels=10;
o_min=1;
myFilter=vl_sift_new(w, h, octave, nlevels, o_min);
vl_sift_set_peak_thresh(myFilter, 0.5);
fim=malloc(sizeof(vl_sift_pix)*w*h);
int press=0;
while (myCVImage) {
dprintf("%d*%d\n",myCVImage->width,myCVImage->height);
//w=myCVImage->width;
//h=myCVImage->height;
cvResize(myCVImage, resizingImg, CV_INTER_AREA);
dprintf("resized scale:%d*%d\n",myCVImage->width,myCVImage->height);
cvConvertImage(resizingImg, afterCVImage, 0);
for (i=0; i<h; i++) {
for (int j=0; j<w; j++) {
fim[i*w+j]=CV_IMAGE_ELEM(afterCVImage,uchar,i,j);
//printf("%f ", fim[i*w+j]);
}
}
//vl_sift_set_peak_thresh(myFilter, 0.5);
//vl_sift_set_edge_thresh(myFilter, 10.0);
first=1;
while (1) {
if (first) {
first=0;
err=vl_sift_process_first_octave(myFilter, fim);
}
else {
err=vl_sift_process_next_octave(myFilter);
}
if (err) {
err=VL_ERR_OK;
break;
}
vl_sift_detect(myFilter);
nkeypoints=vl_sift_get_nkeypoints(myFilter);
dprintf("insider numkey:%d\n",nkeypoints);
keys=vl_sift_get_keypoints(myFilter);
dprintf("final numkey:%d\n",nkeypoints);
if (render) {
for (i=0; i<nkeypoints; i++) {
cvCircle(resizingImg, cvPoint(keys->x, keys->y), keys->sigma, cvScalar(100, 255, 50, 0), 1, CV_AA, 0);
//printf("x:%f,y:%f,s:%f,sigma:%f,\n",keys->x,keys->y,keys->s,keys->sigma);
if (press=='d') {
double angles [4] ;
int nangles ;
//.........这里部分代码省略.........
开发者ID:mdqyy,项目名称:SIFT_Project,代码行数:101,代码来源:dump.c
示例2: xnGetStatusString
bool AirCursor::init(bool makeDebugImage)
{
if (m_init) return true;
m_debugImageEnabled = makeDebugImage;
XnStatus rc = XN_STATUS_OK;
// init OpenNI context
rc = m_context.Init();
m_context.SetGlobalMirror(true);
if (rc != XN_STATUS_OK)
{
std::cout << "ERROR: init failed: " << xnGetStatusString(rc) << std::endl;
return false;
}
// create a DepthGenerator node
rc = m_depthGenerator.Create(m_context);
if (rc != XN_STATUS_OK)
{
std::cout << "node creation failed: " << xnGetStatusString(rc) << std::endl;
return false;
}
// create the gesture and hands generators
rc = m_gestureGenerator.Create(m_context);
if (rc != XN_STATUS_OK)
{
std::cout << "gesture generator creation failed: " << xnGetStatusString(rc) << std::endl;
return false;
}
rc = m_handsGenerator.Create(m_context);
if (rc != XN_STATUS_OK)
{
std::cout << "hands generator creation failed: " << xnGetStatusString(rc) << std::endl;
return false;
}
// register to callbacks
XnCallbackHandle h1, h2;
m_gestureGenerator.RegisterGestureCallbacks(gestureRecognizedCB, gestureProcessCB, this, h1);
m_handsGenerator.RegisterHandCallbacks(handCreateCB, handUpdateCB, handDestroyCB, this, h2);
// init session manager
rc = m_sessionManager.Initialize(&m_context, "Wave,Click", NULL);
if (rc != XN_STATUS_OK)
{
std::cout << "session manager init failed: " << xnGetStatusString(rc) << std::endl;
return false;
}
// register to session callbacks
m_sessionManager.RegisterSession(this, &sessionStartCB, &sessionEndCB);
// start generating data
rc = m_context.StartGeneratingAll();
if (rc != XN_STATUS_OK)
{
std::cout << "data generating start failed: " << xnGetStatusString(rc) << std::endl;
return false;
}
m_pushDetector.RegisterPush(this, pushCB);
m_sessionManager.AddListener(&m_pushDetector);
m_swipeDetector.RegisterSwipeUp(this, &swipeUpCB);
m_swipeDetector.RegisterSwipeDown(this, &swipeDownCB);
m_swipeDetector.RegisterSwipeLeft(this, &swipeLeftCB);
m_swipeDetector.RegisterSwipeRight(this, &swipeRightCB);
m_sessionManager.AddListener(&m_swipeDetector);
// 8bit depth map
m_iplDepthMap = cvCreateImage(cvSize(DEPTH_MAP_SIZE_X, DEPTH_MAP_SIZE_Y), IPL_DEPTH_8U, 1);
// opencv mem storage
m_cvMemStorage = cvCreateMemStorage(0);
if (m_debugImageEnabled)
{
// 24bit rgb888 debug image
m_iplDebugImage = cvCreateImage(cvSize(DEPTH_MAP_SIZE_X, DEPTH_MAP_SIZE_Y), IPL_DEPTH_8U, 3);
// Same debug image as a QImage
m_debugImage = new QImage(DEPTH_MAP_SIZE_X, DEPTH_MAP_SIZE_Y, QImage::Format_RGB888);
}
m_init = true;
return true;
}
开发者ID:madamiky,项目名称:Qt_AirCursor,代码行数:91,代码来源:aircursor.cpp
示例3: main
int main() {
CvPoint pt1,pt2;
CvRect regt;
CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
if ( !capture ) {
fprintf(stderr, "ERROR: capture is NULL \n" );
getchar();
return -1;
}
cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,144);
cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,216);
// Create a window in which the captured images will be presented
cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
// Show the image captured from the camera in the window and repeat
while ( 1 ) {
// Get one frame
IplImage* frame = cvQueryFrame( capture );
if ( !frame ) {
fprintf( stderr, "ERROR: frame is null...\n" );
getchar();
break;
}
int modfheight, modfwidth;
modfheight = frame->height;
modfwidth = frame->width;
// create modified frame with 1/4th the original size
IplImage* modframe = cvCreateImage(cvSize((int)(modfwidth/4),(int)(modfheight/4)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
cvResize(frame, modframe,CV_INTER_LINEAR);
// create HSV(Hue, Saturation, Value) frame
IplImage* hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
// create a frame within threshold.
IplImage* threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
cvInRangeS(hsvframe,cvScalar(30, 25, 150),cvScalar(60, 60, 220),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
// created dilated image
IplImage* dilframe = cvCreateImage(cvGetSize(threshframe),8,1);
cvDilate(threshframe,dilframe,NULL,2); //cvDilate(input frame, output frame, mask, number of times to dilate)
CBlobResult blobs;
blobs = CBlobResult(dilframe,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
blobs.Filter(blobs,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
CBlob biggestblob;
blobs.GetNthBlob(CBlobGetArea(),0,biggestblob); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
// get 4 points to define the rectangle
pt1.x = biggestblob.MinX()*4;
pt1.y = biggestblob.MinY()*4;
pt2.x = biggestblob.MaxX()*4;
pt2.y = biggestblob.MaxY()*4;
cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob
cvShowImage( "mywindow", frame); // show output image
// Do not release the frame!
//If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
//remove higher bits using AND operator
if ( (cvWaitKey(10) & 255) == 27 ) break;
}
// Release the capture device housekeeping
cvReleaseCapture( &capture );
cvDestroyWindow( "mywindow" );
return 0;
}
开发者ID:bhuneshwar21,项目名称:AUV,代码行数:62,代码来源:colortest1.cpp
示例4: main
/**
* main
*/
int main(int argc, const char **argv)
{
// Our main data storage vessel..
RASPIVID_STATE state;
MMAL_STATUS_T status = -1;
MMAL_PORT_T *camera_video_port = NULL;
MMAL_PORT_T *camera_still_port = NULL;
MMAL_PORT_T *preview_input_port = NULL;
MMAL_PORT_T *encoder_input_port = NULL;
MMAL_PORT_T *encoder_output_port = NULL;
time_t timer_begin,timer_end;
double secondsElapsed;
bcm_host_init();
signal(SIGINT, signal_handler);
// read default status
default_status(&state);
// init windows and OpenCV Stuff
cvNamedWindow("camcvWin", CV_WINDOW_AUTOSIZE);
int w=state.width;
int h=state.height;
dstImage = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3);
py = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1); // Y component of YUV I420 frame
pu = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1); // U component of YUV I420 frame
pv = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1); // V component of YUV I420 frame
pu_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
pv_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
image = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3); // final picture to display
// create camera
if (!create_camera_component(&state))
{
vcos_log_error("%s: Failed to create camera component", __func__);
}
else if ( (status = raspipreview_create(&state.preview_parameters)) != MMAL_SUCCESS)
{
vcos_log_error("%s: Failed to create preview component", __func__);
destroy_camera_component(&state);
}
else
{
PORT_USERDATA callback_data;
camera_video_port = state.camera_component->output[MMAL_CAMERA_VIDEO_PORT];
camera_still_port = state.camera_component->output[MMAL_CAMERA_CAPTURE_PORT];
VCOS_STATUS_T vcos_status;
callback_data.pstate = &state;
vcos_status = vcos_semaphore_create(&callback_data.complete_semaphore, "RaspiStill-sem", 0);
vcos_assert(vcos_status == VCOS_SUCCESS);
// assign data to use for callback
camera_video_port->userdata = (struct MMAL_PORT_USERDATA_T *)&callback_data;
// init timer
time(&timer_begin);
// start capture
if (mmal_port_parameter_set_boolean(camera_video_port, MMAL_PARAMETER_CAPTURE, 1) != MMAL_SUCCESS)
{
goto error;
}
// Send all the buffers to the video port
int num = mmal_queue_length(state.video_pool->queue);
int q;
for (q=0;q<num;q++)
{
MMAL_BUFFER_HEADER_T *buffer = mmal_queue_get(state.video_pool->queue);
if (!buffer)
vcos_log_error("Unable to get a required buffer %d from pool queue", q);
if (mmal_port_send_buffer(camera_video_port, buffer)!= MMAL_SUCCESS)
vcos_log_error("Unable to send a buffer to encoder output port (%d)", q);
}
// Now wait until we need to stop
vcos_sleep(state.timeout);
error:
mmal_status_to_int(status);
// Disable all our ports that are not handled by connections
check_disable_port(camera_still_port);
//.........这里部分代码省略.........
开发者ID:Spiernik,项目名称:gesichtserkennung,代码行数:101,代码来源:camcv.c
示例5: tracker
// Constructor: allocates every working buffer used during eye-region
// extraction.  All buffers share the fixed size `eyesize` (declared elsewhere
// in the project -- presumably a CvSize constant; TODO confirm).  Buffers are
// created in two matched sets: one for the right eye and a duplicated set
// (suffix `_left`) for the left eye.
EyeExtractor::EyeExtractor(const PointTracker &tracker):
tracker(tracker),
// 32-bit float working images, an 8-bit grey copy, and a 3-channel colour image
eyefloat2(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
eyegrey(cvCreateImage( eyesize, 8, 1 )),
eyefloat(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
eyeimage(cvCreateImage( eyesize, 8, 3 )),
// Histogram display images; the vertical one is transposed (height x width)
histogram_horizontal(cvCreateImage( eyesize, 8, 3 )),
histogram_vertical(cvCreateImage( cvSize(eyesize.height, eyesize.width), 8, 3 )),
// Per-column / per-row accumulators, zero-initialised
vector_horizontal(new vector<int> (eyesize.width,0)),
vector_vertical(new vector<int> (eyesize.height,0)),
eyeGraySegmented(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
// ONUR DUPLICATED CODE FOR LEFT EYE
eyefloat2_left(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
eyegrey_left(cvCreateImage( eyesize, 8, 1 )),
eyefloat_left(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
eyeimage_left(cvCreateImage( eyesize, 8, 3 )),
histogram_horizontal_left(cvCreateImage( eyesize, 8, 3 )),
histogram_vertical_left(cvCreateImage( cvSize(eyesize.height, eyesize.width), 8, 3 )),
vector_horizontal_left(new vector<int> (eyesize.width,0)),
vector_vertical_left(new vector<int> (eyesize.height,0)),
eyeGraySegmented_left(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
// no blink detected initially
blink(false),
histPositionSegmentedPixels (new vector<vector<int> >),
histPositionSegmentedPixels_left (new vector<vector<int> >),
extractFeatures(eyesize)
{
}
开发者ID:alc1218,项目名称:OpenGazer,代码行数:29,代码来源:EyeExtractor.cpp
示例6: aplicar_umbralizar
/**
 * Applies the "umbralizar" (threshold) filter to a greyscale image.
 *
 * @param tiempo            non-zero: benchmark the filter over cant_iteraciones runs
 * @param cant_iteraciones  iteration count used when timing
 * @param nomb_impl         "c" selects umbralizar_c, anything else umbralizar_asm
 * @param nomb_arch_entrada input image path (loaded as 8-bit greyscale)
 * @param min,max,q         threshold parameters; requires min <= max
 *
 * Writes the result as "<input>.umbralizar.min-<min>.max-<max>.q-<q>.<impl>.bmp".
 * Exits the process on load/allocation failure or invalid parameters.
 */
void aplicar_umbralizar (int tiempo, int cant_iteraciones, const char *nomb_impl, const char *nomb_arch_entrada, unsigned char min, unsigned char max, unsigned char q) {
    IplImage *src = 0;
    IplImage *dst = 0;
    CvSize dst_size;

    // Load the input image as 8-bit greyscale.
    if( (src = cvLoadImage (nomb_arch_entrada, CV_LOAD_IMAGE_GRAYSCALE)) == 0 )
        exit(EXIT_FAILURE);

    dst_size.width = src->width;
    dst_size.height = src->height;

    // Allocate the output image (same size, 8-bit, single channel).
    if( (dst = cvCreateImage (dst_size, IPL_DEPTH_8U, 1) ) == 0 ) {
        cvReleaseImage(&src);  // bug fix: src leaked on this failure path
        exit(EXIT_FAILURE);
    }

    // Parameter check.  min/max/q are unsigned char, so the original
    // 0..255 range comparisons were tautologies; only min <= max can fail.
    if (!(min <= max)) {
        imprimir_ayuda();
        cvReleaseImage(&src);
        cvReleaseImage(&dst);
        exit ( EXIT_SUCCESS );
    }

    // Select the implementation: C reference or hand-written assembly.
    typedef void (umbralizar_fn_t) (unsigned char*, unsigned char*, int, int, int, unsigned char, unsigned char, unsigned char);
    umbralizar_fn_t *proceso;
    if (strcmp(nomb_impl, "c") == 0) {
        proceso = umbralizar_c;
    } else {
        proceso = umbralizar_asm;
    }

    if (tiempo) {
        // Timed mode: run the filter cant_iteraciones times and report.
        unsigned long long int start, end;
        MEDIR_TIEMPO_START(start);
        for(int i=0; i<cant_iteraciones; i++) {
            proceso((unsigned char*)src->imageData, (unsigned char*)dst->imageData, src->height, src->width, src->widthStep, min, max, q);
        }
        MEDIR_TIEMPO_STOP(end);
        imprimir_tiempos_ejecucion(start, end, cant_iteraciones);
    } else {
        proceso((unsigned char*)src->imageData, (unsigned char*)dst->imageData, src->height, src->width, src->widthStep, min, max, q);
    }

    // Save the output and free both images.  snprintf (bug fix) cannot
    // overflow the buffer the way the original sprintf could with a long
    // input path; it also null-terminates, making the memset unnecessary.
    char nomb_arch_salida[256];
    snprintf(nomb_arch_salida, sizeof(nomb_arch_salida), "%s.umbralizar.min-%d.max-%d.q-%d.%s.bmp", nomb_arch_entrada, min, max, q, nomb_impl);
    cvSaveImage(nomb_arch_salida, dst, NULL);

    cvReleaseImage(&src);
    cvReleaseImage(&dst);
}
开发者ID:LeandroLovisolo,项目名称:Orga2-TP2,代码行数:64,代码来源:tp2.c
示例7: identifyClip
//.........这里部分代码省略.........
// unknownH[(i-1)*numParkingSpots + j - 1].VValues[x] = 0;
// }
// if(nullPtr[(i-1)*numParkingSpots + j - 1]){
// continue;
// }
// for(int r=0; r<unknowns[(i-1)*numParkingSpots + j - 1].height(); r++){
// for(int c=0; c<unknowns[(i-1)*numParkingSpots + j - 1].width(); c++){
// double Y = 0.299*unknowns[(i-1)*numParkingSpots + j - 1][r][c].r + 0.587*unknowns[(i-1)*numParkingSpots + j - 1][r][c].g + 0.114*unknowns[(i-1)*numParkingSpots + j - 1][r][c].b;
// double U = (unknowns[(i-1)*numParkingSpots + j - 1][r][c].b - Y)*0.565;
// double V = (unknowns[(i-1)*numParkingSpots + j - 1][r][c].r - Y)*0.713;
// if(!(similar(0, ((int)U), bgSimilarity) && similar(0, ((int)V), bgSimilarity))){
// unknownH[(i-1)*numParkingSpots + j - 1].UValues[(((int)U) + 128)/binSize]++;
// unknownH[(i-1)*numParkingSpots + j - 1].VValues[(((int)V) + 128)/binSize]++;
// }
// }
// }
// unknownH[(i-1)*numParkingSpots + j - 1].normalize();
//
// cvReleaseImage(&img);
//
// }
// if(i%1000==0)
// qDebug()<<"( Frame"<<i<<")";
// }
// delete [] unknowns;
header = "../data/"+activeFolder;
QDir dir(header); if (!dir.exists()) dir.mkpath(".");
qDebug()<<"Computing confusion matrix...";
int confHeight = 480, confWidth = 2*confHeight;//, buffer = 2, unknownWidth = (double)(confWidth/(numKnownAnts*maxSamplesPerAnt))*numUnknownImages;
QString name = header+"confusionmat"+QString::number(clipNum)+".png";
//cvNamedWindow("ConfusionMatrix", CV_WINDOW_AUTOSIZE);
IplImage* confImg = cvCreateImage(cvSize(confWidth,confHeight), IPL_DEPTH_8U, 1);
BwImage confMat(confImg);
int totalUnknownSamples = 0;
for(int i=1; i<=numUnknownImages; i++){
if(nullPtr[i-1]){
continue;
}
totalUnknownSamples++;
}
int totalKnownSamples = 0;
for(int i=0; i<numKnownAnts;i++)
totalKnownSamples += samplesPerAnt[i];
int vertStep = max(confHeight/totalKnownSamples, 1);
int horzStep = max((confWidth/2)/totalKnownSamples, 1);
int stepRow = 0;
for(int i=1; i<=numKnownAnts; i++){
for(int j=1; j<=samplesPerAnt[i-1]; j++){
int rowIndex = (i-1)*maxSamplesPerAnt+j-1;
int stepCol = 0;
for(int ii=1; ii<=numKnownAnts; ii++){
for(int jj=1; jj<=samplesPerAnt[ii-1]; jj++){
int colIndex = (ii-1)*maxSamplesPerAnt+jj-1;
for(int k=0; k<=vertStep; k++){
for(int kk=0; kk<=horzStep; kk++){
confMat[min(confHeight,(int)(((double)stepRow/totalKnownSamples)*confHeight+k))]
[min(confWidth/2, (int)(((double)stepCol/totalKnownSamples)*(confWidth/2)+kk))] = 255 * H[rowIndex].intersectionWith(H[colIndex]);
}
}
stepCol++;
}
}
stepCol = 0;
for(int ii=1; ii<=maxFrames; ii++){
开发者ID:biotracking,项目名称:biotrack,代码行数:67,代码来源:main.cpp
示例8: main
//.........这里部分代码省略.........
storage = cvCreateMemStorage( MAX( elem_size*4, 1 << 16 ));
image_points_buf = (CvPoint2D32f*)cvAlloc( elem_size );
image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );
cvNamedWindow( "Image View", 1 );
for(;;)
{
IplImage *view = 0, *view_gray = 0;
int count = 0, found, blink = 0;
CvPoint text_origin;
CvSize text_size = {0,0};
int base_line = 0;
char s[100];
int key;
if( f && fgets( imagename, sizeof(imagename)-2, f ))
{
int l = strlen(imagename);
if( l > 0 && imagename[l-1] == '\n' )
imagename[--l] = '\0';
if( l > 0 )
{
if( imagename[0] == '#' )
continue;
view = cvLoadImage( imagename, 1 );
}
}
else if( capture )
{
IplImage* view0 = cvQueryFrame( capture );
if( view0 )
{
view = cvCreateImage( cvGetSize(view0), IPL_DEPTH_8U, view0->nChannels );
if( view0->origin == IPL_ORIGIN_BL )
cvFlip( view0, view, 0 );
else
cvCopy( view0, view );
}
}
if( !view )
{
if( image_points_seq->total > 0 )
{
image_count = image_points_seq->total;
goto calibrate;
}
break;
}
if( flip_vertical )
cvFlip( view, view, 0 );
img_size = cvGetSize(view);
found = cvFindChessboardCorners( view, board_size,
image_points_buf, &count, CV_CALIB_CB_ADAPTIVE_THRESH );
#if 1
// improve the found corners' coordinate accuracy
view_gray = cvCreateImage( cvGetSize(view), 8, 1 );
cvCvtColor( view, view_gray, CV_BGR2GRAY );
cvFindCornerSubPix( view_gray, image_points_buf, count, cvSize(11,11),
cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
cvReleaseImage( &view_gray );
#endif
开发者ID:AmmarkoV,项目名称:RGBDAcquisition,代码行数:67,代码来源:calibrate.cpp
示例9: show
/*
 * Renders the fused-detection overlay: copies the latest camera image,
 * plots every valid depth sample as a distance-coloured dot, then draws
 * the car / pedestrian bounding boxes and their distance labels.
 *
 * Relies on file-scope state not visible in this chunk (image, scan_image,
 * car_fused_objects, pedestrian_fused_objects, colormap, window_name,
 * exist_image / exist_scan) -- presumably filled in by subscriber
 * callbacks; TODO confirm against the rest of the file.
 */
static void show()
{
// Nothing to draw until both an image and a scan have arrived.
if(!exist_image || !exist_scan){
return;
}
IplImage* image_view = cvCreateImage(cvGetSize(&image), image.depth, image.nChannels);
cvCopy(&image, image_view);
// Scan the whole depth buffer for its min/max so the colour mapping below
// can be normalized to the observed range.
// NOTE(review): this loop is bounded by IMAGE_WIDTH * IMAGE_HEIGHT while
// the plotting loop below uses scan_image.distance.size() -- verify these
// are always equal, otherwise one of the loops is over/under-running.
float min_d, max_d;
min_d = max_d = scan_image.distance.at(0);
for(int i = 1; i < IMAGE_WIDTH * IMAGE_HEIGHT; i++){
float di = scan_image.distance.at(i);
max_d = di > max_d ? di : max_d;
min_d = di < min_d ? di : min_d;
}
float wid_d = max_d - min_d;
/*
 * Plot depth points on an image
 */
CvPoint pt;
int height, width;
for(int i = 0; i < (int)scan_image.distance.size(); i++) {
// The depth buffer is laid out column-by-column: index = width * IMAGE_HEIGHT + height.
height = (int)(i % IMAGE_HEIGHT);
width = (int)(i / IMAGE_HEIGHT);
// A distance of exactly 0.0 marks "no measurement" and is skipped.
if(scan_image.distance.at(i) != 0.0) {
pt.x = width;
pt.y = height;
// Map the distance linearly onto colormap rows 0..255; fall back to
// mid-scale (128) when the range is degenerate (all samples equal).
int colorid= wid_d ? ( (scan_image.distance.at(i) - min_d) * 255 / wid_d ) : 128;
cv::Vec3b color=colormap.at<cv::Vec3b>(colorid);
int g = color[1];
int b = color[2];
int r = color[0];
cvCircle(image_view, pt, 2, CV_RGB (r, g, b), CV_FILLED, 8, 0);
}
}
// Bounding boxes: yellow for cars, green for pedestrians.  The final
// argument is presumably a size/position threshold (30% of the image
// height) -- confirm against drawRects' definition.
drawRects(image_view,
car_fused_objects.obj,
cvScalar(255.0, 255.0, 0,0),
(image_view->height)*.3);
drawRects(image_view,
pedestrian_fused_objects.obj,
cvScalar(0.0, 255.0, 0,0),
(image_view->height)*.3);
/* PUT DISTANCE text on image */
putDistance(image_view,
car_fused_objects.obj,
(image_view->height)*.3,
car_fused_objects.type.c_str());
putDistance(image_view,
pedestrian_fused_objects.obj,
(image_view->height)*.3,
pedestrian_fused_objects.type.c_str());
/*
 * Show image
 */
cvShowImage(window_name, image_view);
cvWaitKey(2);
cvReleaseImage(&image_view);
}
开发者ID:Keerecles,项目名称:Autoware,代码行数:65,代码来源:scan_image_d_viewer.cpp
示例10: mexFunction
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
double nan = std::numeric_limits<double>::quiet_NaN();
double inf = std::numeric_limits<double>::infinity();
if (nrhs == 0)
{
mexPrintf("Lucas-Kanade\n");
return;
}
IplImage **IMG = 0;
IplImage **PYR = 0;
IMG = (IplImage**) calloc(MAX_IMG,sizeof(IplImage*));
PYR = (IplImage**) calloc(MAX_IMG,sizeof(IplImage*));
if (IMG == 0 || (nrhs != 5 && nrhs != 6))
{
mexPrintf("lk(2,imgI,imgJ,ptsI,ptsJ,Level)\n");
// 0 1 2 3 4
return;
}
int Level;
if (nrhs == 6)
Level = (int) *mxGetPr(prhs[5]);
else
Level = 5;
int I = 0;
int J = 1;
int Winsize = 10;
// Images
CvSize imageSize = cvSize(mxGetN(prhs[1]),mxGetM(prhs[1]));
IMG[I] = cvCreateImage( imageSize, 8, 1 );
PYR[I] = cvCreateImage( imageSize, 8, 1 );
loadImageFromMatlab(prhs[1], IMG[I]);
imageSize = cvSize(mxGetN(prhs[2]),mxGetM(prhs[2]));
IMG[J] = cvCreateImage( imageSize, 8, 1 );
PYR[J] = cvCreateImage( imageSize, 8, 1 );
loadImageFromMatlab(prhs[2], IMG[J]);
// Points
double *ptsI = mxGetPr(prhs[3]); int nPts = mxGetN(prhs[3]);
double *ptsJ = mxGetPr(prhs[4]);
if (nPts != mxGetN(prhs[4]))
{
mexPrintf("Inconsistent input!\n");
return;
}
points[0] = (CvPoint2D32f*)cvAlloc(nPts*sizeof(CvPoint2D32f)); // template
points[1] = (CvPoint2D32f*)cvAlloc(nPts*sizeof(CvPoint2D32f)); // target
points[2] = (CvPoint2D32f*)cvAlloc(nPts*sizeof(CvPoint2D32f)); // forward-backward
for (int i = 0; i < nPts; i++)
{
points[0][i].x = ptsI[2*i]; points[0][i].y = ptsI[2*i+1];
points[1][i].x = ptsJ[2*i]; points[1][i].y = ptsJ[2*i+1];
points[2][i].x = ptsI[2*i]; points[2][i].y = ptsI[2*i+1];
}
float *ncc = (float*) cvAlloc(nPts*sizeof(float));
float *ssd = (float*) cvAlloc(nPts*sizeof(float));
float *fb = (float*) cvAlloc(nPts*sizeof(float));
char *status = (char*) cvAlloc(nPts);
cvCalcOpticalFlowPyrLK( IMG[I], IMG[J], PYR[I], PYR[J], points[0], points[1], nPts, cvSize(win_size,win_size), Level, status, 0, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), CV_LKFLOW_INITIAL_GUESSES);
cvCalcOpticalFlowPyrLK( IMG[J], IMG[I], PYR[J], PYR[I], points[1], points[2], nPts, cvSize(win_size,win_size), Level, status, 0, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), CV_LKFLOW_INITIAL_GUESSES | CV_LKFLOW_PYR_A_READY | CV_LKFLOW_PYR_B_READY );
normCrossCorrelation(IMG[I],IMG[J],points[0],points[1],nPts, status, ncc, Winsize,CV_TM_CCOEFF_NORMED);
//normCrossCorrelation(IMG[I],IMG[J],points[0],points[1],nPts, status, ssd, Winsize,CV_TM_SQDIFF);
euclideanDistance( points[0],points[2],fb,nPts);
// Output
int M = 4;
plhs[0] = mxCreateDoubleMatrix(M, nPts, mxREAL);
double *output = mxGetPr(plhs[0]);
for (int i = 0; i < nPts; i++)
{
if (status[i] == 1)
{
output[M*i] = (double) points[1][i].x;
output[M*i+1] = (double) points[1][i].y;
output[M*i+2] = (double) fb[i];
output[M*i+3] = (double) ncc[i];
}
else
{
output[M*i] = nan;
output[M*i+1] = nan;
output[M*i+2] = nan;
output[M*i+3] = nan;
}
}
//.........这里部分代码省略.........
开发者ID:CoffeeZhang,项目名称:MDP_Tracking,代码行数:101,代码来源:lk.cpp
示例11: GrabFrame
/** Grab one frame into exp->fromCCD from whichever source the Experiment is
 *  configured to use: BitFlow frame grabber, ImagingSource USB camera, or a
 *  video file.  Increments exp->Worm->frameNum on success.
 *
 *  Returns EXP_SUCCESS on success, EXP_ERROR on a frame-grabber failure, or
 *  EXP_VIDEO_RAN_OUT when the video file has no more frames.
 */
int GrabFrame(Experiment* exp) {
if (!(exp->VidFromFile)) {
/** Acquire from Physical Camera **/
if (exp->UseFrameGrabber) {
/** Use BitFlow SDK to acquire from Frame Grabber **/
if (AcquireFrame(exp->fg)==T2FG_ERROR){
return EXP_ERROR;
}
/** Check to see if file sizes match **/
LoadFrameWithBin(exp->fg->HostBuf, exp->fromCCD);
} else {
/** Acqure from ImagingSource USB Cam **/
// Remember the camera's frame counter so callers can detect new frames.
exp->lastFrameSeenOutside = exp->MyCamera->iFrameNumber;
/*** Create a local copy of the image***/
LoadFrameWithBin(exp->MyCamera->iImageData, exp->fromCCD);
}
} else {
/** Acquire from file **/
IplImage* tempImg;
/** Grab the frame from the video **/
tempImg = cvQueryFrame(exp->capture);
/** Stall for a little bit **/
//Sleep(50);
if (tempImg == NULL) {
printf("There was an error querying the frame from video!\n");
return EXP_VIDEO_RAN_OUT;
}
/** Create a new temp image that is grayscale and of the same size **/
IplImage* tempImgGray = cvCreateImage(cvGetSize(tempImg), IPL_DEPTH_8U,
1);
/** Convert Color to GrayScale **/
cvCvtColor(tempImg, tempImgGray, CV_RGB2GRAY);
/** Load the frame into the fromCCD frame object **/
/*** ANDY! THIS WILL FAIL BECAUSE THE SIZING ISN'T RIGHT **/
LoadFrameWithImage(tempImgGray, exp->fromCCD);
cvReleaseImage(&tempImgGray);
/*
 * NOTE(review): tempImg is intentionally NOT released.  cvQueryFrame()
 * returns a buffer owned by the capture; releasing it here reportedly
 * crashed in the past, and leaving it produces no leak.
 */
}
exp->Worm->frameNum++;
return EXP_SUCCESS;
}
示例12: cvCreateImage
int TextRecognizer::recognize(IplImage *input,
const struct TextDetectionParams ¶ms, std::string svmModel,
std::vector<Chain> &chains,
std::vector<std::pair<Point2d, Point2d> > &compBB,
std::vector<std::pair<CvPoint, CvPoint> > &chainBB,
std::vector<std::string>& text) {
// Convert to grayscale
IplImage * grayImage = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 1);
cvCvtColor(input, grayImage, CV_RGB2GRAY);
for (unsigned int i = 0; i < chainBB.size(); i++) {
cv::Point center = cv::Point(
(chainBB[i].first.x + chainBB[i].second.x) / 2,
(chainBB[i].first.y + chainBB[i].second.y) / 2);
/* work out if total width of chain is large enough */
if (chainBB[i].second.x - chainBB[i].first.x
< input->width / params.maxImgWidthToTextRatio) {
LOGL(LOG_TXT_ORIENT,
"Reject chain #" << i << " width=" << (chainBB[i].second.x - chainBB[i].first.x) << "<" << (input->width / params.maxImgWidthToTextRatio));
continue;
}
/* eliminate chains with components of lower height than required minimum */
int minHeight = chainBB[i].second.y - chainBB[i].first.y;
for (unsigned j = 0; j < chains[i].components.size(); j++) {
minHeight = std::min(minHeight,
compBB[chains[i].components[j]].second.y
- compBB[chains[i].components[j]].first.y);
}
if (minHeight < params.minCharacterheight) {
LOGL(LOG_CHAINS,
"Reject chain # " << i << " minHeight=" << minHeight << "<" << params.minCharacterheight);
continue;
}
/* invert direction if angle is in 3rd/4th quadrants */
if (chains[i].direction.x < 0) {
chains[i].direction.x = -chains[i].direction.x;
chains[i].direction.y = -chains[i].direction.y;
}
/* work out chain angle */
double theta_deg = 180
* atan2(chains[i].direction.y, chains[i].direction.x) / PI;
if (absd(theta_deg) > params.maxAngle) {
LOGL(LOG_TXT_ORIENT,
"Chain angle " << theta_deg << " exceeds max " << params.maxAngle);
continue;
}
if ((chainBB.size() == 2) && (absd(theta_deg) > 5))
continue;
LOGL(LOG_TXT_ORIENT,
"Chain #" << i << " Angle: " << theta_deg << " degrees");
/* create copy of input image including only the selected components */
cv::Mat inputMat = cv::Mat(input);
cv::Mat grayMat = cv::Mat(grayImage);
cv::Mat componentsImg = cv::Mat::zeros(grayMat.rows, grayMat.cols,
grayMat.type());
std::vector<cv::Point> compCoords;
for (unsigned int j = 0; j < chains[i].components.size(); j++) {
int component_id = chains[i].components[j];
cv::Rect roi = cv::Rect(compBB[component_id].first.x,
compBB[component_id].first.y,
compBB[component_id].second.x
- compBB[component_id].first.x,
compBB[component_id].second.y
- compBB[component_id].first.y);
cv::Mat componentRoi = grayMat(roi);
compCoords.push_back(
cv::Point(compBB[component_id].first.x,
compBB[component_id].first.y));
compCoords.push_back(
cv::Point(compBB[component_id].second.x,
compBB[component_id].second.y));
compCoords.push_back(
cv::Point(compBB[component_id].first.x,
compBB[component_id].second.y));
compCoords.push_back(
cv::Point(compBB[component_id].second.x,
compBB[component_id].first.y));
cv::Mat thresholded;
cv::threshold(componentRoi, thresholded, 0 // the value doesn't matter for Otsu thresholding
, 255 // we could choose any non-zero value. 255 (white) makes it easy to see the binary image
, cv::THRESH_OTSU | cv::THRESH_BINARY_INV);
#if 0
cv::Moments mu = cv::moments(thresholded, true);
std::cout << "mu02=" << mu.mu02 << " mu11=" << mu.mu11 << " skew="
<< mu.mu11 / mu.mu02 << std::endl;
#endif
cv::imwrite("thresholded.png", thresholded);
cv::threshold(componentRoi, componentsImg(roi), 0 // the value doesn't matter for Otsu thresholding
//.........这里部分代码省略.........
开发者ID:yarec,项目名称:bibnumber,代码行数:101,代码来源:textrecognition.cpp
示例13: main
/**
 * Demo: computes Fourier descriptors for every contour found in a
 * hard-coded image, reconstructs each boundary from 2000 coefficients,
 * and displays the original contours, the Canny edge map and the
 * reconstructed boundary points.
 */
int main(int argc, char* argv[])
{
    IplImage* color = cvLoadImage("E:\\pic_skindetect\\clothtest\\2.jpg", 1);
    // Bug fix: the original dereferenced `color` without a NULL check and
    // crashed whenever the hard-coded path was missing.
    if (!color) {
        fprintf(stderr, "ERROR: could not load input image\n");
        return -1;
    }

    IplImage* gray = cvCreateImage(cvGetSize(color), 8, 1);
    IplImage* show = cvCreateImage(cvGetSize(color), 8, 1);
    cvZero(show);

    cvCvtColor(color, gray, CV_RGB2GRAY);
    //cvThreshold(gray, gray, 100, 255, CV_THRESH_BINARY_INV);
    cvCanny(gray, gray, 50, 150, 3);

    CvMemStorage * storage = cvCreateMemStorage(0);
    CvSeq* contours;
    // Destination sequence for the (x, y) float Fourier coefficients.
    CvSeq* seq_fourier = cvCreateSeq(CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),sizeof(CvPoint2D32f), storage);
    cvFindContours(gray, storage, &contours, sizeof(CvContour), CV_RETR_TREE);

    int t = 0;  // number of contours processed
    for(; contours; contours = contours->h_next)
    {
        ++t;
        printf("%d\n", contours->total);
        cvDrawContours(color, contours, CV_RGB(255,0,0), CV_RGB(255,0,0), 1, 3);

        // Project the contour onto 2000 Fourier coefficients, then rebuild
        // its boundary in place (both helpers are project-local).
        CalcFourierDescriptorCoeff(contours, 2000, seq_fourier);
        CalcBoundary(seq_fourier, contours->total, contours);

        // Plot every reconstructed point that falls inside `show`.
        for(int i = 0; i < contours->total; i++)
        {
            CvPoint* pt=(CvPoint*)cvGetSeqElem(contours, i);
            if(pt->x >= 0 && pt->x < show->width && pt->y >= 0 && pt->y < show->height)
            {
                ((uchar*)(show->imageData+pt->y*show->widthStep))[pt->x] = 255;
            }
        }
    }
    printf("t=%d\n", t);

    cvNamedWindow("color", 0);
    cvShowImage("color",color);
    cvNamedWindow("gray", 0);
    cvShowImage("gray", gray);
    cvNamedWindow("reconstructed", 0);
    cvShowImage("reconstructed", show);
    cvWaitKey(0);

    cvReleaseMemStorage(&storage);
    cvReleaseImage(&color);
    cvReleaseImage(&gray);
    cvReleaseImage(&show);
    cvDestroyAllWindows();
    return 0;
}
开发者ID:landys,项目名称:image-features,代码行数:76,代码来源:test.cpp
示例14: Histogram
// Rebuilds and returns the list of "garbage" object candidates found in `src`.
// Visible pipeline: BGR->HSV, split planes, equalize the Saturation plane,
// binary-threshold it, dilate/erode, Gaussian-smooth, then walk the contours.
// `model` supplies a reference object whose H-S histogram is computed up
// front — presumably used for histogram-based filtering in the omitted tail
// of this function; TODO confirm.
// NOTE(review): this view of the function is truncated ("部分代码省略").
// None of the cvCreateImage buffers, `h`, `testImageHistogram`, or `element`
// are released in the visible code — verify the omitted tail frees them,
// otherwise every call leaks.
std::list<vision::Garbage*> GarbageRecognition::garbageList(IplImage * src, IplImage * model){
std::list<vision::Garbage*>::iterator it;
// Dispose of results from the previous invocation before rebuilding.
for ( it=garbages.begin() ; it != garbages.end() ; it++ )
delete *it;
garbages.clear();
//cvNamedWindow("output",CV_WINDOW_AUTOSIZE);
//object model
//image for the histogram-based filter
//could be a parameter
// H-S histogram of the model image, used as the reference signature.
vision::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);
//~ int frameWidth=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);
//~ int frameHeight=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);
//gets a frame for setting image size
//CvSize srcSize = cvSize(frameWidth,frameHeight);
// All scratch images are sized to match the input frame.
CvSize srcSize = cvGetSize(src);
//images for HSV conversion
IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );
//Image for thresholding
IplImage * threshImage=cvCreateImage(srcSize,8,1);
//image for equalization
IplImage * equalizedImage=cvCreateImage(srcSize,8,1);
//image for Morphing operations(Dilate-erode)
IplImage * morphImage=cvCreateImage(srcSize,8,1);
//image for image smoothing
IplImage * smoothImage=cvCreateImage(srcSize,8,1);
//image for contour-finding operations
IplImage * contourImage=cvCreateImage(srcSize,8,3);
int frameCounter=1;
int cont_index=0;
//convolution kernel for morph operations
IplConvKernel* element;
CvRect boundingRect;
//contours
CvSeq * contours;
CvSeq * contoursCopy;
//Main loop
frameCounter++;
//convert image to hsv
cvCvtColor( src, hsv, CV_BGR2HSV );
// Split into H/S/V planes; only the S plane is used in the visible code.
cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );
//equalize Saturation Channel image
cvEqualizeHist(s_plane,equalizedImage);
//threshold the equalized Saturation channel image
cvThreshold(equalizedImage,threshImage,THRESHOLD_VALUE,255,
CV_THRESH_BINARY);
//apply morphologic operations
// Rectangular structuring element of size (2k+1)x(2k+1), anchored at center.
element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
CV_SHAPE_RECT, NULL);
// Dilate first to close gaps in the mask, then erode to restore blob size.
cvDilate(threshImage,morphImage,element,MORPH_DILATE_ITER);
cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);
//apply smooth gaussian-filter
cvSmooth(morphImage,smoothImage,CV_GAUSSIAN,3,0,0,0);
//get all contours
contours = myFindContours(smoothImage);
contoursCopy=contours;
cont_index=0;
// Draw/analyze on a copy of the source so the caller's image is untouched.
cvCopy(src,contourImage,0);
// Walk every detected contour: reduce each to a polygon and wrap it in a
// vision::Contours for the filtering that follows (in the omitted tail).
while(contours!=NULL){
CvSeq * aContour=getPolygon(contours);
vision::Contours * ct = new Contours(aContour);
//.........这里部分代码省略.........
开发者ID:margguo,项目名称:tpf-robotica,代码行数:101,代码来源:GarbageRecognition.cpp
示例15: loadConfig
// Main capture loop: loads config, opens the camera or video file, then
// grabs frames one by one, resizes (and optionally flips) them, lets the
// user define an interactive ROI on the first frame, and hands each frame
// to `frameProcessor->process()` while measuring per-frame FPS.
// NOTE(review): this view of the function is truncated ("部分代码省略");
// comments cover only the visible portion.
void VideoCapture::start()
{
loadConfig();
// Exactly one of these is expected to set `capture`; which one is chosen
// by the loaded config flags.
if (useCamera) setUpCamera();
if (useVideo) setUpVideo();
if (!capture) std::cerr << "Capture error..." << std::endl;
int input_fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
std::cout << "input->fps:" << input_fps << std::endl;
// First frame is grabbed only to learn the native size/depth/channels so
// the reusable `frame` buffer can be allocated at the scaled size.
IplImage* frame1 = cvQueryFrame(capture);
frame = cvCreateImage(cvSize((int)((frame1->width*input_resize_percent) / 100), (int)((frame1->height*input_resize_percent) / 100)), frame1->depth, frame1->nChannels);
//cvCreateImage(cvSize(frame1->width/input_resize_factor, frame1->height/input_resize_factor), frame1->depth, frame1->nChannels);
std::cout << "input->resize_percent:" << input_resize_percent << std::endl;
std::cout << "input->width:" << frame->width << std::endl;
std::cout << "input->height:" << frame->height << std::endl;
// Default to ~30 fps pacing when the source does not report an FPS.
double loopDelay = 33.333;
if (input_fps > 0)
loopDelay = (1. / input_fps)*1000.;
std::cout << "loopDelay:" << loopDelay << std::endl;
std::cout << "Press 'ESC' to stop..." << std::endl;
bool firstTime = true;
do
{
frameNumber++;
frame1 = cvQueryFrame(capture);
if (!frame1) break;
cvResize(frame1, frame);
// Flip around the x-axis (upside-down sources) when configured.
if (enableFlip)
cvFlip(frame, frame, 0);
// On the very first frame, optionally let the user draw an ROI with the
// mouse; ESC skips and disables ROI for the rest of the run.
if (VC_ROI::use_roi == true && VC_ROI::roi_defined == false && firstTime == true)
{
VC_ROI::reset();
do
{
cv::Mat img_input(frame);
if (showOutput)
{
cv::imshow("Input", img_input);
std::cout << "Set ROI (press ESC to skip)" << std::endl;
// Temporary IplImage header handed to the mouse callback;
// freed right after the key press.
VC_ROI::img_input1 = new IplImage(img_input);
cvSetMouseCallback("Input", VC_ROI::VideoCapture_on_mouse, NULL);
key = cvWaitKey(0);
delete VC_ROI::img_input1;
}
else
// Headless run: behave as if the user skipped ROI selection.
key = KEY_ESC;
if (key == KEY_ESC)
{
std::cout << "ROI disabled" << std::endl;
VC_ROI::reset();
VC_ROI::use_roi = false;
break;
}
if (VC_ROI::roi_defined)
{
std::cout << "ROI defined (" << VC_ROI::roi_x0 << "," << VC_ROI::roi_y0 << "," << VC_ROI::roi_x1 << "," << VC_ROI::roi_y1 << ")" << std::endl;
break;
}
else
std::cout << "ROI undefined" << std::endl;
} while (1);
}
// Restrict processing to the user-selected rectangle, if any.
if (VC_ROI::use_roi == true && VC_ROI::roi_defined == true)
{
CvRect rect = cvRect(VC_ROI::roi_x0, VC_ROI::roi_y0, VC_ROI::roi_x1 - VC_ROI::roi_x0, VC_ROI::roi_y1 - VC_ROI::roi_y0);
cvSetImageROI(frame, rect);
}
cv::Mat img_input(frame);
if (showOutput)
cv::imshow("Input", img_input);
if (firstTime)
saveConfig();
// Time the processing of this frame to compute instantaneous FPS.
start_time = cv::getTickCount();
frameProcessor->process(img_input);
int64 delta_time = cv::getTickCount() - start_time;
freq = cv::getTickFrequency();
fps = freq / delta_time;
//std::cout << "FPS: " << fps << std::endl;
// Clear the ROI so the next iteration's cvResize targets the full buffer.
cvResetImageROI(frame);
//.........这里部分代码省略.........
开发者ID:2php,项目名称:ShadowDetection,代码行数:101,代码来源:VideoCapture.cpp
示例16: MBLBPDetectSingleScale
// Runs the MB-LBP cascade classifier over `img` at a single scale:
// validates inputs, builds an integral image, updates the cascade's
// per-image state, then slides the detection window across the frame in
// `winStride` steps, appending hits to `positions`.
// NOTE(review): this view of the function is truncated — the body of the
// innermost `if (result > 0)` and the cleanup/__END__ section are not
// visible; comments cover only the visible portion.
void MBLBPDetectSingleScale( const IplImage* img,
MBLBPCascade * pCascade,
CvSeq * positions,
CvSize winStride)
{
IplImage * sum = 0;
int ystep, xstep, ymax, xmax;
CV_FUNCNAME( "MBLBPDetectSingleScale" );
__BEGIN__;
if( !img )
CV_ERROR( CV_StsNullPtr, "Null image pointer" );
if( ! pCascade)
CV_ERROR( CV_StsNullPtr, "Invalid classifier cascade" );
if( !positions )
CV_ERROR( CV_StsNullPtr, "Null CvSeq pointer" );
// Nothing to do when the detection window does not fit in the image.
if(pCascade->win_width > img->width ||
pCascade->win_height > img->height)
return ;
// 32-bit integral image; allows O(1) rectangle sums inside DetectAt().
CV_CALL( sum = cvCreateImage(cvSize(img->width, img->height), IPL_DEPTH_32S, 1));
myIntegral(img, sum);
//cvIntegral(img, sum);
// Point the cascade's cached feature pointers at this integral image.
UpdateCascade(pCascade, sum);
ystep = winStride.height;
xstep = winStride.width;
// Last valid top-left corner so the window stays fully inside the image.
ymax = img->height - pCascade->win_height -1;
xmax = img->width - pCascade->win_width -1;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int iy = 0; iy < ymax; iy+=ystep)
{
for(int ix = 0; ix < xmax; ix+=xstep)
{
// Offset of the window's top-left pixel in the integral image,
// measured in ints (widthStep is in bytes).
int w_offset = iy * sum->widthStep / sizeof(int) + ix;
int result = DetectAt(pCascade, w_offset);
if( result > 0)
{
|
请发表评论