This page collects typical usage examples of the C++ cvPoint function. If you are wrestling with questions such as: what exactly does the C++ cvPoint function do? How is cvPoint called? Where can I find real examples of cvPoint in use? Then the hand-picked code examples below should help.
The sections that follow show 20 code examples of the cvPoint function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
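For orientation before the examples: cvPoint is just the inline constructor for the legacy CvPoint struct, packing two integer pixel coordinates that the C drawing API consumes. Below is a minimal, self-contained sketch (not taken from any project on this page; it assumes an OpenCV 2.x-era install where the legacy C headers are still available):

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main()
{
    // create a blank 3-channel canvas
    IplImage* canvas = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 3);
    cvZero(canvas);
    // cvPoint(x, y) packs integer pixel coordinates into a CvPoint
    CvPoint p1 = cvPoint(20, 30);
    CvPoint p2 = cvPoint(300, 210);
    cvLine(canvas, p1, p2, CV_RGB(255, 0, 0), 2, CV_AA, 0);
    cvCircle(canvas, cvPoint(160, 120), 40, CV_RGB(0, 255, 0), 1, CV_AA, 0);
    cvNamedWindow("cvPoint demo", CV_WINDOW_AUTOSIZE);
    cvShowImage("cvPoint demo", canvas);
    cvWaitKey(0);
    cvReleaseImage(&canvas);
    return 0;
}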
Example 1: main
int main() {
raspicam::RaspiCam_Cv Camera; // Camera Object
cv::Mat frame;
// Set camera params
Camera.set(CV_CAP_PROP_FORMAT, CV_8UC3); // For color
Camera.set(CV_CAP_PROP_FRAME_WIDTH, 640);
Camera.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
// Open camera
std::cout << "Opening camera...\n";
if (! Camera.open()) {
std::cerr << "Error opening camera!\n";
return -1;
}
// The two windows we'll be using
cvNamedWindow("video");
cvNamedWindow("thresh");
cvMoveWindow("video", 0, 0);
cvMoveWindow("thresh", 240, 0);
int thresh_h[] {0, 18};
int thresh_s[] {160, 255};
int thresh_v[] {144, 255};
const int max_thresh(255);
cv::createTrackbar(" H min:", "thresh", &thresh_h[0], max_thresh, nullptr);
cv::createTrackbar(" H max:", "thresh", &thresh_h[1], max_thresh, nullptr);
cv::createTrackbar(" S min:", "thresh", &thresh_s[0], max_thresh, nullptr);
cv::createTrackbar(" S max:", "thresh", &thresh_s[1], max_thresh, nullptr);
cv::createTrackbar(" V min:", "thresh", &thresh_v[0], max_thresh, nullptr);
cv::createTrackbar(" V max:", "thresh", &thresh_v[1], max_thresh, nullptr);
// This image holds the "scribble" data, the tracked positions of the ball
IplImage* imgScribble = NULL;
cv::Mat frame_mat;
while (true) {
if (! Camera.grab()) {
break;
}
Camera.retrieve(frame_mat);
// Will hold a frame captured from the camera
IplImage frame = frame_mat;
// If this is the first frame, we need to initialize it
if (imgScribble == NULL) {
imgScribble = cvCreateImage(cvGetSize(&frame), 8, 3);
}
// Holds the yellow thresholded image (yellow = white, rest = black)
IplImage* imgYellowThresh = GetThresholdedImage(&frame, thresh_h, thresh_s, thresh_v);
// Calculate the moments to estimate the position of the ball
CvMoments moments;
cvMoments(imgYellowThresh, &moments, 1);
// The actual moment values
double moment10 = cvGetSpatialMoment(&moments, 1, 0);
double moment01 = cvGetSpatialMoment(&moments, 0, 1);
double area = cvGetCentralMoment(&moments, 0, 0);
// Holding the last and current ball positions
static int posX = 0;
static int posY = 0;
int lastX = posX;
int lastY = posY;
if (area > 0.) { // guard against division by zero when nothing was detected in the mask
posX = moment10 / area;
posY = moment01 / area;
}
// Print it out for debugging purposes
printf("position (%d,%d)\n", posX, posY);
// We want to draw a line only if its a valid position
if (lastX > 0 && lastY > 0 && posX > 0 && posY > 0) {
// Draw a yellow line from the previous point to the current point
cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
}
// Add the scribbling image and the frame... and we get a combination of the two
cvAdd(&frame, imgScribble, &frame);
cvCvtColor(&frame, &frame, CV_BGR2RGB);
cvShowImage("video", &frame);
// cvShowImage("video", imgScribble);
cvShowImage("thresh", imgYellowThresh);
// Wait for a keypress
int c = cvWaitKey(10);
if (c != -1) {
// If pressed, break out of the loop
break;
}
}
//......... part of the code is omitted here .........
Developer: tsandmann, Project: ctbot-misc, Lines: 101, Source: trackcolour.cpp
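Example 1 calls a GetThresholdedImage() helper that falls outside the excerpt. A plausible reconstruction, inferred only from how it is called (HSV conversion plus cvInRangeS with the trackbar-driven min/max ranges), might look like this; the body is hypothetical, not the original author's code:

// Hypothetical helper for Example 1: threshold the frame in HSV space using the
// [min, max] ranges supplied by the trackbars; the caller releases the result.
IplImage* GetThresholdedImage(IplImage* img, const int* h, const int* s, const int* v)
{
    IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
    cvCvtColor(img, imgHSV, CV_BGR2HSV);
    IplImage* imgThresh = cvCreateImage(cvGetSize(img), 8, 1);
    cvInRangeS(imgHSV,
               cvScalar(h[0], s[0], v[0], 0),
               cvScalar(h[1], s[1], v[1], 0),
               imgThresh);
    cvReleaseImage(&imgHSV);
    return imgThresh;
}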
Example 2: getFeatureMaps
/*
// Getting feature map for the selected subimage
//
// API
// int getFeatureMaps(const IplImage * image, const int k, CvLSVMFeatureMap **map);
// INPUT
// image - selected subimage
// k - size of cells
// OUTPUT
// map - feature map
// RESULT
// Error status
*/
int getFeatureMaps(const IplImage* image, const int k, CvLSVMFeatureMap **map)
{
int sizeX, sizeY;
int p, px, stringSize;
int height, width, numChannels;
int i, j, kk, c, ii, jj, d;
float * datadx, * datady;
int ch;
float magnitude, x, y, tx, ty;
IplImage * dx, * dy;
int *nearest;
float *w, a_x, b_x;
float kernel[3] = {-1.f, 0.f, 1.f};
CvMat kernel_dx = cvMat(1, 3, CV_32F, kernel);
CvMat kernel_dy = cvMat(3, 1, CV_32F, kernel);
float * r;
int * alfa;
float boundary_x[NUM_SECTOR + 1];
float boundary_y[NUM_SECTOR + 1];
float max, dotProd;
int maxi;
height = image->height;
width = image->width ;
numChannels = image->nChannels;
dx = cvCreateImage(cvSize(image->width, image->height),
IPL_DEPTH_32F, 3);
dy = cvCreateImage(cvSize(image->width, image->height),
IPL_DEPTH_32F, 3);
sizeX = width / k;
sizeY = height / k;
px = 3 * NUM_SECTOR;
p = px;
stringSize = sizeX * p;
allocFeatureMapObject(map, sizeX, sizeY, p);
cvFilter2D(image, dx, &kernel_dx, cvPoint(-1, 0));
cvFilter2D(image, dy, &kernel_dy, cvPoint(0, -1));
float arg_vector;
for(i = 0; i <= NUM_SECTOR; i++)
{
arg_vector = ( (float) i ) * ( (float)(PI) / (float)(NUM_SECTOR) );
boundary_x[i] = cosf(arg_vector);
boundary_y[i] = sinf(arg_vector);
}/*for(i = 0; i <= NUM_SECTOR; i++) */
r = (float *)malloc( sizeof(float) * (width * height));
alfa = (int *)malloc( sizeof(int ) * (width * height * 2));
for(j = 1; j < height - 1; j++)
{
datadx = (float*)(dx->imageData + dx->widthStep * j);
datady = (float*)(dy->imageData + dy->widthStep * j);
for(i = 1; i < width - 1; i++)
{
c = 0;
x = (datadx[i * numChannels + c]);
y = (datady[i * numChannels + c]);
r[j * width + i] = sqrtf(x * x + y * y);
for(ch = 1; ch < numChannels; ch++)
{
tx = (datadx[i * numChannels + ch]);
ty = (datady[i * numChannels + ch]);
magnitude = sqrtf(tx * tx + ty * ty);
if(magnitude > r[j * width + i])
{
r[j * width + i] = magnitude;
c = ch;
x = tx;
y = ty;
}
}/*for(ch = 1; ch < numChannels; ch++)*/
max = boundary_x[0] * x + boundary_y[0] * y;
maxi = 0;
for (kk = 0; kk < NUM_SECTOR; kk++)
{
//......... part of the code is omitted here .........
Developer: 93sam, Project: opencv, Lines: 101, Source: featurepyramid.cpp
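The feature map produced by getFeatureMaps() is owned by the caller. Below is a sketch of a typical invocation; freeFeatureMapObject() is assumed to exist alongside allocFeatureMapObject() (as in OpenCV's latent-SVM sources), and the file name and cell size are illustrative placeholders:

// Sketch only: cellSize and the image path are assumed values.
IplImage* img = cvLoadImage("input.jpg", CV_LOAD_IMAGE_COLOR);
if (img != NULL)
{
    CvLSVMFeatureMap* map = NULL;
    const int cellSize = 8;                 // k: side length of the HOG cells
    getFeatureMaps(img, cellSize, &map);    // fills map with (width/k) x (height/k) cells
    /* ... use the feature map ... */
    freeFeatureMapObject(&map);             // assumed cleanup counterpart of allocFeatureMapObject
    cvReleaseImage(&img);
}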
Example 3: main
int main(int argc, char * argv[])
{
static int framecounter=0;
const CvSize imsize = cvSize(320,240);
cvNamedWindow("Test");
CvParticleState state;
CvParticleObserve observe;
observe.cvParticleObserveInitialize("../data/pcaval.xml","../data/pcavec.xml","../data/pcaavg.xml");
CvHaarCascadeDetector detector; detector.load();
//CvAbstractTracker tracker;
CvHandTracker tracker;
CvCapture * capture = NULL;
if (argc==1) {
capture = cvCreateCameraCapture(0);
// set resolution to 320x240
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, imsize.width);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, imsize.height);
}else{
capture = cvCreateFileCapture(argv[1]);
}
if (!capture) {fprintf(stderr, "Error: fail to open source video!\n");}
static CvRect ROIs[50];
CvParticle *particle = cvCreateParticle( 5/*num_states*/, 100/*num_particles*/, true/*logprob*/ );
bool pf_initialized=false;
static int delay = 0; framecounter=350;
cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter);
stMouseParam mouseParam;
cvSetMouseCallback("Test",cbMouse, &mouseParam);
while(1)
{
if (0){
cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, framecounter++);
}else{
framecounter++;
}
IplImage * rawImage = cvQueryFrame(capture);
if (!rawImage) {fprintf(stderr, "Info: end of video!\n"); break;}
if (tracker.initialized()){
tracker.update(rawImage);
}else{
tracker.initialize(rawImage);
tracker.m_framecounter=framecounter;
}
// START PROCESSING HERE
{
// draw face rectangles
mouseParam.img=rawImage;
float points_data[2];
CvMat points = cvMat(1,1,CV_32FC2,points_data);
points.data.fl[0]=mouseParam.pt.x;
points.data.fl[1]=mouseParam.pt.y;
cvOpticalFlowPointTrack(tracker.m_currImage, tracker.m_nextImage, &points,cvSize(10,10),2);
{
IplImage * dispImage = cvCloneImage(rawImage);
cvCircle(dispImage, cvPoint(cvRound(points.data.fl[0]), cvRound(points.data.fl[1])), 2, CV_RED, -1); // the point was stored as float (data.fl), so read it back as float instead of reinterpreting the bits as int
// draw frame count
cvDrawFrameCount(dispImage, framecounter);
// show
cvShowImage("Test", dispImage); CV_WAIT2(10);
cvReleaseImageEx(dispImage);
}
}
int key = cvWaitKey(delay)&0xff;
if (key==27){
break;
}else if (key==' '){
if (delay){ delay = 0; }else{ delay = 30; }
}else if (key=='f'){ // skip to next frame
}else if (key!=0xff){
fprintf(stderr, "Warning: Unknown key press : %c\n", key);
} // end of key press processing
} // end of video
cvReleaseCapture(&capture);
cvDestroyWindow("Test");
}
Developer: liangfu, Project: dnn, Lines: 84, Source: main15_optflow.cpp
Example 4: CVAPI
CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image,
CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
const CvArr* mask CV_DEFAULT(0) );
CVAPI(int) cvBGCodeBookDiff( const CvBGCodeBookModel* model, const CvArr* image,
CvArr* fgmask, CvRect roi CV_DEFAULT(cvRect(0,0,0,0)) );
CVAPI(void) cvBGCodeBookClearStale( CvBGCodeBookModel* model, int staleThresh,
CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
const CvArr* mask CV_DEFAULT(0) );
CVAPI(CvSeq*) cvSegmentFGMask( CvArr *fgmask, int poly1Hull0 CV_DEFAULT(1),
float perimScale CV_DEFAULT(4.f),
CvMemStorage* storage CV_DEFAULT(0),
CvPoint offset CV_DEFAULT(cvPoint(0,0)));
#ifdef __cplusplus
}
namespace cv
{
/*!
The Base Class for Background/Foreground Segmentation
The class is only used to define the common interface for
the whole family of background/foreground segmentation algorithms.
*/
class CV_EXPORTS_W BackgroundSubtractor
{
Developer: reppie, Project: virm-ios, Lines: 30, Source: background_segm.hpp
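The declarations above are the legacy codebook background-subtraction API from background_segm.hpp. Here is a hedged sketch of the usual learn-then-subtract cycle; cvCreateBGCodeBookModel() comes from the same header (not shown in the excerpt), and frame, frameCount and the 30-frame learning period are assumptions rather than values from this file:

// Sketch of the typical codebook workflow (illustrative values only).
CvBGCodeBookModel* model = cvCreateBGCodeBookModel();
IplImage* yuv    = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
IplImage* fgmask = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
cvCvtColor(frame, yuv, CV_BGR2YCrCb);     // the codebook model is usually fed YCrCb data
if (frameCount < 30)
    cvBGCodeBookUpdate(model, yuv);       // learning phase: absorb the background
else
    cvBGCodeBookDiff(model, yuv, fgmask); // detection phase: mark foreground pixels in fgmask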
Example 5: MarkFace
void MarkFace(cv::Mat &source, const cv::Rect &face)
{
CvPoint topLeft = cvPoint(face.x, face.y);
CvPoint downRight = cvPoint(face.x + face.width-1, face.y + face.height-1);
rectangle( source, topLeft, downRight, CV_RGB(0,0,255));
}
Developer: neverice, Project: DataMiner, Lines: 6, Source: FaceDetector.cpp
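MarkFace() only converts the cv::Rect corners into cvPoint values for drawing. A hedged usage sketch follows; image is assumed to be a BGR cv::Mat, and the cascade file name and detection parameters are placeholders, not values from the DataMiner project:

// Sketch: detect faces with a Haar cascade and outline each one with MarkFace().
cv::CascadeClassifier cascade("haarcascade_frontalface_alt.xml"); // placeholder path
std::vector<cv::Rect> faces;
cv::Mat gray;
cv::cvtColor(image, gray, CV_BGR2GRAY);
cascade.detectMultiScale(gray, faces, 1.1, 3, 0, cv::Size(30, 30));
for (size_t i = 0; i < faces.size(); ++i)
    MarkFace(image, faces[i]);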
Example 6: assert
void CvCalibFilter::DrawPoints(CvMat** dstarr) {
int i, j;
if (!dstarr) {
assert(0);
return;
}
if (latestCounts) {
for (i = 0; i < cameraCount; i++) {
if (dstarr[i] && latestCounts[i]) {
CvMat dst_stub, *dst;
int count = 0;
bool found = false;
CvPoint2D32f* pts = 0;
GetLatestPoints(i, &pts, &count, &found);
dst = cvGetMat(dstarr[i], &dst_stub);
static const CvScalar line_colors[] = {
{{0, 0, 255}},
{{0, 128, 255}},
{{0, 200, 200}},
{{0, 255, 0}},
{{200, 200, 0}},
{{255, 0, 0}},
{{255, 0, 255}}
};
const int colorCount = sizeof(line_colors) / sizeof(line_colors[0]);
const int r = 4;
CvScalar color = line_colors[0];
CvPoint prev_pt = { 0, 0};
for (j = 0; j < count; j++) {
CvPoint pt;
pt.x = cvRound(pts[j].x);
pt.y = cvRound(pts[j].y);
if (found) {
if (etalonType == CV_CALIB_ETALON_CHESSBOARD) {
color = line_colors[(j/cvRound(etalonParams[0])) % colorCount];
} else {
color = CV_RGB(0, 255, 0);
}
if (j != 0) {
cvLine(dst, prev_pt, pt, color, 1, CV_AA);
}
}
cvLine(dst, cvPoint(pt.x - r, pt.y - r),
cvPoint(pt.x + r, pt.y + r), color, 1, CV_AA);
cvLine(dst, cvPoint(pt.x - r, pt.y + r),
cvPoint(pt.x + r, pt.y - r), color, 1, CV_AA);
cvCircle(dst, pt, r + 1, color, 1, CV_AA);
prev_pt = pt;
}
}
}
}
}
Developer: 353, Project: viewercv, Lines: 66, Source: calibfilter.cpp
Example 7: cvClearMemStorage
//......... part of the code is omitted here .........
if(biggestContour)
{
// calculate convex hull of the biggest contour found which is hopefully the hand
CvSeq* hulls = cvConvexHull2(biggestContour, m_cvMemStorage, CV_CLOCKWISE, 0);
if (m_debugImageEnabled)
{
// calculate convex hull and return it in a different form.
// only required for drawing
CvSeq* hulls2 = cvConvexHull2(biggestContour, m_cvMemStorage, CV_CLOCKWISE, 1);
// draw the convex hull
cvDrawContours(m_iplDebugImage, hulls2, cvScalar(rCol, gCol , bCol), cvScalar(rCol, gCol, bCol), 1);
}
// calculate convexity defects of hand's convex hull
CvSeq* defects = cvConvexityDefects(biggestContour, hulls, m_cvMemStorage);
int numOfDefects = defects->total;
if (numOfDefects > 0)
{
// calculate defect min size in projective coordinates.
// this is done using a vector from current hand position to a point DEFECT_MIN_SIZE amount above it.
// that vector is converted to projective coordinates and it's length is calculated.
XnPoint3D rwTempPoint = m_handPosRealWorld;
rwTempPoint.Y += DEFECT_MIN_SIZE;
XnPoint3D projTempPoint;
m_depthGenerator.ConvertRealWorldToProjective(1, &rwTempPoint, &projTempPoint);
int defectMinSizeProj = m_handPosProjected.Y - projTempPoint.Y;
// convert opencv seq to array
CvConvexityDefect* defectArray = (CvConvexityDefect*)malloc(sizeof(CvConvexityDefect) * numOfDefects);
cvCvtSeqToArray(defects, defectArray, CV_WHOLE_SEQ);
for(int i = 0; i < numOfDefects; i++)
{
// ignore too small defects
if((defectArray[i].depth) < defectMinSizeProj)
{
continue;
}
numOfValidDefects++;
if (m_debugImageEnabled)
{
// draw blue point to defect
cvCircle(m_iplDebugImage, *(defectArray[i].depth_point), 5, cvScalar(0, 0, 255), -1);
cvCircle(m_iplDebugImage, *(defectArray[i].start), 5, cvScalar(0, 0, 255), -1);
cvCircle(m_iplDebugImage, *(defectArray[i].end), 5, cvScalar(0, 0, 255), -1);
}
}
free(defectArray);
}
}
if (m_debugImageEnabled)
{
cvResetImageROI(m_iplDebugImage);
// draw white dot on current hand position
cvCircle(m_iplDebugImage, cvPoint(m_handPosProjected.X, m_handPosProjected.Y), 5, cvScalar(255, 255, 255), -1);
// draw gray dot on current center of threshold position
//cvCircle(m_iplDebugImage, cvPoint(projThresholdPoint.X, projThresholdPoint.Y), 5, cvScalar(127, 127, 127), -1);
// draw ROI with green
//cvRectangle(m_iplDebugImage, cvPoint(ROItopLeftX, ROItopLeftY), cvPoint(ROIbottomRightX, ROIbottomRightY), cvScalar(0, 255, 0));
}
// determine current grab status based on defect count
if(numOfValidDefects <= GRAB_MAX_DEFECTS)
{
m_currentGrab = true;
}
else
{
m_currentGrab = false;
}
if (m_debugImageEnabled)
{
// debug strings
QList<QString> debugStrings;
debugStrings.push_back(QString("hand distance: " + QString::number(m_handPosRealWorld.Z) + " mm").toStdString().c_str());
debugStrings.push_back(QString("defects: " + QString::number(numOfValidDefects)).toStdString().c_str());
// convert iplDebugImage to QImage
char* scanLinePtr = m_iplDebugImage->imageData;
for (int y = 0;y < DEPTH_MAP_SIZE_Y; y++) {
memcpy(m_debugImage->scanLine(y), scanLinePtr, DEPTH_MAP_SIZE_X * 3);
scanLinePtr += DEPTH_MAP_SIZE_X * 3;
}
emit debugUpdate(*m_debugImage, debugStrings);
}
}
Developer: madamiky, Project: Qt_AirCursor, Lines: 101, Source: aircursor.cpp
Example 8: main
int main (int argc, const char * argv[]) {
char *imagefilename=(char*)malloc(sizeof(char)*256); // 16 bytes is too small to safely hold argv[1]/argv[2]
char *dscfilename=(char*)malloc(sizeof(char)*256);
if (argc<3) {
printf("Usage: ./dump-descr image-file-name descriptor-file-name");
strcpy(imagefilename, "savekkkk.jpg");
strcpy(dscfilename, "saveD.jpg.dsc");
}
else {
strcpy(imagefilename,argv[1]);
strcpy(dscfilename,argv[2]);
}
FILE* dscfile;
int w=1280,h=720;
int i=0;
int nkeypoints=0;
vl_bool render=1;
vl_bool first=1;
VlSiftFilt * myFilter=0;
VlSiftKeypoint const* keys;
char img2_file[] = "/Users/quake0day/ana2/MVI_0124.MOV";
//printf("sizeof(VlSiftKeypoint)=%d, filt=%d, pix=%d\n", sizeof(VlSiftKeypoint), sizeof(VlSiftFilt),sizeof(vl_sift_pix));
dscfile=fopen(dscfilename, "wb");
vl_sift_pix* fim;
int err=0;
int octave, nlevels, o_min;
//vl_sift_pix descr[128];
//CvCapture * camera = cvCreateCameraCapture (CV_CAP_ANY);
CvCapture * camera = cvCreateFileCapture(img2_file);
cvNamedWindow("Hello", 1);
IplImage *myCVImage=cvQueryFrame(camera);//cvLoadImage(imagefilename, 0);
IplImage *afterCVImage=cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
IplImage *resizingImg=cvCreateImage(cvSize(w, h), myCVImage->depth, myCVImage->nChannels);
octave=3;
nlevels=10;
o_min=1;
myFilter=vl_sift_new(w, h, octave, nlevels, o_min);
vl_sift_set_peak_thresh(myFilter, 0.5);
fim=malloc(sizeof(vl_sift_pix)*w*h);
int press=0;
while (myCVImage) {
dprintf("%d*%d\n",myCVImage->width,myCVImage->height);
//w=myCVImage->width;
//h=myCVImage->height;
cvResize(myCVImage, resizingImg, CV_INTER_AREA);
dprintf("resized scale:%d*%d\n",myCVImage->width,myCVImage->height);
cvConvertImage(resizingImg, afterCVImage, 0);
for (i=0; i<h; i++) {
for (int j=0; j<w; j++) {
fim[i*w+j]=CV_IMAGE_ELEM(afterCVImage,uchar,i,j);
//printf("%f ", fim[i*w+j]);
}
}
//vl_sift_set_peak_thresh(myFilter, 0.5);
//vl_sift_set_edge_thresh(myFilter, 10.0);
first=1;
while (1) {
if (first) {
first=0;
err=vl_sift_process_first_octave(myFilter, fim);
}
else {
err=vl_sift_process_next_octave(myFilter);
}
if (err) {
err=VL_ERR_OK;
break;
}
vl_sift_detect(myFilter);
nkeypoints=vl_sift_get_nkeypoints(myFilter);
dprintf("insider numkey:%d\n",nkeypoints);
keys=vl_sift_get_keypoints(myFilter);
dprintf("final numkey:%d\n",nkeypoints);
if (render) {
for (i=0; i<nkeypoints; i++) {
cvCircle(resizingImg, cvPoint(keys->x, keys->y), keys->sigma, cvScalar(100, 255, 50, 0), 1, CV_AA, 0);
//printf("x:%f,y:%f,s:%f,sigma:%f,\n",keys->x,keys->y,keys->s,keys->sigma);
if (press=='d') {
double angles [4] ;
int nangles ;
//......... part of the code is omitted here .........
Developer: mdqyy, Project: SIFT_Project, Lines: 101, Source: dump.c
Example 9: gst_bgfg_acmmm2003_chain
//......... part of the code is omitted here .........
// send mask event, if requested
if (filter->send_mask_events) {
GstStructure *structure;
GstEvent *event;
GArray *data_array;
IplImage *mask;
// prepare and send custom event with the mask surface
mask = filter->model->foreground;
data_array = g_array_sized_new(FALSE, FALSE, sizeof(mask->imageData[0]), mask->imageSize);
g_array_append_vals(data_array, mask->imageData, mask->imageSize);
structure = gst_structure_new("bgfg-mask",
"data", G_TYPE_POINTER, data_array,
"width", G_TYPE_UINT, mask->width,
"height", G_TYPE_UINT, mask->height,
"depth", G_TYPE_UINT, mask->depth,
"channels", G_TYPE_UINT, mask->nChannels,
"timestamp", G_TYPE_UINT64, GST_BUFFER_TIMESTAMP(buf),
NULL);
event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
gst_pad_push_event(filter->srcpad, event);
g_array_unref(data_array);
if (filter->display) {
// shade the regions not selected by the acmmm2003 algorithm
cvXorS(mask, CV_RGB(255, 255, 255), mask, NULL);
cvSubS(filter->image, CV_RGB(191, 191, 191), filter->image, mask);
cvXorS(mask, CV_RGB(255, 255, 255), mask, NULL);
}
}
if (filter->send_roi_events) {
CvSeq *contour;
CvRect *bounding_rects;
guint i, j, n_rects;
// count # of contours, allocate array to store the bounding rectangles
for (contour = filter->model->foreground_regions, n_rects = 0;
contour != NULL;
contour = contour->h_next, ++n_rects);
bounding_rects = g_new(CvRect, n_rects);
for (contour = filter->model->foreground_regions, i = 0; contour != NULL; contour = contour->h_next, ++i)
bounding_rects[i] = cvBoundingRect(contour, 0);
for (i = 0; i < n_rects; ++i) {
// skip collapsed rectangles
if ((bounding_rects[i].width == 0) || (bounding_rects[i].height == 0)) continue;
for (j = (i + 1); j < n_rects; ++j) {
// skip collapsed rectangles
if ((bounding_rects[j].width == 0) || (bounding_rects[j].height == 0)) continue;
if (rect_overlap(bounding_rects[i], bounding_rects[j])) {
bounding_rects[i] = rect_collapse(bounding_rects[i], bounding_rects[j]);
bounding_rects[j] = NULL_RECT;
}
}
}
for (i = 0; i < n_rects; ++i) {
GstEvent *event;
GstStructure *structure;
CvRect r;
// skip collapsed rectangles
r = bounding_rects[i];
if ((r.width == 0) || (r.height == 0)) continue;
structure = gst_structure_new("bgfg-roi",
"x", G_TYPE_UINT, r.x,
"y", G_TYPE_UINT, r.y,
"width", G_TYPE_UINT, r.width,
"height", G_TYPE_UINT, r.height,
"timestamp", G_TYPE_UINT64, GST_BUFFER_TIMESTAMP(buf),
NULL);
event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
gst_pad_send_event(filter->sinkpad, event);
if (filter->verbose)
GST_INFO("[roi] x: %d, y: %d, width: %d, height: %d\n",
r.x, r.y, r.width, r.height);
if (filter->display)
cvRectangle(filter->image, cvPoint(r.x, r.y), cvPoint(r.x + r.width, r.y + r.height),
CV_RGB(0, 0, 255), 1, 0, 0);
}
g_free(bounding_rects);
}
if (filter->display)
gst_buffer_set_data(buf, (guchar*) filter->image->imageData, filter->image->imageSize);
return gst_pad_push(filter->srcpad, buf);
}
Developer: gama, Project: gst-opencv, Lines: 101, Source: gstbgfgacmmm2003.c
Example 10: WinMain
int WINAPI WinMain(HINSTANCE hThisInstance, HINSTANCE hPrevInstance, LPSTR lpszArgs, int nWinMode)
{
// variables for storing the images
IplImage *frame = 0, *image = 0, *hsv = 0, *dst = 0, *dst2 = 0, *color_indexes = 0, *dst3 = 0, *image2 = 0, *tmp = 0;
int key = 0, zx = 0, zy = 0;
// load a picture from a file
IplImage *menu = cvLoadImage("menu.png");
// create the project's main window
cvNamedWindow("Проект OpenCV");
cvShowImage("Проект OpenCV",menu);
cvMoveWindow("Проект OpenCV",100,50);
// grab any connected webcam
CvCapture *capture = cvCaptureFromCAM(CV_CAP_ANY);
// frame rate
double fps = 18;
// initialize video recording to a file; the four-character codec code is built with the CV_FOURCC macro
CvVideoWriter *writer = cvCreateVideoWriter("record.avi", CV_FOURCC('I','Y','U','V'), fps, cvSize(640, 480), 1);
if (!capture)
return 0;
else
{
while(key != 27)
{
// grab the current frame
frame = cvQueryFrame(capture);
// copy it for processing
image = cvCloneImage(frame);
// zoom
if(key=='+')
{
zx = zx + 4;
zy = zy + 3;
}
if(key=='-')
{
zx = zx - 4;
zy = zy - 3;
}
if(zx > 300)
{
zx = 300;
zy = 225;
}
if(zx < 0)
{
zx = 0;
zy = 0;
}
// set the width and height of the ROI
int zwidth = frame->width-2*zx;
int zheight = frame->height-2*zy;
// set the ROI (Region Of Interest)
cvSetImageROI(frame, cvRect(zx,zy,zwidth,zheight));
// copy the region of interest into image2
image2 = cvCloneImage(frame);
// create an empty 640x480 image
tmp = cvCreateImage( cvSize(640, 480), frame->depth, frame->nChannels );
// place the ROI onto the empty image tmp
cvResize(image2, tmp, 0);
// write the frame to the video file
cvWriteFrame(writer, tmp);
// reset the ROI
cvResetImageROI(frame);
// initialize the font
CvFont font;
cvInitFont( &font, CV_FONT_HERSHEY_COMPLEX,1.0, 1.0, 0, 1, CV_AA);
// use the font to draw text onto the image
cvPutText(tmp, "press '+' to increase", cvPoint(150, 40), &font, CV_RGB(150, 0, 150) );
cvPutText(tmp, "press '-' to reduce", cvPoint(165, 450), &font, CV_RGB(150, 0, 150) );
// number of pixels of each color in the image
uint colorCount[NUM_COLOR_TYPES] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
hsv = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
cvCvtColor( image, hsv, CV_BGR2HSV );
// images for storing the results
dst = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
dst2 = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 3 );
color_indexes = cvCreateImage( cvGetSize(image), IPL_DEPTH_8U, 1 ); // stores the color indexes
// for storing the RGB colors
CvScalar rgb_colors[NUM_COLOR_TYPES];
int i=0, j=0, x=0, y=0;
// zero out the colors
for(i=0; i<NUM_COLOR_TYPES; i++) {
rgb_colors[i] = cvScalarAll(0);
}
//......... part of the code is omitted here .........
Developer: kovaloid, Project: university, Lines: 101, Source: main.cpp
Example 11: cvCvtColor
void CHandDrawEffect::DrawEdge(IplImage* image, IplImage* image2, IplImage* base, int plane)
{
CvSeq* contourSeq0 = NULL;
int height = image->height;
int width = image->width;
int step = image->widthStep;
int channels = image->nChannels;
uchar* data = (uchar*)image->imageData;
if(plane < 3) {
cvCvtColor(image, hsv, CV_BGR2HSV); // use the selected HSV plane as the source for the line drawing
for(int i = 0; i < height * width; i++)
grayImage->imageData[i] = hsv->imageData[i * 3 + plane];
} else {
cvCvtColor(image, grayImage, CV_BGR2GRAY); // build a gray image as the source for the line drawing
}
IplImage* target = base; // target image to draw into
for(int x = 20; x < 240; x += Y) {
cvThreshold(grayImage, binaryImage, x, 255, CV_THRESH_BINARY); // binarize at threshold x to extract contours
contourSeq0 = 0;
cvFindContours(binaryImage, memStorage0, &contourSeq0, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0, 0)); // contour search
if(lineNoise > 0) { // case of discontinuous lines
for(; contourSeq0 != 0; contourSeq0 = contourSeq0->h_next) {
CvPoint *p;
if(contourSeq0->total < X * 5) continue; // discard small contours (pentagon-sized or smaller)
int index = 0;
for(int i = 0; i < contourSeq0->total; i += X) {
p = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, i); // register the point's position and color
CvScalar color = GetColor(image2, p->x, p->y);
MulScaler(color, DARK); // adjust the brightness
color.val[3] = CheckPoint(image, p->x, p->y, lineNoise); // decide from neighboring pixels whether the point is valid and store the flag in [3]
SetPoint(index, p, color); // store it in pointTable
index++;
if(index > MAX_POINT) {
// printf("INDEX ERROR\n");
index = 0;
}
}
// runs of 5 or fewer consecutive valid points are invalidated (pending: speed this up)
for(int i = 0; i < index; i++) {
int p1 = i;
int p2, p3, p4, p0;
if(pointTable[p1].color.val[3]) {
p2 = (p1 + 1) % index;
p3 = (p1 + 2) % index;
p4 = (p1 + 3) % index;
p0 = (p1 - 1 + index) % index;
if(pointTable[p0].color.val[3]) continue;
if(!pointTable[p2].color.val[3] ||
!pointTable[p3].color.val[3] ||
!pointTable[p4].color.val[3]) {
pointTable[p1].color.val[3] = 0;
}
}
}
// draw the connected valid points
for(int i = 0; i < index; i++) {
int p1 = i;
int p2 = (i + 1) % index; // if (p2==index) p2 = 0;
if(pointTable[p1].color.val[3] && pointTable[p2].color.val[3]) {
CvScalar c = pointTable[p1].color;
MulScaler(c, DARK);
cvLine(target, pointTable[p1].p, pointTable[p2].p, c, lineWidth, CV_AA);
}
}
}
} else {
// case where everything is drawn
for(; contourSeq0 != 0; contourSeq0 = contourSeq0->h_next) {
CvPoint *p1 = 0;
CvPoint *p2;
if(contourSeq0->total < X * 5) continue;
for(int i = 0; i < contourSeq0->total; i += X) {
p1 = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, (i) % contourSeq0->total); // start point
p2 = CV_GET_SEQ_ELEM(CvPoint, contourSeq0, (i + X + Z) % contourSeq0->total); // end point
CvScalar color = GetColor(image, p1->x, p1->y);
MulScaler(color, DARK);
cvLine(target, *p1, *p2, color, lineWidth, CV_AA);
}
}
}
}
cvClearMemStorage(memStorage0);
}
Developer: zphilip, Project: VirtualTS, Lines: 92, Source: CCVEffect.cpp
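Example 11 relies on several helpers (GetColor, MulScaler, CheckPoint, SetPoint) defined elsewhere in CCVEffect.cpp. As one illustration, GetColor(image, x, y) presumably just reads the pixel at (x, y) as a CvScalar; a hypothetical stand-alone version (the original is a class method) could be:

// Hypothetical helper consistent with the GetColor(image, x, y) calls above.
// Note that cvGet2D takes the row (y) first, then the column (x).
CvScalar GetColor(IplImage* img, int x, int y)
{
    return cvGet2D(img, y, x);
}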
Example 12: cvPoint
#include <opencv\highgui.h>
#include <opencv\cv.h>
#include <math.h>
#include "Trilateration_2D.h"
double r1, r2, r3;
CvPoint Anchor1 = cvPoint(0, 0);
CvPoint Anchor2 = cvPoint(400, 0);
CvPoint Anchor3 = cvPoint(255, 450);
IplImage* bckgrd = cvCreateImage(cvSize(500, 500), IPL_DEPTH_8U, 3);
double getDistance(CvPoint center, CvPoint tag);
void on_mouseEvent(int event, int x, int y, int flag, void* param);
int main()
{
r1 = 0;
r2 = 0;
r3 = 0;
//IplImage* bckgrd = cvLoadImage("bckgrd.jpg", CV_LOAD_IMAGE_UNCHANGED);
cvNamedWindow("trilateration");
cvSetMouseCallback("trilateration", on_mouseEvent, bckgrd);
cvShowImage("trilateration", bckgrd);
cvWaitKey(0);
cvReleaseImage(&bckgrd);
Developer: yongwoojung90, Project: Trilateration_2D, Lines: 31, Source: main.cpp
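Example 12 declares getDistance() and on_mouseEvent(), but their bodies are cut off by the excerpt. The distance helper is presumably the plain Euclidean distance between two points; a hypothetical body (math.h is already included above):

// Hypothetical body for the getDistance() declaration above (Euclidean distance).
double getDistance(CvPoint center, CvPoint tag)
{
    double dx = (double)(center.x - tag.x);
    double dy = (double)(center.y - tag.y);
    return sqrt(dx * dx + dy * dy);
}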
Example 13: CalibrateTarget
bool CalibrateTarget(
QLCalibrationId calibrationId,
QLCalibrationTarget target,
IplImage* displayImage,
const char* windowName)
{
// Loop on this target until eye data was successfully collected for each
// eye.
QLCalibrationStatus status = QL_CALIBRATION_STATUS_OK;
do
{
//Clear the calibration window
memset(displayImage->imageData, 128, displayImage->imageSize);
// Display the cleared image buffer in the calibration window.
cvShowImage(windowName, displayImage);
// Wait for a little bit so show the blanked screen.
if(cvWaitKeyEsc == cvWaitKey(100))
return false;
// The target positions are in percentage of the area to be tracked so
// we need to scale to the calibration window size.
int tx = (int)target.x * displayImage->width / 100;
int ty = (int)target.y * displayImage->height / 100;
// Draw a target to the image buffer
DrawTarget(displayImage, cvPoint(tx, ty), 20, CV_RGB(0, 255, 0));
// Display the image buffer in the calibration window.
cvShowImage(windowName, displayImage);
// Wait a little bit so the user can see the target before we calibrate.
cvWaitKey(250);
// Calibrate the target for 1000 ms. This can be done two ways; blocking and
// non-blocking. For blocking set the block variable to true. for
// non-blocking set it to false.
bool block = false;
QLCalibration_Calibrate(calibrationId, target.targetId, 1500, block);
// When non-blocking is used, the status of the target needs to be
// polled to determine when it has finished. During the polling we can
// do other things like querying user input as we do here to see if
// the user wants to quit the calibration.
int keyPressed = 0;
while(!block &&
((keyPressed = cvWaitKey(10)) != cvWaitKeyEsc) &&
(QLCalibration_GetStatus(calibrationId, target.targetId, &status) == QL_ERROR_OK) &&
(status == QL_CALIBRATION_STATUS_CALIBRATING));
// If the user terminated the calibration early then return false.
if(keyPressed == cvWaitKeyEsc)
return false;
// Get the status of the target.
QLCalibration_GetStatus(calibrationId, target.targetId, &status);
}while(status != QL_CALIBRATION_STATUS_OK);
// Return true to indicate that the target has successfully been calibrated
return true;
};
Developer: halodog, Project: eyeglass, Lines: 62, Source: Calibrate.cpp
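Example 13 calls DrawTarget(displayImage, cvPoint(tx, ty), 20, CV_RGB(0, 255, 0)), but the helper itself is not shown. A plausible, hypothetical version that draws a circle with a crosshair centered on the given cvPoint:

// Hypothetical helper matching the DrawTarget(image, center, radius, color) call above.
void DrawTarget(IplImage* img, CvPoint center, int radius, CvScalar color)
{
    cvCircle(img, center, radius, color, 2, CV_AA, 0);
    cvLine(img, cvPoint(center.x - radius, center.y),
           cvPoint(center.x + radius, center.y), color, 1, CV_AA, 0);
    cvLine(img, cvPoint(center.x, center.y - radius),
           cvPoint(center.x, center.y + radius), color, 1, CV_AA, 0);
}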
Example 14: AutoCalibrate
bool AutoCalibrate(
QLDeviceId deviceId,
QLCalibrationType calibrationType,
QLCalibrationId* calibrationId)
{
QLError qlerror = QL_ERROR_OK;
// Initialize the calibration using the inputed data.
qlerror = QLCalibration_Initialize(deviceId, *calibrationId, calibrationType);
// If the calibrationId was not valid then create a new calibration
// container and use it.
if(qlerror == QL_ERROR_INVALID_CALIBRATION_ID)
{
QLCalibration_Create(0, calibrationId);
qlerror = QLCalibration_Initialize(deviceId, *calibrationId, calibrationType);
}
// If the initialization failed then print an error and return false.
if(qlerror == QL_ERROR_INVALID_DEVICE_ID)
{
printf_s("QLCalibration_Initialize() failed with error code %d.\n", qlerror);
return false;
}
// Create a buffer for the targets. This just needs to be large enough to
// hold the targets.
const int bufferSize = 20;
int numTargets = bufferSize;
QLCalibrationTarget targets[bufferSize];
// Get the targets. After the call, numTargets will contain the number of
// actual targets.
qlerror = QLCalibration_GetTargets(*calibrationId, &numTargets, targets);
// If the buffer was not large enough then print an error and return false.
if(qlerror == QL_ERROR_BUFFER_TOO_SMALL)
{
printf_s(
"The target buffer is too small. It should be at least %d bytes.\n",
numTargets * sizeof(QLCalibrationTarget));
return false;
}
// Use OpenCV to create a window for doing the calibration. The calibration
// will only be valid over the area of this window. If the entire screen
// area is to be calibrated then this window should be set to the screen
// size.
int windowWidth = 512;
int windowHeight = 384;
const char* windowName = "Calibration Window";
IplImage* displayImage = cvCreateImage(cvSize(windowWidth, windowHeight), 8, 3);
cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);
cvMoveWindow(windowName, 0, 0);
cvResizeWindow(windowName, windowWidth, windowHeight);
//Clear the calibration window
memset(displayImage->imageData, 128, displayImage->imageSize);
// Create a font for printing to the window.
CvFont font;
cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX, .5, .5, 0, 1);
// Print a message to the image.
int lineSize = 20;
int lineIndent = 10;
cvPutText(
displayImage,
"The calibration is dependant on the location of the calibration area.",
cvPoint(lineIndent, lineSize * 1),
&font,
CV_RGB(255, 255, 255));
cvPutText(
displayImage,
"Move the window to the area of the screen you would like calibrate.",
cvPoint(lineIndent, lineSize * 2),
&font,
CV_RGB(255, 255, 255));
cvPutText(
displayImage,
"Press ENTER when ready.",
cvPoint(lineIndent, lineSize * 3),
&font,
CV_RGB(255, 255, 255));
cvPutText(
displayImage,
"Press ESC at any time to terminate the calibration",
cvPoint(lineIndent, lineSize * 4),
&font,
CV_RGB(255, 255, 255));
// Display the image to the window.
cvShowImage(windowName, displayImage);
// Wait for the user to place the window and press a key.
if(cvWaitKey() == cvWaitKeyEsc)
{
QLCalibration_Cancel(*calibrationId);
cvReleaseImage(&displayImage);
//......... part of the code is omitted here .........
Developer: halodog, Project: eyeglass, Lines: 101, Source: Calibrate.cpp
Example 15: ocvThread
static void ocvThread(void){
//if(cvDefined==FALSE){
ttModels theModels;
ttInit(&theModels);
static GdkPixbuf *pixbuf;
IplImage *theFrame, *segmented;
static char theStr[12];
thePixel=cvPoint(0,0);
//globalFrame=cvCreateImage(size,IPL_DEPTH_8U,3);
//char theChar;
#if use_webcam==1
CvCapture* theCamera;
CvSize size=cvSize(justWidth,justHeight);
theCamera=cvCaptureFromCAM(-1);
cvSetCaptureProperty( theCamera,CV_CAP_PROP_FRAME_WIDTH,justWidth );
cvSetCaptureProperty( theCamera,CV_CAP_PROP_FRAME_HEIGHT,justHeight );
theFrame=cvCreateImage(size,IPL_DEPTH_8U,3);
#else
theFrame=cvLoadImage("images/image02.jpg",1);
assert(theFrame!=NULL);
justWidth=theFrame->width;
justHeight=theFrame->height;
CvSize size=cvSize(justWidth,justHeight);
cvConvertImage(theFrame,theFrame,CV_CVTIMG_SWAP_RB);
#endif
segmented=cvCreateImage(size,IPL_DEPTH_8U,3);
while (1){
#if use_webcam==1
theFrame=cvQueryFrame(theCamera);
assert(theFrame!=NULL);
cvConvertImage(theFrame,theFrame,CV_CVTIMG_SWAP_RB);
#endif
if(changeFlag==1){
theRanger.hue=-1;
theH=ttCalibration(theFrame,&thePixel,&theRanger,NULL);
theRanger.hue=theH;
changeFlag=0;
//getIndex();
//printf("%d\n",theIndex);
//updateLimits();
}
ttCalibration(theFrame,&thePixel,&theRanger,segmented);
sprintf(theStr,"Hue=%d",theH);
getIndex();
//cvShowImage("window",theImage);
//theFrame=theImage;
//cvWaitKey(5000);
gdk_threads_enter();
pixbuf = gdk_pixbuf_new_from_data ((guchar*) theFrame->imageData,
GDK_COLORSPACE_RGB,
FALSE,
theFrame->depth,
theFrame->width,
theFrame->height,
(theFrame->widthStep),
NULL,
NULL);
//printf("\n\nchingadamadre!\n");CV_CVTIMG_SWAP_RB
gtk_image_set_from_pixbuf(GTK_IMAGE(image), pixbuf);
pixbuf = gdk_pixbuf_new_from_data ((guchar*) segmented->imageData,
GDK_COLORSPACE_RGB,
FALSE,
theFrame->depth,
theFrame->width,
theFrame->height,
(theFrame->widthStep),
NULL,
NULL);
gtk_image_set_from_pixbuf(GTK_IMAGE(gseg), pixbuf);
gtk_label_set_text((GtkLabel *)hval,theStr);
gdk_threads_leave();
//cvWaitKey();
#if use_webcam==0
g_usleep(50000);
#endif
}
}
Developer: craksz, Project: sdk_gtk-201, Lines: 91, Source: testing10.c
Example 16: CreateExperimentStruct
//......... part of the code is omitted here .........
exp->WinCon1 = NULL;
exp->WinCon2 = NULL;
exp->WinCon3 = NULL;
/** Error information **/
exp->e = 0;
/** CommandLine Input **/
exp->argv = NULL;
exp->argc = 0;
exp->outfname = NULL;
exp->infname = NULL;
exp->dirname = NULL;
exp->protocolfname = NULL;
/** Protocol Data **/
exp->p = NULL;
exp->pflag = 0;
/** Camera Input**/
exp->MyCamera = NULL;
/** FrameGrabber Input **/
exp->fg = NULL;
exp->UseFrameGrabber = FALSE;
/** Video input **/
exp->capture = NULL;
/** Last Observerd CamFrameNumber **/
exp->lastFrameSeenOutside = 0;
/** DLP Output **/
exp->myDLP = 0;
/** Calibration Data Object**/
exp->Calib = NULL;
/** User-configurable Worm-related Parameters **/
exp->Params = NULL;
/** Information about Our Worm **/
exp->Worm = NULL;
/** Information about the Previous frame's Worm **/
exp->PrevWo
//......... the rest of the code is omitted .........