本文整理汇总了C++中ofxCvColorImage类的典型用法代码示例。如果您正苦于以下问题:C++ ofxCvColorImage类的具体用法?C++ ofxCvColorImage怎么用?C++ ofxCvColorImage使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ofxCvColorImage类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: cvFilterCartoon
/// ****************************************************
///
/// CARTOON FILTER
///
/// ****************************************************
/// Applies a cartoon-style filter: mean-shift flattens color regions,
/// Canny extracts edges, and the edges are subtracted so the result is
/// flat color with dark outlines.
/// @param src  input color image (w x h)
/// @param dst  output color image, overwritten with the filtered frame
/// @param w    frame width in pixels
/// @param h    frame height in pixels
/// @return always true
bool testApp::cvFilterCartoon(ofxCvColorImage &src, ofxCvColorImage &dst, int w, int h)
{
    // Temporary OpenCV buffers for the intermediate stages.
    IplImage* pyr      = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 3 ); // mean-shift output
    IplImage* edges    = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 1 ); // 1-channel edge map
    IplImage* edgesRgb = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 3 ); // edge map as 3 channels

    // Grayscale copy of the source for the edge detector.
    ofxCvGrayscaleImage tempGrayImg;
    tempGrayImg.allocate(w, h);
    tempGrayImg.setFromColorImage(src);

    // Posterize-like color flattening.
    cvPyrMeanShiftFiltering(src.getCvImage(), pyr, 10, 10);
    // Edge detection on the grayscale copy.
    cvCanny(tempGrayImg.getCvImage(), edges, 150, 150);
    cvCvtColor(edges, edgesRgb, CV_GRAY2RGB);
    // Subtract the edges from the flattened image: dark outlines remain.
    cvAbsDiff(pyr, edgesRgb, pyr);

    dst.setFromPixels((unsigned char *)pyr->imageData, w, h);

    // BUGFIX: all three temporary IplImages were leaked on every call.
    cvReleaseImage(&pyr);
    cvReleaseImage(&edges);
    cvReleaseImage(&edgesRgb);
    return true;
}
开发者ID:dasaki,项目名称:cvcinema,代码行数:39,代码来源:testApp.cpp
示例2: setFromCvColorImage
//--------------------------------------------------------------------------------
// Converts mom's RGB pixels into this grayscale image. The regions of
// interest must match; otherwise nothing is converted and an error is logged.
void ofxCvGrayscaleImage::setFromCvColorImage( ofxCvColorImage& mom ) {
    if( matchingROI(getROI(), mom.getROI()) ) {
        cvCvtColor( mom.getCvImage(), cvImage, CV_RGB2GRAY );
        flagImageChanged();
    } else {
        // BUGFIX: the message said "in =" (copied from operator=); name the
        // actual function so the log points at the right call site.
        ofLog(OF_LOG_ERROR, "in setFromCvColorImage, ROI mismatch");
    }
}
开发者ID:d3cod3,项目名称:of_0071_GAmuza64,代码行数:9,代码来源:ofxCvGrayscaleImage.cpp
示例3: feedImg
// Warps the incoming color frame into `image` using the perspective
// matrix `translate`, reallocating the destination if the frame size changed.
void margDisplay::feedImg(ofxCvColorImage& _source) {
    // BUGFIX: the buffer was sized from the member `source` instead of the
    // incoming `_source`, and a height-only change was never detected.
    if (image.getWidth() != _source.getWidth() || image.getHeight() != _source.getHeight()) {
        image.clear();
        image.allocate(_source.getWidth(), _source.getHeight());
    }
    cvWarpPerspective(_source.getCvImage(), image.getCvImage(), translate);
    image.flagImageChanged();
}
开发者ID:amintz,项目名称:M1.0,代码行数:9,代码来源:margDisplay.cpp
示例4: keyImage
// Chroma-keys src into dst: pixels whose hue/saturation fall inside the
// key window (H +/- tH, S +/- tS) are masked out (left untouched in dst),
// everything else is copied through. w/h are the frame dimensions.
void chromaKeyer::keyImage( ofxCvColorImage & src, ofxCvColorImage & dst, int w, int h )
{
    // (Re)allocate the working images if the incoming size changed.
    if( hsvImage.width != w || hsvImage.height != h)
    {
        hsvImage.allocate(w,h);
        hueImg.allocate(w,h);
        satImg.allocate(w,h);
        valImg.allocate(w,h);
    }

    // Convert src to HSV color space.
    hsvImage.setFromPixels(src.getPixels(),w,h);
    hsvImage.convertRgbToHsv();
    // Split the HSV channels into grayscale planes (valImg is unused below).
    hsvImage.convertToGrayscalePlanarImages(hueImg,satImg,valImg);

    // NOTE: these are the interleaved HSV pixels (3 bytes per pixel), not a
    // hue-only plane: [i*3] is H and [i*3+1] is S.
    unsigned char * pixelsHue = hsvImage.getPixels();
    unsigned char * dstMask = new unsigned char[w*h];

    // Build the mask: 0 where the pixel matches the key color, 255 elsewhere.
    for( int i = 0; i < w*h; i++)
    {
        if( pixelsHue[i*3] >= H-tH && pixelsHue[i*3] <= H+tH&&
            pixelsHue[i*3+1] >= S-tS && pixelsHue[i*3+1] <= S+tS
        ){
            dstMask[i] = 0;
        }else{
            dstMask[i] = 255;
        }
    }

    // Reuse hueImg as the CvMat mask and copy the non-keyed pixels into dst.
    hueImg.setFromPixels(dstMask,w,h);
    cvCopy( hsvImage.getCvImage(),dst.getCvImage(),hueImg.getCvImage());
    dst.flagImageChanged();
    dst.convertHsvToRgb();

    // BUGFIX: dstMask comes from new[]; scalar delete on it is undefined
    // behavior — must be delete[].
    delete[] dstMask;
}
开发者ID:assistiveAI,项目名称:msbTools,代码行数:52,代码来源:chromaKeyer.cpp
示例5: copyRegion
/*ipWidth = ipGrabber[i]->getWidth();
ipHeight = ipGrabber[i]->getHeight();
if ((ipWidth > 0) && (ipHeight >0) ) {
if ((ipWidth != ipImg[i].getWidth()) || (ipHeight != ipImg[i].getHeight())) {
if (ipImg[i].bAllocated) ipImg[i].resize(ipWidth, ipHeight);
else ipImg[i].allocate(ipWidth, ipHeight);
}
if ((ipWidth != outW) || (ipHeight != outH)) {
ofxCvColorImage tempIpImg;
tempIpImg.allocate(ipWidth, ipHeight);
tempIpImg.setFromPixels(ipGrabber[i]->getPixels(), ipWidth, ipHeight);
ipImg[i].scaleIntoMe(tempIpImg, OF_INTERPOLATE_NEAREST_NEIGHBOR);
}
else {
ipImg[i].setFromPixels(ipGrabber[i]->getPixels(), ipWidth, ipHeight);
}
}*/
}
}
///*****************************************************************
/// MONOCHANNEL MONOBLOB
///******************************************************************/
// Crops the output frame around the first tracked blob.
// Reads members: lastBlobs (blobs from the analysis pass), bZoomTarget
// (zoom-to-blob vs fixed-size crop), in_analysis_scale (analysis->full-res
// scale factor), inAspect, cropW/cropH, fullFrame, outputImg, out_H_gap,
// outW, out_H_in_aspect. Writes members: left, top, targW, targH.
void testApp::monoCmonoB(){
int numBlobs = lastBlobs.size();
if (numBlobs > 0) {
if (bZoomTarget) {
// Zoom mode: crop rectangle = the blob's bounding rect, scaled back up
// to full-resolution coordinates.
left = lastBlobs[0].boundingRect.x * in_analysis_scale;
top = lastBlobs[0].boundingRect.y * in_analysis_scale;
targW = lastBlobs[0].boundingRect.width * in_analysis_scale;
targH = lastBlobs[0].boundingRect.height * in_analysis_scale;
// Adjust to maintain the input aspect ratio: grow the narrower
// dimension symmetrically around the blob.
int targW_inAspect = targH*inAspect;
if (targW < targW_inAspect) {
left -= (targW_inAspect-targW)/2;
targW = targW_inAspect;
}
else {
int targH_inAspect = targW/inAspect;
top -= (targH_inAspect-targH)/2;
targH = targH_inAspect;
}
}
else {
// Fixed-size mode: crop a cropW x cropH window centered on the blob.
targW = cropW;
targH = cropH;
top = lastBlobs[0].centroid.y*in_analysis_scale-targH/2;
left = lastBlobs[0].centroid.x*in_analysis_scale-targW/2;
}
// copyRegion needs lvalue variables as arguments
int out_left = 0;
copyRegion( fullFrame, left, top, targW, targH,
outputImg, out_left, out_H_gap, outW, out_H_in_aspect);
}
}
///*****************************************************************
/// MONOCHANNEL MULTIBLOB
///******************************************************************/
void testApp::monoCmultiB(){
int numBlobs = lastBlobs.size();
if (numBlobs == 1) monoCmonoB();
else if (numBlobs > 1) {
int max_x = 0;
int max_y = 0;
for (unsigned int i = 0; i < lastBlobs.size(); i++) {
left = MIN( left, lastBlobs[i].boundingRect.x) ;
top = MIN( top, lastBlobs[i].boundingRect.y) ;
max_x = MAX( max_x, lastBlobs[i].boundingRect.x+
lastBlobs[i].boundingRect.width );
max_y = MAX( max_y, lastBlobs[i].boundingRect.y+
lastBlobs[i].boundingRect.height );
}
left *= in_analysis_scale;
top *= in_analysis_scale;
max_x *= in_analysis_scale;
max_y *= in_analysis_scale;
if (bZoomTarget) {
targW = (max_x-left);
targH = (max_y-top);
// adjust to mantain inAspect ratio
int targW_inAspect = targH*inAspect;
if (targW < targW_inAspect) {
left -= (targW_inAspect-targW)/2;
targW = targW_inAspect;
}
else {
int targH_inAspect = targW/inAspect;
top -= (targH_inAspect-targH)/2;
targH = targH_inAspect;
}
}
else {
targW = cropW;
targH = cropH;
// centroid of all blobs
top = (float)(top+max_y)/2;
top -= ((float)targH/2);
left = (float)(left+max_x)/2;
left -= ((float)targW/2);
//.........这里部分代码省略.........
开发者ID:dasaki,项目名称:cvcinema,代码行数:101,代码来源:testApp.cpp
示例6:
//--------------------------------------------------------------------------------
// Assigns a color image to this float image by converting RGB to a single
// gray channel. Sizes must match; otherwise the assignment is skipped.
// NOTE(review): assumes cvCvtColor accepts this 8U -> float destination
// depth combination — verify against the allocated cvImage depth.
void ofxCvFloatImage::operator = ( ofxCvColorImage& mom ) {
    if( mom.width != width || mom.height != height ) {
        cout << "error in =, images are different sizes" << endl;
        return;
    }
    cvCvtColor( mom.getCvImage(), cvImage, CV_RGB2GRAY );
}
开发者ID:heavyside,项目名称:refractiveindex,代码行数:8,代码来源:ofxCvFloatImage.cpp
示例7: applyDisplaceMap
// Displaces sourceImage's pixels using this displacement map: the R channel
// drives horizontal offset, the B channel vertical offset, with 127 as the
// neutral value. The displaced frame is uploaded into destTexture.
// @param sourceImage  color frame to displace (same size as this map)
// @param destTexture  texture receiving the displaced RGB pixels
// @param hscale       horizontal displacement amplitude per channel unit
// @param vscale       vertical displacement amplitude per channel unit
void dfDisplacementMap::applyDisplaceMap(ofxCvColorImage& sourceImage,ofTexture& destTexture,float hscale=0.3, float vscale=0.3){
    unsigned char * displacePixels = this->getPixels();
    unsigned char * pixels = sourceImage.getPixels();
    // BUGFIX: the row width was hard-coded as 320; derive it from the
    // actual image width so any frame size works.
    int rowBytes = width * 3;
    int totalPixels = height * width * 3;
    // Value-initialized (all zeros) so pixels whose source offset falls out
    // of range stay black. BUGFIX: the buffer used to be left uninitialized.
    unsigned char * videoDisplaced = new unsigned char[totalPixels]();
    for (int i = 0; i < totalPixels; i += 3){
        int hdisplace = (int)((displacePixels[i] - 127)*hscale);    // x offset from R
        int vdisplace = (int)((displacePixels[i+2] - 127)*vscale);  // y offset from B
        int displace = 0;
        // Only displace horizontally while staying inside the current row.
        if( i%rowBytes + hdisplace*3 > 0 && i%rowBytes + hdisplace*3 < rowBytes ){
            displace = hdisplace + vdisplace*width;
        }
        displace *= 3; // pixel offset -> byte offset
        if(i+displace > 0 && i+displace < totalPixels){
            videoDisplaced[i]   = pixels[i+displace];
            videoDisplaced[i+1] = pixels[i+displace+1];
            videoDisplaced[i+2] = pixels[i+displace+2];
        }
    }
    destTexture.loadData(videoDisplaced, width, height, GL_RGB);
    // BUGFIX: new[] must be paired with delete[], not scalar delete.
    delete[] videoDisplaced;
}
开发者ID:typecode,项目名称:digitallyfit,代码行数:25,代码来源:dfDisplacementMap.cpp
示例8:
//--------------------------------------------------------------------------------
// Per-pixel saturating addition of mom into this image (via a temp buffer
// that is swapped in). Sizes must match; otherwise the add is skipped.
void ofxCvColorImage::operator += ( ofxCvColorImage& mom ) {
    if( mom.width != width || mom.height != height ) {
        cout << "error in +=, images are different sizes" << endl;
        return;
    }
    cvAdd( cvImage, mom.getCvImage(), cvImageTemp );
    swapTemp();
}
开发者ID:heavyside,项目名称:refractiveindex,代码行数:9,代码来源:ofxCvColorImage.cpp
示例9: set
//---------------------------------------------------------------------------------
// Captures a blob: stores the blob descriptor, builds an RGBA cutout of the
// blob's bounding rect (color from myImage, alpha from myMask), and records
// the blob centroid as this videoBlob's position.
void videoBlob::set(ofxCvBlob myBlob, ofxCvColorImage myImage, ofxCvGrayscaleImage myMask){
    // BUGFIX: ofxCvBlob holds non-POD members (its contour point vector),
    // so memcpy'ing it is undefined behavior; use the copy assignment.
    blob = myBlob;
    // now, let's get the data in,
    int w = blob.boundingRect.width;
    int h = blob.boundingRect.height;
    int imgw = myImage.width;
    int imgx = blob.boundingRect.x;
    int imgy = blob.boundingRect.y;
    // RGBA cutout: color from the image, alpha from the mask.
    unsigned char * blobRGBA = new unsigned char [ w * h * 4 ];
    unsigned char * colorPixels = myImage.getPixels();
    unsigned char * grayPixels = myMask.getPixels();
    for (int i = 0; i < w; i++){
        for (int j = 0; j < h; j++){
            int posTex = (j * w + i)*4;                 // dest offset (RGBA)
            int posGray = ((j+imgy)*imgw + (i + imgx)); // source pixel index
            int posCol = posGray * 3;                   // source byte offset (RGB)
            blobRGBA[posTex + 0] = colorPixels[posCol + 0];
            blobRGBA[posTex + 1] = colorPixels[posCol + 1];
            blobRGBA[posTex + 2] = colorPixels[posCol + 2];
            blobRGBA[posTex + 3] = grayPixels[posGray];
        }
    }
    // Texture upload is disabled; the pow2-padded black buffer is kept from
    // the original for when the loadData calls are re-enabled.
    // myTexture.clear();
    // myTexture.allocate(w,h,GL_RGBA);
    unsigned char * black = new unsigned char [ofNextPow2(w) * ofNextPow2(h) * 4];
    memset(black, 0, ofNextPow2(w) * ofNextPow2(h) * 4);
    // myTexture.loadData(black, ofNextPow2(w), ofNextPow2(h), GL_RGBA);
    // myTexture.loadData(blobRGBA, w, h, GL_RGBA);
    // BUGFIX: both buffers come from new[]; scalar delete is undefined behavior.
    delete[] black;
    delete[] blobRGBA;
    pos.x = blob.centroid.x;
    pos.y = blob.centroid.y;
    scale = 1;
    angle = 0;
}
开发者ID:Ahmedn1,项目名称:ccv-hand,代码行数:46,代码来源:videoBlob.cpp
示例10: getColorFromScreen
// Samples the RGB color at position (x, y) in src and makes it the new key
// color via setFromRGB. w is the image width in pixels.
void chromaKeyer::getColorFromScreen( int x, int y, int w, ofxCvColorImage & src)
{
    cout << " x " << x << " y " << y << endl;
    // Reject samples on or outside the image border.
    if( x > src.width-1 || y > src.height-1 || x < 1 || y < 1 ) return;
    unsigned char * pixels = src.getPixels();
    // BUGFIX: the offset was computed as x * w + y, which both transposes
    // the coordinates and ignores that each pixel occupies 3 bytes. The
    // correct byte offset into an interleaved RGB buffer is (y*w + x)*3.
    int pix = (y * w + x) * 3;
    int r = pixels[ pix ];
    int g = pixels[ pix + 1];
    int b = pixels[ pix + 2];
    setFromRGB( r, g, b);
}
开发者ID:assistiveAI,项目名称:msbTools,代码行数:21,代码来源:chromaKeyer.cpp
示例11: scaleIntoMe
//--------------------------------------------------------------------------------
// Resizes mom into this image using the given OpenCV interpolation method:
//   CV_INTER_NN     - nearest-neighbor
//   CV_INTER_LINEAR - bilinear (OpenCV's default)
//   CV_INTER_AREA   - pixel-area relation (preferred for decimation)
//   CV_INTER_CUBIC  - bicubic
void ofxCvColorImage::scaleIntoMe( ofxCvColorImage& mom, int interpolationMethod){
    // BUGFIX: the validation combined the "!=" tests with ||, which is true
    // for every value, so every call logged an error and was forced to
    // CV_INTER_NN. A method is invalid only if it matches NONE of the
    // constants, i.e. the tests must be combined with &&.
    if ((interpolationMethod != CV_INTER_NN) &&
        (interpolationMethod != CV_INTER_LINEAR) &&
        (interpolationMethod != CV_INTER_AREA) &&
        (interpolationMethod != CV_INTER_CUBIC) ){
        printf("error in scaleIntoMe / interpolationMethod, setting to CV_INTER_NN \n");
        interpolationMethod = CV_INTER_NN;
    }
    cvResize( mom.getCvImage(), cvImage, interpolationMethod );
}
开发者ID:heavyside,项目名称:refractiveindex,代码行数:22,代码来源:ofxCvColorImage.cpp
示例12: update
// Feeds one camera frame into the background model. For the first
// LEARNING_TIME ms after allocation the frame is accumulated into both the
// average-difference model and the YUV codebook model; afterwards the models
// are frozen (stats computed once) and each frame is segmented into
// foreground masks by both methods.
// NOTE(review): on the frames where (re)allocation happens, the input frame
// itself is not processed (else-only body) — presumably intentional, confirm.
void ofxBackground::update(ofxCvColorImage& input){
float now = ofGetElapsedTimeMillis();
// get width/height disregarding ROI
IplImage* ipltemp = input.getCvImage();
_width = ipltemp->width;
_height = ipltemp->height;
if( inputCopy.getWidth() == 0 ) {
// first frame ever: allocate to the incoming size
allocate( _width, _height );
} else if( inputCopy.getWidth() != _width || inputCopy.getHeight() != _height ) {
// reallocate to new size
clear();
allocate( _width, _height );
} else { //don't do anything unless we have allocated! (and therefore set timeStartedLearning to a safe, non zero value)
inputCopy = input;
inputCopy.setROI( input.getROI() );
yuvImage.setROI( input.getROI() ); //pass on ROI'ness
// the codebook model works in YUV space
yuvImage.setFromPixels(inputCopy.getPixels(), _width, _height);
yuvImage.convertRgbToYuv();
if((now-timeStartedLearning) < LEARNING_TIME){
//then we should be learning
//LEARNING THE AVERAGE AND AVG DIFF BACKGROUND
accumulateBackground(inputCopy.getCvImage());
//LEARNING THE CODEBOOK BACKGROUND
pColor = (uchar *)((yuvImage.getCvImage())->imageData);
for(int c=0; c<imageLen; c++)
{
cvupdateCodeBook(pColor, cB[c], cbBounds, nChannels);
pColor += 3;
}
//TODO: clear stale entries
bStatsDone = false;
bLearning = true;
}else {
//its either time to do stats or not
bLearning = false;
if(!bStatsDone){
//do the stats, just the once
createModelsfromStats(); //create the background model
bStatsDone = true;
}else {
//learn as normal, find the foreground if any
//FIND FOREGROUND BY AVG METHOD:
backgroundDiff(inputCopy.getCvImage(),ImaskAVG);
cvCopy(ImaskAVG,ImaskAVGCC);
cvconnectedComponents(ImaskAVGCC);
//FIND FOREGROUND BY CODEBOOK METHOD
uchar maskPixelCodeBook;
pColor = (uchar *)((yuvImage.getCvImage())->imageData); //3 channel yuv image
uchar *pMask = (uchar *)((ImaskCodeBook)->imageData); //1 channel image
for(int c=0; c<imageLen; c++)
{
maskPixelCodeBook = cvbackgroundDiff(pColor, cB[c], nChannels, minMod, maxMod);
*pMask++ = maskPixelCodeBook;
pColor += 3;
}
//This part just to visualize bounding boxes and centers if desired
cvCopy(ImaskCodeBook,ImaskCodeBookCC);
cvconnectedComponents(ImaskCodeBookCC);
//TODO: update the learned background pixels....
//TODO: clear stale codebook entries on a much slower frequency
}
}
// publish the four result masks for callers to read
backgroundAverage = ImaskAVG;
backgroundAverageConnectedComponents = ImaskAVGCC;
backgroundCodebook = ImaskCodeBook;
backgroundCodeBookConnectedComponents = ImaskCodeBookCC;
}
}
开发者ID:HellicarAndLewis,项目名称:Feedback,代码行数:78,代码来源:ofxBackground.cpp
示例13:
// Convenience overload: forwards the color frame's raw pixel buffer and
// dimensions to the pixel-based update.
void ofxOpticalFlowLK :: update ( ofxCvColorImage& source )
{
    unsigned char* srcPixels = source.getPixels();
    update( srcPixels, source.width, source.height, OF_IMAGE_COLOR );
}
开发者ID:MaxWorgan,项目名称:ofxOpticalFlowLK,代码行数:4,代码来源:ofxOpticalFlowLK.cpp
示例14: cvCopy
//--------------------------------------------------------------------------------
// Copy constructor: deep-copies mom's pixel buffer.
ofxCvColorImage::ofxCvColorImage( const ofxCvColorImage& mom ) {
    // BUGFIX: cvCopy wrote into cvImage before it was ever allocated,
    // which is undefined behavior; allocate to the source's size first.
    allocate( mom.width, mom.height );
    cvCopy( mom.getCvImage(), cvImage, 0 );
}
开发者ID:heavyside,项目名称:refractiveindex,代码行数:4,代码来源:ofxCvColorImage.cpp
示例15: update
//--------------------------------------------------------------
// Grabs a camera frame and, when a new one arrives, mirrors it and runs
// the Haar detector on a downscaled grayscale copy.
void testApp::update(){
    vidGrabber.grabFrame();
    if(!vidGrabber.isFrameNew()) {
        return;
    }
    colorImg.setFromPixels(vidGrabber.getPixels(), vidGrabber.getWidth(), vidGrabber.getHeight());
    colorImg.mirror(false, true);           // horizontal mirror only
    greyImage = colorImg;                   // implicit color -> gray conversion
    greyImageSmall.scaleIntoMe(greyImage);  // shrink for faster detection
    haarFinder.findHaarObjects(greyImageSmall);
}
开发者ID:bmwcmw,项目名称:iSteveJobs,代码行数:16,代码来源:testApp.cpp
示例16: update
// Per-frame logic: advances the cyclic offset, then (when a new camera frame
// is available) runs background subtraction and blob tracking.
void update()
{
    // Advance the little offset, wrapping within [0, 1).
    offset += 0.01;
    if (offset > 1)
    {
        offset = 0;
    }

    grabber.update();
    if (!grabber.isFrameNew())
    {
        return; // nothing new from the camera this frame
    }

    // Copy the fresh pixels; assigning a color image to a grayscale image
    // converts automatically.
    colorImage.setFromPixels(grabber.getPixelsRef());
    grayscaleImage = colorImage;

    // Snapshot the background when requested from the keyboard; one-shot,
    // so it won't re-trigger until explicitly asked again.
    if (learnBackground)
    {
        grayscaleBackgroundImage = grayscaleImage;
        learnBackground = false;
    }

    // Background subtraction: absolute difference, then threshold to binary.
    grayscaleAbsoluteDifference.absDiff(grayscaleBackgroundImage, grayscaleImage);
    grayscaleBinary = grayscaleAbsoluteDifference;
    grayscaleBinary.threshold(threshold, invert);

    // Blob detection on the binary image: min 100 px, max one third of the
    // frame, up to 10 blobs, holes included.
    contourFinder.findContours(grayscaleBinary, 100, (width * height) / 3.0, 10, true);

    // Extend the trail with the biggest blob's center, or reset it when
    // nothing was found.
    if (contourFinder.nBlobs > 0)
    {
        holePositions.addVertex(contourFinder.blobs[0].boundingRect.getCenter());
    }
    else
    {
        holePositions.clear();
    }
}
开发者ID:SAIC-ATS,项目名称:ARTTECH-5010,代码行数:58,代码来源:main.cpp
示例17: setup
// One-time setup: loads the sample image and pushes it through the CV
// pipeline once (color -> gray -> binary threshold).
void setup(){
sampleImg.load("sample_img.jpg");
colorImg.allocate(900, 900);
colorImg = sampleImg;
// assigning a color image to a grayscale image converts automatically
grayImg = colorImg;
// binarize: pixels brighter than 200 become white, the rest black
grayImg.threshold(200);
}
开发者ID:jeonghopark,项目名称:simpleOpenCV,代码行数:11,代码来源:main.cpp
示例18: setup
// One-time setup: sizes the window and grabber, then allocates every helper
// image used by the background-subtraction pipeline at the same size.
void setup()
{
ofSetWindowShape(width, height); // Set the window size.
grabber.initGrabber(width, height); // Set the grabber size.
// Allocate each of our helper images.
colorImage.allocate(width, height);
grayscaleImage.allocate(width, height);
grayscaleBackgroundImage.allocate(width, height);
grayscaleAbsoluteDifference.allocate(width, height);
grayscaleBinary.allocate(width, height);
}
开发者ID:SAIC-ATS,项目名称:ARTTECH-5010,代码行数:13,代码来源:main.cpp
示例19: setup
//--------------------------------------------------------------
// One-time setup: starts the camera, allocates the CV images (full-size
// color/gray plus a small gray copy for fast Haar detection), loads the
// face cascade and the overlay image.
void testApp::setup(){
vidGrabber.setVerbose(true);
vidGrabber.initGrabber(640, 480);
colorImg.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
greyImage.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
// small buffer the detector actually runs on (120x90 for speed)
greyImageSmall.allocate(120, 90);
haarFinder.setup("haarcascade_frontalface_alt2.xml");
img.loadImage("stevejobs.png");
// draw the overlay centered on each detected face
img.setAnchorPercent(0.5, 0.5);
ofEnableAlphaBlending();
}
开发者ID:bmwcmw,项目名称:iSteveJobs,代码行数:16,代码来源:testApp.cpp
示例20: draw
//--------------------------------------------------------------
void testApp::draw(){
ofSetColor(255, 255, 255);
colorImg.draw(0, 0, ofGetWidth(), ofGetHeight());
glPushMatrix();
glScalef(ofGetWidth() / (float)greyImageSmall.getWidth(), ofGetHeight() / (float)greyImageSmall.getHeight(), 1);
// haarTracker.draw(0, 0);
ofNoFill();
for(int i = 0; i < haarFinder.blobs.size(); i++) {
ofRectangle cur = haarFinder.blobs[i].boundingRect;
// ofRect(cur.x, cur.y, cur.width, cur.height);
int iw = cur.width * 1.4;
img.draw(haarFinder.blobs[i].centroid, iw, iw * img.getHeight() / img.getWidth());
}
glPopMatrix();
}
开发者ID:bmwcmw,项目名称:iSteveJobs,代码行数:18,代码来源:testApp.cpp
注:本文中的ofxCvColorImage类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论