本文整理汇总了C++中cvGetReal2D函数的典型用法代码示例。如果您正苦于以下问题:C++ cvGetReal2D函数的具体用法?C++ cvGetReal2D怎么用?C++ cvGetReal2D使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvGetReal2D函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: memset
// Histogram Contrast Stretching
// Builds the grey-level histogram of srcImage, turns it into a scaled
// cumulative sum, and uses that sum as a lookup table to remap every
// pixel of srcImage into dstImage.
// NOTE(review): the scale factor maps the cumulative histogram into
// [0, 128] rather than the usual [0, 255] -- presumably intentional;
// confirm with the caller before changing.
void Filterling ::contrastStretching (IplImage *srcImage, IplImage *dstImage) {
    // Fixed-size histogram buffers; zero-initialised stack arrays
    // replace the original heap new[]/delete[] + memset (no leak risk).
    unsigned int histogramArray[256] = {0} ;
    unsigned int sumHistogramArray[256] = {0} ;

    // Compute the image histogram (one bin per grey level).
    // Loop indices are int to match the signed IplImage width/height
    // (the original compared size_t against int).
    for (int i = 0 ; i < srcImage ->height ; i++)
        for (int j = 0 ; j < srcImage ->width ; j++)
            histogramArray[(unsigned int)cvGetReal2D (srcImage, i, j)]++ ;

    // Compute the normalised cumulative histogram (+0.5 rounds to nearest).
    unsigned int sum = 0 ;
    const float SCALE_FACTOR = 128.0f / (float)(srcImage ->height * srcImage ->width) ;
    for (int i = 0 ; i < 256 ; i++) {
        sum += histogramArray[i] ;
        sumHistogramArray[i] = (unsigned int)((sum * SCALE_FACTOR) + 0.5) ;
    }

    // Transform the image using sumHistogramArray as a LUT.
    for (int i = 0 ; i < srcImage ->height ; i++)
        for (int j = 0 ; j < srcImage ->width ; j++)
            cvSetReal2D (dstImage, i, j, (double) sumHistogramArray[(unsigned int)cvGetReal2D (srcImage, i, j)]) ;
}
开发者ID:astroluj,项目名称:VC_OpenCV_Coin_Detection,代码行数:31,代码来源:filterling.cpp
示例2: cvGetReal2D
//--------------------------------------------------------------
// Feed the optical-flow velocity field into the fluid solver: every
// sampled grid cell whose squared flow magnitude exceeds a noise
// threshold is injected into the fluid at its normalised position.
// (Unused locals from the original -- iw, iy, particleEmitCounter,
// multX, multY -- have been removed.)
void fluid001::opticalFlowToFluid() {
    // Squared-magnitude threshold below which flow is treated as noise.
    const float flowThreshold = 100;
    // Scales raw flow values down to fluid-velocity units.
    const float opticalFlowFluidMult = 0.001f;

    for ( int y = 0; y < flow.captureHeight; y += flow.captureRowsStep ) {
        for ( int x = 0; x < flow.captureWidth; x += flow.captureColsStep ) {
            float dx = cvGetReal2D( flow.vel_x, y, x );
            float dy = cvGetReal2D( flow.vel_y, y, x );
            if (dx*dx + dy*dy > flowThreshold) {
                addToFluid((float)x/flow.captureWidth, (float)y/flow.captureHeight,
                           dx*opticalFlowFluidMult, dy*opticalFlowFluidMult);
            }
        }
    }
}
开发者ID:mazbox,项目名称:TukeSprint,代码行数:28,代码来源:fluid001.cpp
示例3: cvCreateMat
/**
 * Classify an image with the trained network.
 *
 * Resizes the image to IMGWIDTH x IMGHEIGHT, flattens it into a
 * single-row sample, runs NNModel.predict(), and returns the index of
 * the smallest output activation.
 *
 * @param Image  input image; ownership is taken (released here).
 * @return index of the minimum network output (class label).
 */
float NN::test(IplImage* Image){
    CvMat* Sample = cvCreateMat(1,IMGSIZE,CV_8U);
    IplImage *resized = cvCreateImage(cvSize(IMGWIDTH,IMGHEIGHT),Image->depth,1);
    cvResize(Image,resized,0);
    // BUG FIX: the original released Image *before* reading
    // Image->width / Image->height in the loop below (use after free).
    // Release it here, and iterate over the resized image -- the one we
    // actually sample from.
    cvReleaseImage(&Image);
    // NOTE(review): the i*width + j index assumes a square image
    // (IMGWIDTH == IMGHEIGHT); it is a transposed flatten -- kept as-is
    // so the sample layout matches what the model was trained on.
    for (int i=0; i< resized->width; i++)
        for (int j=0; j< resized->height; j++)
            cvSet2D(Sample,0,i*resized->width + j,cvGet2D(resized,i,j));
    cvReleaseImage(&resized);

    CvMat *output = cvCreateMat(1,Length,CV_32F);
    NNModel.predict(Sample,output);

    // BUG FIX: the original compared the running *index* against the
    // next output value, so it did not actually locate the minimum.
    // Track the minimum value and its index separately.
    int minIdx = 0;
    double minVal = cvGetReal2D(output,0,0);
    for (int i = 0; i < Length; i++)
    {
        double v = cvGetReal2D(output,0,i);
        cout << v << " ";
        if (v < minVal)
        {
            minVal = v;
            minIdx = i;
        }
    }
    // Release the matrices the original leaked.
    cvReleaseMat(&Sample);
    cvReleaseMat(&output);
    return minIdx;
}
开发者ID:ProjPossibility,项目名称:pp-usc-ss12-sign-language-reader,代码行数:30,代码来源:NN.cpp
示例4:
// Convert an OpenCV intrinsics matrix (KK) into a column-major OpenGL
// projection matrix written to openglKK (16 floats).
void CamaraLucida::convertKKopencv2opengl(CvMat* opencvKK, float width, float height, float near, float far, float* openglKK)
{
    // Focal lengths and principal point from the OpenCV camera matrix.
    const float fx = (float)cvGetReal2D( opencvKK, 0, 0 );
    const float fy = (float)cvGetReal2D( opencvKK, 1, 1 );
    const float cx = (float)cvGetReal2D( opencvKK, 0, 2 );
    const float cy = (float)cvGetReal2D( opencvKK, 1, 2 );

    // NOTE(review): the near/far *parameters* are never used -- the
    // clip planes come from calib.near / calib.far. Confirm this is
    // intended before relying on the parameters.
    const float a = 2. * fx / width;
    const float b = 2. * fy / height;
    const float c = 2. * (cx / width) - 1.;
    const float d = 2. * (cy / height) - 1.;
    const float e = - (calib.far + calib.near) / (calib.far - calib.near);
    const float f = -2. * calib.far * calib.near / (calib.far - calib.near);

    // Column-major layout: zero everything, then set the non-zero cells.
    for (int k = 0; k < 16; k++)
        openglKK[k] = 0.;

    openglKK[0]  = a;
    openglKK[5]  = b;
    openglKK[8]  = c;
    openglKK[9]  = d;
    openglKK[10] = e;
    openglKK[11] = -1.;
    openglKK[14] = f;

    // another solution by Kyle McDonald...
    // https://github.com/kylemcdonald/ofxCv/blob/master/libs/ofxCv/src/Calibration.cpp
    //	glFrustum(
    //	nearDist * (-cx) / fx, nearDist * (w - cx) / fx,
    //	nearDist * (cy - h) / fy, nearDist * (cy) / fy,
    //	nearDist, farDist);
}
开发者ID:Giladx,项目名称:Camara-Lucida,代码行数:27,代码来源:CamaraLucida.cpp
示例5: cvGetReal2D
// Find the strongest optical-flow vector in the field, sampling every
// second pixel. On success *pos receives the normalised (0..1)
// position and *vel the raw flow vector, and true is returned; when
// the whole field is zero, false is returned and the outputs for pos
// are left untouched.
bool ofxCvOpticalFlowLK::getBiggestFlowPoint(ofPoint *pos, ofPoint *vel) {
    float bestMagSq = 0;
    float bestX = -1;
    float bestY = -1;

    for (int row = 0; row < captureHeight; row += 2) {
        for (int col = 0; col < captureWidth; col += 2) {
            const float fx = cvGetReal2D( vel_x, row, col );
            const float fy = cvGetReal2D( vel_y, row, col );
            const float magSq = fx*fx + fy*fy;
            if (magSq > bestMagSq) {
                bestX = col;
                bestY = row;
                bestMagSq = magSq;
                // Record the winning vector as we go.
                vel->x = fx;
                vel->y = fy;
            }
        }
    }

    if (bestMagSq > 0) {
        pos->x = bestX / captureWidth;
        pos->y = bestY / captureHeight;
        return true;
    }
    return false;
}
开发者ID:mazbox,项目名称:TukeSprint,代码行数:28,代码来源:ofxCvOpticalFlowLK.cpp
示例6: glLineWidth
// Draw the flow field as line segments, one per sampled grid cell,
// scaled to fit the (x, y, w, h) screen rectangle. Each segment runs
// from the cell to the cell offset by twice its flow vector.
// (The unused local `speed` from the original has been removed.)
void ofxCvOpticalFlowLK::draw(float x,float y,float w, float h) {
    glLineWidth(1);
    ofSetColor(0xffffff);

    int xx, yy, dx, dy;
    float multW = w/captureWidth;
    float multH = h/captureHeight;

    glPushMatrix();
    {
        glTranslatef(x, y, 0);
        glScalef(multW, multH, 1);

        for ( yy = 0; yy < captureHeight; yy+=captureRowsStep ){
            for ( xx = 0; xx < captureWidth; xx+=captureColsStep ){
                // Flow components are truncated to ints for drawing.
                dx = (int)cvGetReal2D( vel_x, yy, xx );
                dy = (int)cvGetReal2D( vel_y, yy, xx );
                ofLine(xx, yy, xx+dx * 2, yy+dy * 2);
            }
        }
    }
    glPopMatrix();
}
开发者ID:mazbox,项目名称:TukeSprint,代码行数:26,代码来源:ofxCvOpticalFlowLK.cpp
示例7: cvGetReal2D
// Stitch a profile (side) face image and the center face image into
// m_PanoramicFace. Pixels on the profile side of the seam line are
// copied straight from the profile image; pixels inside a band of
// half-width m_lineT around the line y = m_slope*x + m_intercept are
// alpha-blended between the two images.
void Panoramic::ShowStitch(IplImage *profile,IplImage *center)
{
    const double Alpha = 0.5; // blend weight at the middle of the seam band

    for (int row = 0; row < m_height; row++)
    {
        for (int col = 0; col < m_width; col++)
        {
            // Signed distance (in y) of this pixel from the seam line.
            const double linePosition = m_slope*col + m_intercept - row;
            const double profilePixel = cvGetReal2D(profile, row, col);

            if (linePosition <= -m_lineT)
            {
                // Fully on the profile side of the seam.
                cvSetReal2D(m_PanoramicFace, row, col, profilePixel);
            }
            else if (linePosition > -m_lineT && linePosition < m_lineT)
            {
                // Inside the blending band: weighted mix of both images.
                const double wCenter  = LinearBlending(m_lineT, m_slope*col + m_intercept - row, Alpha);
                const double wProfile = 1 - wCenter;
                double blended = wCenter*cvGetReal2D(center, row, col) + wProfile*profilePixel;
                // Clamp to the valid 8-bit intensity range.
                if (blended > 255) blended = 255;
                else if (blended < 0) blended = 0;
                cvSetReal2D(m_PanoramicFace, row, col, blended);
            }
        }
    }
}
开发者ID:Claycau,项目名称:MultiviewFaceReg,代码行数:33,代码来源:Panoramic.cpp
示例8: COG_edges
/*
 * COG_strip
 * intensity-weighted mean row (centre of gravity) of the vertical
 * strip of the global `thresh` image spanning columns [x_start, x_end).
 * Returns 0.0 for an all-black strip -- the original divided by a zero
 * accumulator there and produced NaN/inf.
 */
static float COG_strip(int x_start, int x_end)
{
    float intensity = 0.0, weighted = 0.0, accum = 0.0;
    int x, y;
    for(x = x_start; x < x_end; x++)
    {
        for(y = 0; y < thresh->height; y++)
        {
            intensity = (float)(cvGetReal2D(thresh, y, x));
            weighted += y*intensity;
            accum += intensity;
        }
    }
    return (accum > 0.0f) ? (weighted / accum) : 0.0f;
}

/*
 * COG_edges
 * find intersection of chalkline with either edge
 * *Y_left  receives the COG of the leftmost 40 columns,
 * *Y_right receives the COG of the rightmost 40 columns.
 */
void COG_edges(float *Y_left, float *Y_right)
{
    *Y_left = COG_strip(0, 40);
    *Y_right = COG_strip(thresh->width-40, thresh->width);
}
开发者ID:roofilin,项目名称:roofilin,代码行数:35,代码来源:final.c
示例9: getVelAtNorm
//--------------------------------------------------------------
// Sample the optical-flow velocity at a normalised (0..1) position.
// The position is mapped to pixel coordinates, clamped into the frame,
// and the x/y flow components are written to *u and *v.
void openniTracking::getVelAtNorm(float x, float y, float *u, float *v) {
    int px = (int)(x * _width);
    int py = (int)(y * _height);

    // Clamp to valid pixel indices.
    if (px < 0)             px = 0;
    else if (px >= _width)  px = _width - 1;
    if (py < 0)             py = 0;
    else if (py >= _height) py = _height - 1;

    *u = cvGetReal2D( opticalFlow.getVelX(), py, px );
    *v = cvGetReal2D( opticalFlow.getVelY(), py, px );
}
开发者ID:d3cod3,项目名称:of_0071_GAmuza64,代码行数:9,代码来源:openniTracking.cpp
示例10: getVelAtNorm
// Look up the optical-flow velocity at a normalised (0..1) coordinate.
// Converts to camera pixel coordinates, clamps into the frame, then
// reads the horizontal/vertical flow into *u / *v.
void MotionTracker::getVelAtNorm(float x, float y, float *u, float *v) {
    int col = x * camWidth;
    int row = y * camHeight;
    // Keep the sample point inside the camera frame.
    col = (col < 0) ? 0 : ((col >= camWidth)  ? camWidth  - 1 : col);
    row = (row < 0) ? 0 : ((row >= camHeight) ? camHeight - 1 : row);
    *u = cvGetReal2D( opticalFlow.vel_x, row, col );
    *v = cvGetReal2D( opticalFlow.vel_y, row, col );
}
开发者ID:6301158,项目名称:ofx-dev,代码行数:8,代码来源:MotionTracker.cpp
示例11: cvGetReal2D
// Flow vector at pixel (x, y); out-of-range queries yield a zero point.
ofPoint ofxCvOpticalFlowLK::flowAtPoint(int x, int y){
    const bool inside = (x >= 0) && (x < captureWidth) && (y >= 0) && (y < captureHeight);
    if(!inside){
        return ofPoint(0, 0);
    }
    return ofPoint(cvGetReal2D( vel_x, y, x ), cvGetReal2D( vel_y, y, x ));
}
开发者ID:wlstks7,项目名称:fuckyoubody,代码行数:8,代码来源:ofxCvOpticalFlowLK.cpp
示例12: getBlockVel
//Get the velocity of each block given the x and y of that block.
//Returns a zero point for out-of-range coordinates. BUG FIX: the
//original only checked the upper bounds, so negative x or y read
//outside the flow matrices (undefined behaviour).
ofPoint ofxOpticalFlowBM :: getBlockVel( int x, int y ) {
    ofPoint p; // default-constructed to (0, 0)
    if( x >= 0 && y >= 0 && x < flowSize.width && y < flowSize.height ) {
        p.x = cvGetReal2D(opFlowVelX, y, x); //NOTE: y then x ... annoying
        p.y = cvGetReal2D(opFlowVelY, y, x); //NOTE: y then x ... annoying
    }
    return p;
}
开发者ID:kellyegan,项目名称:bmOpticalFlow,代码行数:9,代码来源:ofxOpticalFlowBM.cpp
示例13: printf
// Dump a matrix to stdout for debugging (verbose log level only).
// colmajor=true prints one matrix row per output line; colmajor=false
// prints one matrix column per output line (i.e. the transpose).
// Float depths use a fixed-width %9.3f format, 8/16-bit unsigned
// depths use %6d; other depths produce blank lines.
void CamaraLucida::printM(CvMat* M, bool colmajor)
{
    if (ofGetLogLevel() != OF_LOG_VERBOSE)
    {
        return;
    }

    // Unify both orientations into one loop pair: the outer index runs
    // over rows (colmajor) or columns (!colmajor), the inner over the
    // other dimension. (i, j) is always (row, column) for the lookup.
    const int outer = colmajor ? M->rows : M->cols;
    const int inner = colmajor ? M->cols : M->rows;

    for (int a = 0; a < outer; a++)
    {
        printf("\n");
        for (int b = 0; b < inner; b++)
        {
            const int i = colmajor ? a : b; // row index
            const int j = colmajor ? b : a; // column index
            switch( CV_MAT_DEPTH(M->type) )
            {
                case CV_32F:
                case CV_64F:
                    printf("%9.3f ", (float)cvGetReal2D( M, i, j ));
                    break;
                case CV_8U:
                case CV_16U:
                    printf("%6d",(int)cvGetReal2D( M, i, j ));
                    break;
                default:
                    break;
            }
        }
    }
    printf("\n");
}
开发者ID:Giladx,项目名称:Camara-Lucida,代码行数:54,代码来源:CamaraLucida.cpp
示例14: ofRectangle
// Average the optical-flow velocity components over a region of the
// camera frame, writing the averaged horizontal flow to *u and the
// vertical flow to *v. `bounds` is given in camera coordinates; if it
// is NULL the whole optical-flow field is used.
// NOTE(review): a caller-supplied `bounds` is rescaled *in place* to
// optical-flow coordinates -- callers that reuse the rectangle will
// see the modified values. Confirm this is intended.
void MotionTracker::getVelAverageComponents(float *u, float *v, ofRectangle* bounds){
	bool cleanBounds = false;
	if(bounds == NULL){
		// No region supplied: cover the full flow field, and remember
		// to free the temporary rectangle before returning.
		bounds = new ofRectangle(0, 0, optFlowWidth, optFlowHeight);
		cleanBounds = true;
	}

	// normalize the bounds to optical flow system
	bounds->x = (float)bounds->x/(float)camWidth*(float)optFlowWidth; // normalize co-ords from camera size to optical flow size
	bounds->y = (float)bounds->y/(float)camHeight*(float)optFlowHeight;
	bounds->width = (float)bounds->width/(float)camWidth*(float)optFlowWidth;
	bounds->height = (float)bounds->height/(float)camHeight*(float)optFlowHeight;

	// fix out of bounds: clip the rectangle to [0, optFlowWidth] x
	// [0, optFlowHeight], shrinking width/height by any overhang.
	if(bounds->x < 0){
		bounds->width += bounds->x;
		bounds->x = 0;
	}
	else if(bounds->x + bounds->width > optFlowWidth){
		bounds->width -= ((bounds->x + bounds->width) - optFlowWidth);
	}
	if(bounds->y < 0){
		bounds->height += bounds->y;
		bounds->y = 0;
	}
	else if(bounds->y + bounds->height > optFlowHeight){
		bounds->height -= ((bounds->y + bounds->height) - optFlowHeight);
	}

	/** cout << "--bounds:norm--" << endl;
	cout << "x:" << bounds->x << endl;
	cout << "y:" << bounds->y << endl;
	cout << "w:" << bounds->width << endl;
	cout << "h:" << bounds->height << endl;
	cout << "--------------------" << endl;*/

	// Sum the flow components over every cell of the clipped region.
	*u = 0.0;
	*v = 0.0;
	for(int i=bounds->x; i < bounds->x+(int)bounds->width; i++){
		for(int j=bounds->y; j < bounds->y+(int)bounds->height; j++){
			*u += cvGetReal2D( opticalFlow.vel_x, j, i);
			*v += cvGetReal2D( opticalFlow.vel_y, j, i);
			//		cout << i << "," << j << "::" << *u << "," << *v << endl;
		}
	}
	// The divisors fold the per-cell average together with a factor of
	// half the region size (see the inline derivations below).
	// NOTE(review): if the clipped region is empty (zero width or
	// height) these divide by zero -- TODO confirm callers never pass
	// a fully out-of-frame rectangle.
	*u /= 2.0 * (float)bounds->height; // effectively: u / (width*height) * width/2.0
	*v /= 2.0 * (float)bounds->width; // effectively: v / (width*height) * height/2.0

	if(cleanBounds)
		delete bounds;
}
开发者ID:6301158,项目名称:ofx-dev,代码行数:52,代码来源:MotionTracker.cpp
示例15: dance_measurement
/* Measurement function for the dance tracker: the measurement z_k is
 * the first two state components of x_k, plus (optionally) additive
 * measurement noise n_k. */
static void dance_measurement(const CvMat* x_k,
                              const CvMat* n_k,
                              CvMat* z_k)
{
    /* Copy the two observed state components into the measurement. */
    for (int row = 0; row < 2; row++)
    {
        cvSetReal2D(z_k, row, 0, cvGetReal2D(x_k, row, 0));
    }
    /* n_k may be null (noise-free evaluation); skip the add then. */
    if (n_k)
    {
        cvAdd(z_k, n_k, z_k);
    }
}
开发者ID:leonard-lab,项目名称:DanceTrack,代码行数:13,代码来源:DanceTracker.cpp
示例16: printf
// Dump a matrix to stdout for debugging.
// colmajor=true prints one matrix row per output line; colmajor=false
// prints the transpose (one matrix column per line). Float depths are
// printed with %9.3f, 8/16-bit unsigned depths with %6d; other depths
// produce blank lines only.
// (The stray ';' after the original function body has been removed --
// it was a harmless but non-idiomatic empty declaration.)
void Calibration::printM( CvMat* M, bool colmajor )
{
	int i,j;
	if (colmajor)
	{
		for (i = 0; i < M->rows; i++)
		{
			printf("\n");
			switch( CV_MAT_DEPTH(M->type) )
			{
				case CV_32F:
				case CV_64F:
					for (j = 0; j < M->cols; j++)
						printf("%9.3f ", (float)cvGetReal2D( M, i, j ));
					break;
				case CV_8U:
				case CV_16U:
					for (j = 0; j < M->cols; j++)
						printf("%6d",(int)cvGetReal2D( M, i, j ));
					break;
				default:
					break;
			}
		}
	}
	else
	{
		for (j = 0; j < M->cols; j++)
		{
			printf("\n");
			switch( CV_MAT_DEPTH(M->type) )
			{
				case CV_32F:
				case CV_64F:
					for (i = 0; i < M->rows; i++)
						printf("%9.3f ", (float)cvGetReal2D( M, i, j ));
					break;
				case CV_8U:
				case CV_16U:
					for (i = 0; i < M->rows; i++)
						printf("%6d",(int)cvGetReal2D( M, i, j ));
					break;
				default:
					break;
			}
		}
	}
	printf("\n\n");
}
开发者ID:kamend,项目名称:ofxCamaraLucida,代码行数:49,代码来源:cmlCalibration.cpp
示例17: adjustRMatrixAndZForMeasurementSize
/* Resize the measurement-noise covariance R and measurement vector z
 * for `nmeas` tracked measurements. Each measurement contributes an
 * (x, y, th) triple, plus one trailing z entry, so the new dimension
 * is 3*nmeas + 1. The per-component variances (sx, sy, sth, sz) are
 * read from the existing R's diagonal and replicated for every
 * measurement. Both matrices are reallocated in place (the caller's
 * pointers are updated; the old matrices are released). */
void adjustRMatrixAndZForMeasurementSize(CvMat*& R, CvMat*& z, unsigned int nmeas)
{
    if(nmeas == 0)
    {
        return;
    }

    unsigned int nrows_prev = (R)->rows;
    unsigned int nrows_z_prev = (z)->rows;
    if(nrows_prev < 4)
    {
        /* need at least the x/y/th/z diagonal entries to replicate.
         * BUG FIX: the message previously named the wrong function. */
        fprintf(stderr, "adjustRMatrixAndZForMeasurementSize Error: Existing R is too small.\n");
        return;
    }

    /* x, y, th each meas + z */
    unsigned int nrows_now = 3*nmeas + 1;

    /* same number of measurements -> do nothing */
    if(nrows_now == nrows_prev && nrows_now == nrows_z_prev)
    {
        return;
    }

    /* grab the diagonal variances before releasing the old matrices */
    double sx = cvGetReal2D(R, 0, 0);
    double sy = cvGetReal2D(R, 1, 1);
    double sth = cvGetReal2D(R, 2, 2);
    double sz = cvGetReal2D(R, nrows_prev-1, nrows_prev-1);

    cvReleaseMat(&R);
    cvReleaseMat(&z);

    R = cvCreateMat(nrows_now, nrows_now, CV_64FC1);
    z = cvCreateMat(nrows_now, 1, CV_64FC1);
    cvZero(R);
    cvZero(z);

    /* block-diagonal replication of (sx, sy, sth), one 3x3 block per
     * measurement */
    for(unsigned int i = 0; i < nmeas; i++)
    {
        cvSetReal2D(R, i*3 + 0, i*3 + 0, sx);
        cvSetReal2D(R, i*3 + 1, i*3 + 1, sy);
        cvSetReal2D(R, i*3 + 2, i*3 + 2, sth);
    }
    cvSetReal2D(R, nrows_now-1, nrows_now-1, sz);
}
开发者ID:dantswain,项目名称:BelugaSketch,代码行数:48,代码来源:TrackerExtras.cpp
示例18: cvCloneImage
// Compute a single-channel 8-bit saliency map for imgIn using the
// region-contrast (GetRC) method. Returns a newly allocated IplImage
// owned by the caller, or NULL if imgIn is NULL.
IplImage* Saliency::Get(const IplImage* imgIn)
{
	if (imgIn == NULL)
	{
		return NULL;
	}

	// Clone so we can wrap the pixels in cv::Mat headers without
	// touching the caller's (const) image.
	IplImage* tt = cvCloneImage(imgIn);
	Mat img3f(tt);
	img3f.convertTo(img3f, CV_32FC3, 1.0/255); // normalise 0-255 -> 0-1
	Mat sal = GetRC(img3f);
	sal.convertTo(sal, CV_32FC3, 255);         // back to the 0-255 range

	// Copy the saliency values into a fresh single-channel image.
	IplImage qImg(sal);
	IplImage * result = cvCreateImage(cvGetSize(imgIn), 8, 1);
	for (int r = 0; r < imgIn->height; r++)
	{
		for (int c = 0; c < imgIn->width; c++)
		{
			CvScalar col;
			col.val[0] = cvGetReal2D(&qImg, r, c);
			cvSet2D(result, r, c, col);
		}
	}

	// BUG FIX: the original leaked the cloned image.
	cvReleaseImage(&tt);
	return result;
}
开发者ID:LoveWX,项目名称:Projects_in_master_stage,代码行数:26,代码来源:Saliency.cpp
示例19: PintarFeatures
// Draw each tracked feature's optical-flow displacement onto the
// background window: one segment per feature, from its position to the
// position offset by the flow field (vx, vy) sampled at that pixel.
void PintarFeatures(IplImage& vx,IplImage& vy,IplImage& windowBackground,CvPoint2D32f frame1_features[],int number_of_features,float optical_flow_feature_error[],char optical_flow_found_feature[])
{
    // Loop-invariant drawing attributes, hoisted out of the loop.
    const int line_thickness = 1;
    const CvScalar line_color = CV_RGB(0,255,0);

    for (int idx = 0; idx < number_of_features; idx++)
    {
        CvPoint from, to;
        from.x = (int) frame1_features[idx].x;
        from.y = (int) frame1_features[idx].y;
        // Destination = feature position + flow at that pixel.
        to.x = from.x + cvGetReal2D(&vx, from.y, from.x);
        to.y = from.y + cvGetReal2D(&vy, from.y, from.x);
        PaintPoint(from, to, windowBackground, line_thickness, line_color, 1);
    }
}
开发者ID:guobs,项目名称:lukas-kanade-openmp-sse2,代码行数:16,代码来源:PaintUtils.cpp
示例20: TiXmlElement
// Serialise an OpenCV matrix into a TinyXML element named element_name.
// Only CV_32F and CV_64F matrices are supported; a null matrix or any
// other element type yields NULL. The element carries type/rows/cols
// attributes and one <data> child per matrix element, written with
// enough decimal digits to round-trip the floating-point values.
TiXmlElement* FileFormatUtils::createXMLMatrix(const char* element_name, const CvMat *matrix) {
	if (!matrix) return NULL;

	TiXmlElement* xml_matrix = new TiXmlElement(element_name);

	const int elem_type = cvGetElemType(matrix);
	int precision;
	if (elem_type == CV_32F) {
		xml_matrix->SetAttribute("type", "CV_32F");
		precision = std::numeric_limits<float>::digits10 + 2;
	}
	else if (elem_type == CV_64F) {
		xml_matrix->SetAttribute("type", "CV_64F");
		precision = std::numeric_limits<double>::digits10 + 2;
	}
	else {
		// Unsupported element type: nothing useful to serialise.
		delete xml_matrix;
		return NULL;
	}

	xml_matrix->SetAttribute("rows", matrix->rows);
	xml_matrix->SetAttribute("cols", matrix->cols);

	for (int r = 0; r < matrix->rows; ++r) {
		for (int c = 0; c < matrix->cols; ++c) {
			// Format the value first, then attach it under a <data> node.
			std::stringstream ss;
			ss.precision(precision);
			ss<<cvGetReal2D(matrix, r, c);

			TiXmlElement *xml_data = new TiXmlElement("data");
			xml_data->LinkEndChild(new TiXmlText(ss.str().c_str()));
			xml_matrix->LinkEndChild(xml_data);
		}
	}
	return xml_matrix;
}
开发者ID:130s,项目名称:ar_track_alvar,代码行数:33,代码来源:FileFormatUtils.cpp
注:本文中的cvGetReal2D函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论