本文整理汇总了C++中cvInitMatHeader函数的典型用法代码示例。如果您正苦于以下问题:C++ cvInitMatHeader函数的具体用法?C++ cvInitMatHeader怎么用?C++ cvInitMatHeader使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvInitMatHeader函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: NODE_IMPLEMENTATION
// Mu runtime binding: resizes a 4-channel float image to (width, height).
// Wraps the source and destination pixel buffers in CvMat headers (no data
// copy) and delegates the resampling to OpenCV's cvResize.
static NODE_IMPLEMENTATION(resize, Pointer)
{
    MuLangContext* context = static_cast<MuLangContext*>(NODE_THREAD.context());
    const Class* c = static_cast<const ImageType*>(NODE_THIS.type());
    ClassInstance* inObj = NODE_ARG_OBJECT(0, ClassInstance);   // source image object
    int width = NODE_ARG(1, int);                               // target width in pixels
    int height = NODE_ARG(2, int);                              // target height in pixels
    // Allocate the destination image object at the requested size.
    ClassInstance* outObj = makeImage(context, c, width, height);
    ImageStruct* inIm = inObj->data<ImageStruct>();
    ImageStruct* outIm = outObj->data<ImageStruct>();
    CvMat inMat;
    CvMat outMat;
    // Build matrix headers over the existing float RGBA buffers; step 0 is
    // treated like CV_AUTOSTEP by cvInitMatHeader (rows assumed packed).
    cvInitMatHeader(&inMat,
                    inIm->height,
                    inIm->width,
                    CV_32FC(4),
                    inIm->data->data<float>(),
                    0);
    cvInitMatHeader(&outMat,
                    outIm->height,
                    outIm->width,
                    CV_32FC(4),
                    outIm->data->data<float>(),
                    0);
    // CV_INTER_AREA gives good quality when downsampling.
    cvResize(&inMat, &outMat, CV_INTER_AREA);
    NODE_RETURN(outObj);
}
开发者ID:jimhourihan,项目名称:mu,代码行数:32,代码来源:ImageModule.cpp
示例2: cvInitMatHeader
// Seeds the CONDENSATION sample set with a uniform distribution centred on
// the given pose: components 0-2 are perturbed by rotPerturb, components
// 3-5 by transPerturb.
void
condensPose::init(vpHomogeneousMatrix& cMo, float rotPerturb, float transPerturb)
{
    vpPoseVector pose;
    pose.buildFrom(cMo);

    // Build the lower/upper bounds of the sampling box around the pose.
    float lo[6];
    float hi[6];
    for (int i = 0; i < 6; ++i) {
        const float spread = (i < 3) ? rotPerturb : transPerturb;
        lo[i] = pose[i] - spread;
        hi[i] = pose[i] + spread;
    }

    // Stack headers over the bound arrays (no allocation).
    CvMat lower, upper;
    cvInitMatHeader(&lower, this->dim, 1, CV_32FC1, lo);
    cvInitMatHeader(&upper, this->dim, 1, CV_32FC1, hi);
    cvConDensInitSampleSet(condens, &lower, &upper);
}
开发者ID:wpfnihao,项目名称:EET,代码行数:25,代码来源:condensPose.cpp
示例3: draw_oxfd_feature
/* Draws a single OXFD-format feature on an image.
   Parameters:
     img:   image to draw on
     feat:  feature to draw
     color: drawing colour */
static void draw_oxfd_feature(IplImage* img, struct feature* feat,
    CvScalar color)
{
    double shape[4] = { feat->a, feat->b, feat->b, feat->c };
    double evecs[4] = { 0 };   /* eigenvector data */
    double evals[2] = { 0 };   /* eigenvalue data */
    CvMat S, Vecs, Vals;
    double angle, axis1, axis2;

    /* derive the ellipse axes and orientation from the 2x2 shape matrix */
    cvInitMatHeader(&S, 2, 2, CV_64FC1, shape, CV_AUTOSTEP);
    cvInitMatHeader(&Vecs, 2, 2, CV_64FC1, evecs, CV_AUTOSTEP);
    cvInitMatHeader(&Vals, 2, 1, CV_64FC1, evals, CV_AUTOSTEP);
    cvEigenVV(&S, &Vecs, &Vals, DBL_EPSILON, 0, 0);
    axis1 = 1 / sqrt(evals[1]);
    axis2 = 1 / sqrt(evals[0]);
    angle = -atan2(evecs[1], evecs[0]);
    angle *= 180 / CV_PI;

    /* black outline, coloured ellipse, then a small centre cross */
    cvEllipse(img, cvPoint(feat->x, feat->y), cvSize(axis2, axis1), angle,
        0, 360, CV_RGB(0, 0, 0), 3, 8, 0);
    cvEllipse(img, cvPoint(feat->x, feat->y), cvSize(axis2, axis1), angle,
        0, 360, color, 1, 8, 0);
    cvLine(img, cvPoint(feat->x + 2, feat->y), cvPoint(feat->x - 2, feat->y),
        color, 1, 8, 0);
    cvLine(img, cvPoint(feat->x, feat->y + 2), cvPoint(feat->x, feat->y - 2),
        color, 1, 8, 0);
}
开发者ID:BITDIP,项目名称:BITDIP,代码行数:35,代码来源:imgfeatures.cpp
示例4: draw_oxfd_feature
/*
Draws a single Oxford-type feature

@param img image on which to draw
@param feat feature to be drawn
@param color color in which to draw
*/
void draw_oxfd_feature( IplImage* img, struct feature* feat, CvScalar color )
{
    double shape[4] = { feat->a, feat->b, feat->b, feat->c };
    double vecs[4] = { 0 };
    double vals[2] = { 0 };
    CvMat S, Vecs, Vals;
    double angle, len1, len2;

    /* compute axes and orientation of ellipse surrounding affine region */
    cvInitMatHeader( &S, 2, 2, CV_64FC1, shape, CV_AUTOSTEP );
    cvInitMatHeader( &Vecs, 2, 2, CV_64FC1, vecs, CV_AUTOSTEP );
    cvInitMatHeader( &Vals, 2, 1, CV_64FC1, vals, CV_AUTOSTEP );
#if CV_MAJOR_VERSION==1
    /* OpenCV 1.x signature: no lowindex/highindex arguments */
    cvEigenVV( &S, &Vecs, &Vals, DBL_EPSILON );
#else
    cvEigenVV( &S, &Vecs, &Vals, DBL_EPSILON, -1,-1 );
#endif
    len1 = 1 / sqrt( vals[1] );
    len2 = 1 / sqrt( vals[0] );
    angle = -atan2( vecs[1], vecs[0] );
    angle *= 180 / CV_PI;

    /* black outline, coloured ellipse, then a small cross at the centre */
    cvEllipse( img, cvPoint( feat->x, feat->y ), cvSize( len2, len1 ), angle,
        0, 360, CV_RGB(0,0,0), 3, 8, 0 );
    cvEllipse( img, cvPoint( feat->x, feat->y ), cvSize( len2, len1 ), angle,
        0, 360, color, 1, 8, 0 );
    cvLine( img, cvPoint( feat->x+2, feat->y ), cvPoint( feat->x-2, feat->y ),
        color, 1, 8, 0 );
    cvLine( img, cvPoint( feat->x, feat->y+2 ), cvPoint( feat->x, feat->y-2 ),
        color, 1, 8, 0 );
}
开发者ID:KongWeibin,项目名称:mrpt,代码行数:38,代码来源:imgfeatures.c
示例5: setRanges
/**
 * Scatters nParticles sample positions around the camera position
 * `particleV` using OpenCV's CONDENSATION sampler, stores each sample
 * centre in particleCenterM, and generates a per-particle camera pose.
 *
 * FIX: the CvConDensation state allocated by cvCreateConDensation() was
 * never released, leaking the full filter state on every call; it is now
 * freed with cvReleaseConDensation() before returning.
 */
void particle::genParticles(glm::vec3 particleV)
{
    particleCenterM.setTo(cv::Scalar(0));

    // Range of the particle scatter around the current position
    // (fills minRange / maxRange members).
    setRanges(particleV.x, particleV.y, particleV.z, 0.5);
    CvMat LB, UB;
    cvInitMatHeader(&LB, 3, 1, CV_32FC1, minRange);
    cvInitMatHeader(&UB, 3, 1, CV_32FC1, maxRange);
    CvConDensation* condens = cvCreateConDensation(dim, dim, nParticles);
    cvConDensInitSampleSet(condens, &LB, &UB);

    // Identity dynamics matrix (samples are not propagated by the filter here).
    condens->DynamMatr[0] = 1.0;
    condens->DynamMatr[1] = 0.0;
    condens->DynamMatr[2] = 0.0;
    condens->DynamMatr[3] = 0.0;
    condens->DynamMatr[4] = 1.0;
    condens->DynamMatr[5] = 0.0;
    condens->DynamMatr[6] = 0.0;
    condens->DynamMatr[7] = 0.0;
    condens->DynamMatr[8] = 1.0;

    cameraV.clear();
    newCameraV.clear();

    for (int i = 0; i < condens->SamplesNum; i++) {
        // Confidence computation kept from the original, disabled:
        // float diffX = (particleV.x - condens->flSamples[i][0])/xRange;
        // float diffY = (particleV.y - condens->flSamples[i][1])/yRange;
        // float diffZ = (particleV.z - condens->flSamples[i][2])/zRange;
        // condens->flConfidence[i] = 1.0 / (sqrt(diffX * diffX + diffY * diffY + diffZ * diffZ));

        // Record the scattered sample centre and build a camera pose for it.
        glm::vec3 partCenter(condens->flSamples[i][0], condens->flSamples[i][1], condens->flSamples[i][2]);
        particleCenterM(i,0) = partCenter.x;
        particleCenterM(i,1) = partCenter.y;
        particleCenterM(i,2) = partCenter.z;
        genParticles(lookAtCamera, partCenter, i);
    }

    //cvConDensUpdateByTime(condens);
    // Best particle estimate (not one of the sample positions), disabled:
    //cv::Point3f statePt(condens->State[0], condens->State[1], condens->State[2]);
    //newCameraV.push_back(statePt);

    cvReleaseConDensation(&condens);   // FIX: was leaked on every call
}
开发者ID:maxluzius,项目名称:BA,代码行数:59,代码来源:Particle.cpp
示例6: cvCopy
// Renders the tracking result: converts BazAR's projection matrix from the
// OpenGL image convention to the camera image convention, extracts an
// ARToolKit-style camera transform from it, and (in debug mode) overlays
// the projected coordinate-system axes on the video frame.
void BazARTracker::show_result(CamAugmentation &augment, IplImage *video, IplImage **dst)
{
    // In debug mode, work on a (lazily created) copy of the video frame.
    if (getDebugMode()){
        if (*dst==0) *dst=cvCloneImage(video);
        else cvCopy(video, *dst);
    }

    CvMat *m = augment.GetProjectionMatrix(0);

    // Flip the y axis (OpenGL origin vs. camera image origin): negate row 1
    // and offset by the image height.
    CvMat *coordinateTrans = cvCreateMat(3, 3, CV_64F);
    cvmSetIdentity(coordinateTrans);
    cvmSet(coordinateTrans, 1, 1, -1);
    cvmSet(coordinateTrans, 1, 2, m_cparam->cparam.ysize);
    // NOTE(review): `m` is used as both input and output here; confirm the
    // OpenCV version in use supports in-place cvMatMul operands.
    cvMatMul(coordinateTrans, m, m);

    // extract intrinsic camera parameters from bazar's projection matrix..
    // (g_matIntrinsic and matCameraRT4_4 are presumably globals/members
    // defined elsewhere in this file — TODO confirm.)
    GetARToolKitRTfromBAZARProjMat(g_matIntrinsic, m, matCameraRT4_4);
    cvTranspose(matCameraRT4_4, matCameraRT4_4);
    cvReleaseMat(&coordinateTrans);

    // Debug: project four homogeneous reference points and draw the axes.
    if (getDebugMode()) {
        // draw the coordinate system axes
        double w =video->width/2.0;
        double h =video->height/2.0;
        // 3D coordinates of an object (axis endpoints, origin at image centre)
        double pts[4][4] = {
            {w,h,0, 1}, // 0,0,0,1
            {w*2,h,0, 1}, // w, 0
            {w,h*2,0, 1}, // 0, h
            {w,h,-w-h, 1} // 0, 0, -
        };
        CvMat ptsMat, projectedMat;
        // Stack headers: ptsMat borrows the local array, projectedMat borrows
        // `projected` (presumably a 3x4 member/global buffer — TODO confirm).
        cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
        cvInitMatHeader(&projectedMat, 3, 4, CV_64FC1, projected);
        // projected = m * pts^T (points are stored row-wise, hence B^T).
        cvGEMM(m, &ptsMat, 1, 0, 0, &projectedMat, CV_GEMM_B_T );
        // Perspective divide to pixel coordinates.
        for (int i=0; i<4; i++)
        {
            projected[0][i] /= projected[2][i];
            projected[1][i] /= projected[2][i];
        }
        // draw the projected lines: x axis red, y green, z blue
        cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
            cvPoint((int)projected[0][1], (int)projected[1][1]), CV_RGB(255,0,0), 2);
        cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
            cvPoint((int)projected[0][2], (int)projected[1][2]), CV_RGB(0,255,0), 2);
        cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
            cvPoint((int)projected[0][3], (int)projected[1][3]), CV_RGB(0,0,255), 2);
    }
}
开发者ID:soulsheng,项目名称:osgART,代码行数:56,代码来源:BazARTracker.cpp
示例7: augment_scene
// Draws the calibration target's detected quadrilateral onto `display`,
// filled with a colour sampled from the model image and relit using the
// irradiance map plus the camera's gain and bias.
static void augment_scene(CalibModel &model, IplImage *frame, IplImage *display)
{
    cvCopy(frame, display);

    if (!model.detector.object_is_detected)
        return;

    CvMat *m = model.augm.GetProjectionMatrix(0);
    if (!m) return;

    // Project the 4 model corners: proj = m * pts, corners stored
    // column-wise as homogeneous [x y 0 1]^T (z = 0: planar target).
    double pts[4][4];
    double proj[4][4];
    CvMat ptsMat, projMat;
    cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
    cvInitMatHeader(&projMat, 3, 4, CV_64FC1, proj);

    for (int i=0; i<4; i++) {
        pts[0][i] = model.corners[i].x;
        pts[1][i] = model.corners[i].y;
        pts[2][i] = 0;
        pts[3][i] = 1;
    }
    cvMatMul(m, &ptsMat, &projMat);
    cvReleaseMat(&m);

    // Perspective divide to integer pixel coordinates.
    CvPoint projPts[4];
    for (int i=0;i<4; i++) {
        projPts[i].x = cvRound(proj[0][i]/proj[2][i]);
        projPts[i].y = cvRound(proj[1][i]/proj[2][i]);
    }

    // Surface normal = third column of the object-to-world transform.
    CvMat *o2w = model.augm.GetObjectToWorld();
    float normal[3];
    for (int j=0;j<3;j++)
        normal[j] = cvGet2D(o2w, j, 2).val[0];
    cvReleaseMat(&o2w);

    // we want to relight a color present on the model image
    // with an irradiance coming from the irradiance map
    CvScalar color = cvGet2D(model.image, model.image->height/2, model.image->width/2);
    CvScalar irradiance = model.map.readMap(normal);

    // the camera has some gain and bias
    const float *g = model.map.getGain(0);
    const float *b = model.map.getBias(0);

    // relight the 3 RGB channels. The bias value expects 0 black 1 white,
    // but the image are stored with a white value of 255: Conversion is required.
    for (int i=0; i<3; i++) {
        color.val[i] = 255.0*(g[i]*(color.val[i]/255.0)*irradiance.val[i] + b[i]);
    }

    // draw a filled polygon with the relighted color
    cvFillConvexPoly(display, projPts, 4, color);
}
开发者ID:emblem,项目名称:aril,代码行数:54,代码来源:fullcalib.cpp
示例8: cvGetDiag
// get a diagonal of the input array as a column vector
// (NOTE: the original header comment said "get column" — copy-pasted from
// cvGetCol; this function returns a diagonal.)
// diag == 0 selects the main diagonal, diag > 0 a diagonal shifted right,
// diag < 0 a diagonal shifted down. `submat` becomes a header over the
// parent's data (no copy).
CV_IMPL CvMat*
cvGetDiag( const CvArr* arr, CvMat* submat, int diag )
{
    CvMat* res = 0;

    CV_FUNCNAME( "cvGetDiag" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;
    int pix_size;

    // Coerce any array type to a CvMat header.
    if( !CV_IS_ARR( mat ))
        CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !submat )
        CV_ERROR( CV_StsNullPtr, "" );

    pix_size = icvPixSize[CV_ARR_TYPE(mat->type)];

    if( diag >= 0 )
    {
        // Diagonal starts at (0, diag); its length is limited by both
        // remaining width and the matrix height.
        int len = mat->width - diag;

        if( len <= 0 )
            CV_ERROR( CV_StsOutOfRange, "" );

        len = CV_IMIN( len, mat->height );

        // Element stride of step + pix_size walks one row down and one
        // column right per element — i.e. along the diagonal.
        CV_CALL( cvInitMatHeader( submat, len, 1, mat->type,
                                  mat->data.ptr + diag*pix_size,
                                  mat->step + pix_size ));
    }
    else
    {
        // Diagonal starts at (-diag, 0); length limited by remaining
        // height and the matrix width.
        int len = mat->height + diag;
        diag = -diag;

        if( len <= 0 )
            CV_ERROR( CV_StsOutOfRange, "" );

        len = CV_IMIN( len, mat->width );

        CV_CALL( cvInitMatHeader( submat, len, 1, mat->type,
                                  mat->data.ptr + diag*mat->step,
                                  mat->step + pix_size ));
    }

    res = submat;

    __END__;

    return res;
}
开发者ID:mikanradojevic,项目名称:sdkpub,代码行数:55,代码来源:cvarray.cpp
示例9: point
/*
Performs a perspective transformation on a single point. That is, for a
point (x, y) and a 3 x 3 matrix T this function returns the point
(u, v), where

[x' y' w']^T = T * [x y 1]^T,

and

(u, v) = (x'/w', y'/w').

Note that affine transforms are a subset of perspective transforms.

@param pt a 2D point
@param T a perspective transformation matrix

@return Returns the point (u, v) as above.
*/
CvPoint2D64f persp_xform_pt( CvPoint2D64f pt, CvMat* T )
{
    double src[3] = { pt.x, pt.y, 1.0 };
    double dst[3] = { 0 };
    CvMat srcVec, dstVec;

    /* stack headers over the homogeneous coordinates — no allocation */
    cvInitMatHeader( &srcVec, 3, 1, CV_64FC1, src, CV_AUTOSTEP );
    cvInitMatHeader( &dstVec, 3, 1, CV_64FC1, dst, CV_AUTOSTEP );
    cvMatMul( T, &srcVec, &dstVec );

    /* perspective divide back to 2D */
    return cvPoint2D64f( dst[0] / dst[2], dst[1] / dst[2] );
}
开发者ID:Mizutome,项目名称:Cpp-SIFTICGM,代码行数:31,代码来源:xform.cpp
示例10: persp_xform_pt
/* Computes the perspective transform of a point: given a point pt and a
transform matrix T, computes [x',y',w']^T = M * [x,y,1]^T (^T denotes
transpose); the transformed point is (u, v) = (x'/w', y'/w').
Note: affine transforms are a special case of perspective transforms.
Parameters:
pt: a 2D point
T: perspective transformation matrix
Return value: the point pt after the perspective transform
*/
CvPoint2D64f persp_xform_pt(CvPoint2D64f pt, CvMat* T)
{
    CvMat XY, UV; // XY: 3x1 column vector for pt; UV: 3x1 column vector for the transformed point
    double xy[3] = { pt.x, pt.y, 1.0 }, uv[3] = { 0 }; // backing data for the two vectors
    CvPoint2D64f rslt; // result
    // initialize the matrix headers over the stack arrays (no allocation)
    cvInitMatHeader(&XY, 3, 1, CV_64FC1, xy, CV_AUTOSTEP);
    cvInitMatHeader(&UV, 3, 1, CV_64FC1, uv, CV_AUTOSTEP);
    cvMatMul(T, &XY, &UV); // matrix product T*XY, result stored in UV
    rslt = cvPoint2D64f(uv[0] / uv[2], uv[1] / uv[2]); // perspective divide
    return rslt;
}
开发者ID:BITDIP,项目名称:BITDIP,代码行数:23,代码来源:xform.c
示例11: interp_contr
/*
Calculates interpolated pixel contrast. Based on Eqn. (3) in Lowe's paper.

@param dog_pyr difference of Gaussians scale space pyramid
@param octv octave of scale space
@param intvl within-octave interval
@param r pixel row
@param c pixel column
@param xi interpolated subpixel increment to interval
@param xr interpolated subpixel increment to row
@param xc interpolated subpixel increment to col

@return Returns interpolated contrast.
*/
double interp_contr( IplImage*** dog_pyr, int octv, int intvl, int r,
    int c, double xi, double xr, double xc )
{
    double offset[3] = { xc, xr, xi };
    double result[1];
    CvMat X, T;
    CvMat* dD;

    /* headers over stack data: X is the subpixel offset, T the 1x1 output */
    cvInitMatHeader( &X, 3, 1, CV_64FC1, offset, CV_AUTOSTEP );
    cvInitMatHeader( &T, 1, 1, CV_64FC1, result, CV_AUTOSTEP );

    /* T = dD^T * X, then contrast = D(r,c) + T/2 */
    dD = deriv_3D( dog_pyr, octv, intvl, r, c );
    cvGEMM( dD, &X, 1, NULL, 0, &T, CV_GEMM_A_T );
    cvReleaseMat( &dD );

    return pixval32f( dog_pyr[octv][intvl], r, c ) + result[0] * 0.5;
}
开发者ID:cherubjywh,项目名称:opencv,代码行数:28,代码来源:sift.cpp
示例12: cvCreateMat
void margBlobCorrector::init() {
camera_matrix = cvCreateMat(3, 3, CV_32FC1);
dist_coeffs = cvCreateMat(1, 4, CV_32FC1);
float cam_mat[] = { 0, 0, 0,
0, 0, 0,
0, 0, 1 };
cvInitMatHeader(camera_matrix, 3, 3, CV_32FC1, cam_mat);
float dist_c[] = {0, 0, 0, 0};
cvInitMatHeader(dist_coeffs, 1, 4, CV_32FC1, dist_c);
}
开发者ID:amintz,项目名称:M1.0,代码行数:14,代码来源:margBlobCorrector.cpp
示例13: cvGetSubArr
// get ROI (or minor) of input array
// Initializes `submat` as a header over the rectangular region `rect` of
// `arr` — no data is copied; the submatrix shares storage with the parent.
CV_IMPL CvMat*
cvGetSubArr( const CvArr* arr, CvMat* submat, CvRect rect )
{
    CvMat* res = 0;

    CV_FUNCNAME( "cvGetSubArr" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;

    // Coerce any array type to a CvMat header.
    if( !CV_IS_ARR( mat ))
        CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !submat )
        CV_ERROR( CV_StsNullPtr, "" );

    // All four rect fields must be non-negative (checked in one go by
    // OR-ing and testing the sign bit).
    if( (rect.x|rect.y|rect.width|rect.height) < 0 )
        CV_ERROR( CV_StsBadSize, "" );

    // The rectangle must lie entirely inside the matrix.
    if( rect.x + rect.width > mat->width ||
        rect.y + rect.height > mat->height )
        CV_ERROR( CV_StsBadSize, "" );

    // Point the header at the ROI's top-left pixel; the parent's row step
    // is kept so rows remain correctly strided.
    CV_CALL( cvInitMatHeader( submat, rect.height, rect.width, mat->type,
                              mat->data.ptr + rect.y*mat->step +
                              rect.x*icvPixSize[CV_ARR_TYPE(mat->type)],
                              mat->step ));
    res = submat;

    __END__;

    return res;
}
开发者ID:mikanradojevic,项目名称:sdkpub,代码行数:35,代码来源:cvarray.cpp
示例14: sin
/**
 * Calculate pose of the camera. Since no translations are made,
 * only the rotation is calculated.
 *
 * Builds the 4x4 homogeneous matrix [R|T] (T = 0) for a rotation of
 * `angle` radians about the X axis:
 *   1    0       0    0
 *   0  cos()  -sin()  0
 *   0  sin()   cos()  0
 *   0    0       0    1
 *
 * @param angle rotation angle in radians
 * @return a newly allocated 4x4 CV_32F matrix; the caller owns it and
 *         must release it with cvReleaseMat().
 *
 * BUG FIX: the original allocated a matrix with cvCreateMat(), then
 * repointed its header at a stack array via cvInitMatHeader() (leaking
 * the allocated data block), and finally leaked that header as well by
 * reassigning the pointer to cvCloneMat()'s result. A stack header plus
 * a single clone now yields the same heap-owned result with no leaks.
 */
CvMat* MultipleViewGeomOld::calculateRotationMatrix(float angle) {
    float sinTeta = sin(angle);
    float cosTeta = cos(angle);
    float a[] = { 1, 0, 0, 0,
                  0, cosTeta, -sinTeta, 0,
                  0, sinTeta, cosTeta, 0,
                  0, 0, 0, 1 };

    // Header over the stack data, then deep-copy it onto the heap.
    CvMat header;
    cvInitMatHeader(&header, 4, 4, CV_32F, a);
    CvMat* rtMat = cvCloneMat(&header);

    LOG4CPLUS_DEBUG(myLogger,"Rotation R|T matrix for angle: " << angle << endl << printCvMat(rtMat));
    return rtMat;
}
开发者ID:ricleal,项目名称:3dac,代码行数:34,代码来源:MultipleViewGeomOld.cpp
示例15: main
// Demonstrates the four ways of creating a CvMat with OpenCV's legacy C API.
int main(int argc, char* argv[])
{
    // Creation method 1: allocate header and data block in a single call
    CvMat* pmat1;
    pmat1 = cvCreateMat(8, 9, CV_32FC1);

    // Creation method 2: create the header first, then allocate the data block
    CvMat* pmat2;
    pmat2 = cvCreateMatHeader(4, 5, CV_8UC1);
    cvCreateData(pmat2);

    // Creation method 3: build a header over existing user data (no copy;
    // `data` must outlive `pmat3` — both live on this stack frame here)
    float data[4] = { 3, 4, 6, 0 };
    CvMat pmat3;
    cvInitMatHeader(&pmat3, 2, 2, CV_32FC1, data);

    // Creation method 4: clone an existing matrix (deep copy)
    CvMat* pmat4;
    pmat4 = cvCloneMat(pmat2);

    // Access the matrix's properties (test() is defined elsewhere in this file)
    test(pmat2);

    // Release the heap-allocated matrices; pmat3 is a stack header over
    // user data and must not be released
    cvReleaseMat(&pmat1);
    cvReleaseMat(&pmat2);
    cvReleaseMat(&pmat4);

    return 0;
}
开发者ID:JoyFYan,项目名称:OPENCV,代码行数:32,代码来源:t1.cpp
示例16: printf
// TODO: Would fail if m_nChannels != 3
// RGB to LAB
// Converts the source image to CIE-Lab, runs PCA over the per-pixel Lab
// triples and stores the decorrelated channel data in pChannels.
// Returns true unconditionally.
bool CFeatureExtraction::GetColorChannels(CvMat * pChannels, CvMat * pColorChannelsArr[])
{
    printf("\nCFeatureExtraction::GetColorChannels in\n");
    int nSize = COLOR_CHANNEL_NUM;

    // Convert to LAB color space
    IplImage *pLabImg = cvCreateImage(cvSize(m_pSrcImg->width,m_pSrcImg->height), IPL_DEPTH_32F, nSize);
    cvCvtColor(m_pSrcImgFloat,pLabImg,CV_BGR2Lab);

    // Put the 32F lab image data in a matrix header: one row per pixel,
    // one column per channel (borrows pLabImg's buffer — no copy, so the
    // image must stay alive until DoPCA returns).
    CvMat srcMat;
    cvInitMatHeader(&srcMat, m_nWidth*m_nHeight, nSize , CV_32F, (float*)pLabImg->imageData );

    // This matrix would hold the values represented in the new basis we've found
    //CvMat * pResultMat = cvCreateMat( m_nWidth*m_nHeight, nSize , CV_32F );
    CvMat * pResultMat = pChannels;

    // Actual calculation
    DoPCA(&srcMat, pResultMat, nSize, COLOR_CHANNEL_NUM);

    // Extracting the 3 primary channels
    //GetChannels(pResultMat, pColorChannelsArr, nSize, COLOR_CHANNEL_NUM);
    // NOTE(review): pColorChannelsArr is currently unused since the call
    // above is commented out.

    // Useful releasing
    cvReleaseImage(&pLabImg);
    printf("CFeatureExtraction::GetColorChannels out\n");
    return true;
}
开发者ID:altok234,项目名称:cgworkshop,代码行数:29,代码来源:FeatureExtraction.cpp
示例17: cvGetCol
// get column of input array
// Initializes `submat` as a height x 1 header over column `col` of `arr`
// — no data is copied; the parent's row step is preserved.
CV_IMPL CvMat*
cvGetCol( const CvArr* arr, CvMat* submat, int col )
{
    CvMat* res = 0;

    CV_FUNCNAME( "cvGetCol" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;

    // Coerce any array type to a CvMat header.
    if( !CV_IS_ARR( mat ))
        CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !submat )
        CV_ERROR( CV_StsNullPtr, "" );

    // Unsigned compare rejects both negative and too-large indices at once.
    if( (unsigned)col >= (unsigned)mat->width )
        CV_ERROR( CV_StsOutOfRange, "" );

    // Start at the column's first element; keep the parent's step so each
    // row advance lands on the next element of the column.
    CV_CALL( cvInitMatHeader( submat, mat->height, 1, mat->type,
                              mat->data.ptr + col*icvPixSize[CV_ARR_TYPE(mat->type)],
                              mat->step ));
    res = submat;

    __END__;

    return res;
}
开发者ID:mikanradojevic,项目名称:sdkpub,代码行数:30,代码来源:cvarray.cpp
示例18: cvInitMatHeader
void margBlobCorrector::setCameraMatrix(float _fX, float _fY, float _cX, float _cY){
float* _camera_matrix = new float[9];
_camera_matrix[0] = _fX;
_camera_matrix[1] = 0;
_camera_matrix[2] = _cX;
_camera_matrix[3] = 0;
_camera_matrix[4] = _fY;
_camera_matrix[5] = _cY;
_camera_matrix[6] = 0;
_camera_matrix[7] = 0;
_camera_matrix[8] = 1;
fX = _fX;
cX = _cX;
fY = _fY;
cY = _cY;
ifX= 1.0/fX;
ifY= 1.0/fY;
cvInitMatHeader(camera_matrix, 3, 3, CV_32FC1, _camera_matrix);
calculateLensUndistBounds();
delete[] _camera_matrix;
}
开发者ID:amintz,项目名称:M1.0,代码行数:27,代码来源:margBlobCorrector.cpp
示例19: cvGetRow
// get row of input array
// Initializes `submat` as a 1 x width header over row `row` of `arr` —
// no data is copied; the row is contiguous, hence CV_AUTOSTEP.
CV_IMPL CvMat*
cvGetRow( const CvArr* arr, CvMat* submat, int row )
{
    CvMat* res = 0;

    CV_FUNCNAME( "cvGetRow" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;

    // Coerce any array type to a CvMat header.
    if( !CV_IS_ARR( mat ))
        CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !submat )
        CV_ERROR( CV_StsNullPtr, "" );

    // Unsigned compare rejects both negative and too-large indices at once.
    if( (unsigned)row >= (unsigned)mat->height )
        CV_ERROR( CV_StsOutOfRange, "" );

    CV_CALL( cvInitMatHeader( submat, 1, mat->width, mat->type,
                              mat->data.ptr + row*mat->step,
                              CV_AUTOSTEP ));
    res = submat;

    __END__;

    return res;
}
开发者ID:mikanradojevic,项目名称:sdkpub,代码行数:30,代码来源:cvarray.cpp
示例20: cvCreateMat
/**
 * Projects a point in real world coordinates against the image.
 * Output: image coordinate in pixels.
 *
 * The projection matrix for each angle is computed lazily and cached in
 * projMatList; cached matrices stay alive for the object's lifetime.
 *
 * BUG FIX (memory leaks): the original leaked on every call — an unused
 * cvCreateMat(4, 3) allocation immediately overwritten, the rotation
 * matrix from calculateRotationMatrix(), and the data blocks of two
 * cvCreateMat() results whose headers were repointed at stack arrays via
 * cvInitMatHeader(). Stack headers over stack data are used instead, and
 * the rotation matrix is released after use.
 */
CvPoint MultipleViewGeomOld::getProjectionOf(float angle, CvPoint3D32f point) {
    // Fetch (or lazily build and cache) the projection matrix for this angle.
    CvMat *projMat;
    map<float, CvMat*>::iterator iter = projMatList.find(angle);
    if (iter == projMatList.end()) {
        // project matrix does not exist!!
        // Calculate rotation matrix
        CvMat* rtMat = calculateRotationMatrix(angle);
        // Calculate projection matrix and cache a private copy
        projMat = cvCloneMat(calculateProjectionMatrix(rtMat));
        projMatList.insert(pair<float, CvMat*> (angle, projMat));
        cvReleaseMat(&rtMat);   // FIX: was leaked
        // NOTE(review): if calculateProjectionMatrix() returns a
        // caller-owned matrix, it should be released here too — confirm
        // its ownership convention.
    } else {
        // otherwise it exists
        projMat = iter->second;
    }

    LOG4CPLUS_DEBUG(myLogger,"Projection matrix for angle: " << radToDegree(angle) << " and points: " << point << endl << printCvMat(projMat));

    // [u v 1] = proj * [X Y Z 1] — stack headers over stack data
    // (FIX: replaces leaked cvCreateMat allocations).
    float uvContents[3];
    CvMat uvMat;
    cvInitMatHeader(&uvMat, 3, 1, CV_32F, uvContents);

    float xyzContents[] = { point.x, point.y, point.z, 1 };
    CvMat xyzMat;
    cvInitMatHeader(&xyzMat, 4, 1, CV_32F, xyzContents);

    cvMatMul (projMat, &xyzMat, &uvMat);

    LOG4CPLUS_DEBUG(myLogger, "Result [u v 1] = proj * [X Y Z 1]: " << endl << printCvMat(&uvMat));
    return cvPoint(cvRound(cvmGet(&uvMat, 0, 0)), cvRound(cvmGet(&uvMat, 1, 0)));
}
开发者ID:ricleal,项目名称:3dac,代码行数:51,代码来源:MultipleViewGeomOld.cpp
注:本文中的cvInitMatHeader函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论