
C++ VO_Shape Class Code Examples


This article collects typical usage examples of the C++ VO_Shape class. If you are wondering what VO_Shape is used for, how to use it, or what working VO_Shape code looks like, the selected class code examples below may help.



A total of 20 VO_Shape class code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.

Example 1:

/**
 * @author     JIA Pei
 * @version    2010-06-07
 * @brief      Constrain all points respectively
 * @param      ioShape     	Input and Output - the input and output shape
*/
void VO_Point2DDistributionModel::VO_ConstrainAllPoints(VO_Shape& ioShape)
{
    unsigned int NbOfPoints = ioShape.GetNbOfPoints();
    Point2f pt;

    for(unsigned int i = 0; i < NbOfPoints; i++)
    {
        pt = ioShape.GetA2DPoint(i);
        VO_Point2DDistributionModel::VO_ConstrainSinglePoint( pt, this->m_VONormalizedEllipses[i] );
        ioShape.SetA2DPoint(pt, i);
    }
}
Developer: haifaben, Project: vosm, Lines: 18, Source: VO_Point2DDistributionModel.cpp
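
VO_ConstrainSinglePoint itself is not shown on this page. As a rough, generic illustration of what constraining a point against a normalized ellipse can look like (an assumption for illustration only, not the VOSM implementation), the sketch below clamps a 2D point back onto an axis-aligned ellipse boundary:

#include <cmath>

// Generic illustration only -- NOT VOSM's VO_ConstrainSinglePoint, whose code is not shown here.
// Clamps a point into an axis-aligned ellipse centred at (cx, cy) with semi-axes a and b:
// points outside the ellipse are pulled back onto its boundary along the ray from the centre.
struct SimplePoint { float x, y; };

static void ClampPointToEllipse(SimplePoint& pt, float cx, float cy, float a, float b)
{
    const float dx = pt.x - cx;
    const float dy = pt.y - cy;
    const float v = (dx * dx) / (a * a) + (dy * dy) / (b * b);
    if (v > 1.0f)                       // the point lies outside the ellipse
    {
        const float s = 1.0f / std::sqrt(v);
        pt.x = cx + dx * s;             // scale the offset back onto the boundary
        pt.y = cy + dy * s;
    }
}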


Example 2: CalcFaceYaw

float CRecognitionAlgs::CalcFaceYaw(const vector<float>& iLine,
                                    const VO_Shape& iShape,
                                    const VO_FaceParts& iFaceParts)
{
    float yaw = 0.0f;
    int dim = iShape.GetNbOfDim();

    // Theoretically, using eye corner is correct, but it's not stable at all. Therefore, here we use COG_left and COG_right instead.
    ///////////////////////////////////////////////////////////////////////////////
    //     float leftDist = 0.0f, rightDist = 0.0f;    
    //     vector<unsigned int> eyeCornerPoints = iFaceParts.GetEyeCornerPoints().GetIndexes();
    //     Point2f leftmostEyeCorner = Point2f(FLT_MAX, 0.0f);
    //     Point2f rightmostEyeCorner = Point2f(0.0f, 0.0f);
    // 
    //     for(unsigned int i = 0; i < eyeCornerPoints.size(); ++i)
    //     {
    //         if(leftmostEyeCorner.x > iShape.GetAShape(dim*eyeCornerPoints[i]) )
    //         {
    //             leftmostEyeCorner.x = iShape.GetAShape(dim*eyeCornerPoints[i]);
    //             leftmostEyeCorner.y = iShape.GetAShape(dim*eyeCornerPoints[i]+1);
    //         }
    //         if(rightmostEyeCorner.x < iShape.GetAShape(dim*eyeCornerPoints[i]) )
    //         {
    //             rightmostEyeCorner.x = iShape.GetAShape(dim*eyeCornerPoints[i]);
    //             rightmostEyeCorner.y = iShape.GetAShape(dim*eyeCornerPoints[i]+1);
    //         }
    //     }
    //     leftDist = cvDistFromAPoint2ALine2D(leftmostEyeCorner,  iLine);
    //     rightDist = cvDistFromAPoint2ALine2D(rightmostEyeCorner,  iLine);
    //     float r = leftDist/rightDist;
    // Refer to my PhD dissertation. Chapter 4
    //     yaw = atan ( ( 0.65*(r-1) ) / ( 0.24 * (r+1) ) ) * 180.0f / CV_PI;
    ///////////////////////////////////////////////////////////////////////////////

    float leftDist = 0.0f, rightDist = 0.0f;
    vector<unsigned int> leftSidePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTSIDEPOINTS).GetIndexes();
    vector<unsigned int> rightSidePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTSIDEPOINTS).GetIndexes();
    for(unsigned int i = 0; i < leftSidePoints.size(); ++i)
    {
        leftDist += cvDistFromAPoint2ALine2D(Point2f(iShape.GetAShape(dim*leftSidePoints[i]), iShape.GetAShape(dim*leftSidePoints[i]+1)),  iLine);
    }
    for(unsigned int i = 0; i < rightSidePoints.size(); ++i)
    {
        rightDist += cvDistFromAPoint2ALine2D(Point2f(iShape.GetAShape(dim*rightSidePoints[i]), iShape.GetAShape(dim*rightSidePoints[i]+1)),  iLine);
    }

    float r = leftDist/rightDist;
    // Refer to my PhD dissertation. Chapter 4
    // yaw = atan ( ( 0.65*(r-1) ) / ( 0.24 * (r+1) ) ) * 180.0f / CV_PI;
    yaw = atan ( ( (r-1) ) / ((r+1) ) ) * safeDoubleToFloat(180.0 / CV_PI);

    return yaw;
}
Developer: HVisionSensing, Project: mc-vosm, Lines: 53, Source: VO_RecognitionAlgs.cpp
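
The yaw estimate above boils down to a single formula on the ratio r of the summed left-side and right-side distances to the key line. A minimal standalone sketch (not part of VOSM; the function name is made up) that evaluates it:

#include <cmath>
#include <cstdio>

// yaw = atan((r - 1) / (r + 1)) * 180 / pi, with r = leftDist / rightDist.
// The commented-out dissertation variant above additionally scales the two terms by 0.65 and 0.24.
static float YawFromSideDistanceRatio(float leftDist, float rightDist)
{
    const float r = leftDist / rightDist;
    return std::atan((r - 1.0f) / (r + 1.0f)) * 180.0f / 3.14159265f;
}

int main()
{
    // A roughly frontal face gives r close to 1, hence a yaw close to 0 degrees.
    std::printf("%f\n", YawFromSideDistanceRatio(10.0f, 10.0f));
    // A face turned so that the left side is foreshortened gives r < 1 and a negative yaw.
    std::printf("%f\n", YawFromSideDistanceRatio(5.0f, 10.0f));
    return 0;
}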


Example 3: PyramidFit

/**
 * @author      YAO Wei, JIA Pei
 * @version     2010-05-20
 * @brief       Fit the shape at one pyramid level using ND profile matching
 * @param       ioShape     Input and output - the input and output shape
 * @param       iImg        Input - image to be fitted
 * @param       oImages     Output - the output images
 * @param       iLev        Input - current pyramid level
 * @param       PClose      Input - percentage of converged points. Say, 0.9 means if 90% of the points
 *                                  are judged as converged, the iteration of this pyramid can stop
 * @param       epoch       Input - the maximum number of iterations
 * @param       profdim     Input - dimension used during fitting. For example, the trained data could be 4D, but the user may only use 1D
 * @param       record      Input - whether to record an image of each intermediate fitting result
 * @note        Refer to "AAM Revisited, page 34, figure 13", particularly, those steps.
*/
void VO_FittingASMNDProfiles::PyramidFit(   VO_Shape& ioShape,
                                            const cv::Mat& iImg,
                                            std::vector<cv::Mat>& oImages,
                                            unsigned int iLev,
                                            float PClose,
                                            unsigned int epoch,
                                            unsigned int profdim,
                                            bool record)
{
    VO_Shape tempShape = ioShape;
    int nGoodLandmarks = 0;
    float PyrScale = pow(2.0f, (float) (iLev) );

    const int nQualifyingDisplacements = (int)(this->m_VOASMNDProfile->m_iNbOfPoints * PClose);

    for(unsigned int iter = 0; iter < epoch; iter++)
    {
        this->m_iIteration++;
        // estimate the best ioShape by profile matching the landmarks in this->m_VOFittingShape
        nGoodLandmarks = VO_FittingASMNDProfiles::UpdateShape(  this->m_VOASMNDProfile,
                                                                iImg,
                                                                tempShape,
                                                                this->m_vShape2DInfo,
                                                                this->m_VOASMNDProfile->m_vvMeanNormalizedProfile[iLev],
                                                                this->m_VOASMNDProfile->m_vvvCVMInverseOfSg[iLev],
                                                                3,
                                                                profdim);

        // conform ioShape to the shape model
        this->m_VOASMNDProfile->VO_CalcAllParams4AnyShapeWithConstrain( tempShape,
                                                                        this->m_MatModelAlignedShapeParam,
                                                                        this->m_fScale,
                                                                        this->m_vRotateAngles,
                                                                        this->m_MatCenterOfGravity );
        tempShape.ConstrainShapeInImage(iImg);

        if(record)
        {
            // record the fitting result of this iteration
            cv::Mat temp3 = cv::Mat(this->m_ImageInput.size(), this->m_ImageInput.type(), this->m_ImageInput.channels());
            cv::Mat temp3ROI = temp3(cv::Range (0, (int)(this->m_ImageInput.rows/PyrScale) ), cv::Range (0, (int)(this->m_ImageInput.cols/PyrScale) ) );
            cv::resize(this->m_ImageInput, temp3ROI, temp3ROI.size());
            VO_Fitting2DSM::VO_DrawMesh(tempShape / this->m_fScale2, this->m_VOASMNDProfile, temp3);
            oImages.push_back(temp3);
        }

        // the fitting result is good enough to stop the iteration
        if(nGoodLandmarks > nQualifyingDisplacements)
            break;
    }
    ioShape = tempShape;
}
Developer: jiapei100, Project: VOSM, Lines: 66, Source: VO_FittingASMNDProfiles.cpp
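
Two bookkeeping quantities drive the loop above: the pyramid scale and the stopping threshold. A minimal sketch with assumed values (68 landmarks, PClose = 0.9, level 2; all numbers are hypothetical, not taken from VOSM):

#include <cmath>
#include <cstdio>

int main()
{
    const unsigned int nbOfPoints = 68;   // hypothetical model size
    const float PClose = 0.9f;
    const unsigned int iLev = 2;          // hypothetical pyramid level

    // The image at level iLev is downscaled by 2^iLev.
    const float PyrScale = std::pow(2.0f, (float)iLev);
    // The level stops iterating once more than this many landmarks are judged converged.
    const int nQualifyingDisplacements = (int)(nbOfPoints * PClose);

    std::printf("level %u: scale %.0f, stop once > %d of %u landmarks are good\n",
                iLev, PyrScale, nQualifyingDisplacements, nbOfPoints);
    return 0;
}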


Example 4: SaveShapeResults

/**
 * @param	fd			- input		folder name
 * @param	fnIdx		- input		file name index used to build the result file name
 * @param	deviation	- input		deviation from refShape to fittedShape
 * @param	ptDists		- input		per-point distances from the ground truth
 * @param	ptErrorFreq	- input		point error frequency, for plotting the error curve
 * @param	fittedShape	- input		fitting result
 * @return	void
 */
void CRecognitionAlgs::SaveShapeResults(		const string& fd,
												const string& fnIdx,
												float deviation,
												vector<float>& ptDists,
												vector<float>& ptErrorFreq,
												const VO_Shape& fittedShape)
{
    string fn;
    fn = fd + "/" + fnIdx + ".res";
    
    fstream fp;
    fp.open(fn.c_str (), ios::out);

	fp << "Error per point -- Distance from ground truth" << endl;
	for(unsigned int i = 0; i < ptDists.size(); ++i){
		fp << ptDists[i] << endl;
	}
	fp << endl;

	fp << "Total landmark error" << endl;
	float errSum = std::accumulate(ptDists.begin(),ptDists.end(),0.0f);
	fp << errSum << endl;
	fp <<"Average landmark distance" << endl;
	fp << errSum / ptDists.size() << endl;
	fp << endl;

    fp << "Total Deviation" << endl << deviation << endl;				// deviation
    fp << "Point Error -- Frequency" << endl;
    for(unsigned int i = 0; i < ptErrorFreq.size(); i++)
    {
        fp << ptErrorFreq[i] << " ";
    }
	fp << endl;
	fp << endl;
	fp << "Fitted points" << endl;
	//output actual points along with error frequency
	unsigned int NbOfShapeDim   = fittedShape.GetNbOfDim();
	unsigned int NbOfPoints     = fittedShape.GetNbOfPoints();
	for(unsigned int i = 0; i < NbOfPoints; i++)
	{
		for(unsigned int j = 0; j < NbOfShapeDim; j++)
		{
			fp << fittedShape.GetAShape(j*NbOfPoints+i) << " ";
		}
		fp << endl;
	}
    fp << endl;
	
    fp.close();fp.clear();
}
Developer: HVisionSensing, Project: mc-vosm, Lines: 58, Source: VO_RecognitionAlgs.cpp
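
For reference, a .res file written by this routine has the layout below; every number here is made up purely for illustration (three landmarks, four error-frequency levels):

Error per point -- Distance from ground truth
1.3
0.7
2.1

Total landmark error
4.1
Average landmark distance
1.36667

Total Deviation
3.8
Point Error -- Frequency
0 0.333333 0.666667 1

Fitted points
215.5 180.25
228 182.5
221.75 196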


Example 5: ReadASF

/**
 * @author      JIA Pei
 * @version     2010-02-07
 * @brief       Read a file and obtain all annotation data in VO_Shape
 * @param       filename    input parameter     -   which .asf annotation file to read
 * @param       oShape      output parameter    -   save annotation data to AAM shape data structure
*/
void CAnnotationDBIO::ReadASF(  const std::string &filename,
                                VO_Shape& oShape )
{
    oShape.SetAnnotationFileName(filename);

    std::fstream fp;
    fp.open(filename.c_str (), std::ios::in);

    std::stringstream ss;
    std::string temp;
    float tempFloat = 0.0f;

    // Just for the specific .asf
    for(unsigned int i = 0; i < 10; i++)
        //fp >> temp;
        std::getline(fp, temp);

    unsigned int NbOfPoints = atoi(temp.c_str ());
    oShape.Resize(2, NbOfPoints);

    // Just for the specific .asf
    for(unsigned int i = 0; i < 6; i++)
        //fp >> temp;
        std::getline(fp, temp);

    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        fp >> temp >> temp >> temp;
        // In DTU IMM, x runs from left to right
        ss << temp;
        ss >> tempFloat;
        ss.clear();
        oShape(0, i) = tempFloat;
        fp >> temp;
        // In DTU IMM, y runs from top to bottom
        ss << temp;
        ss >> tempFloat;
        ss.clear();
        //fp >> temp;
        std::getline(fp, temp);
        // In sum, topleft is (0,0), right bottom is (640,480)
        oShape(1, i) = tempFloat;
    }

    // Just for the specific .asf
    for(unsigned int i = 0; i < 5; i++)
        fp >> temp;

    fp.close ();fp.clear();
}
Developer: jiapei100, Project: VOSM, Lines: 57, Source: VO_AnnotationDBIO.cpp


Example 6: switch

/**
 * @author      JIA Pei
 * @version     2010-05-07
 * @brief       draw a line on the image
 * @param       iShape          Input -- the input shape
 * @param       theSubshape     Input -- the subshape along which the line was fitted
 * @param       iLine           Input -- the line (A, B, C) of A*x + B*y + C = 0
 * @param       oImg            Output -- output image
 * @param       dir             Input -- direction of the line, VERTICAL or HORIZONTAL
 * @param       ws              Input -- if true, bound the line by the whole shape iShape; otherwise by theSubshape
 * @param       offset          Input -- add some offset at both ends of the line segment itself
 * @param       ci              Input -- color index
 * @return      void
 */
void VO_Fitting2DSM::VO_DrawAline(  const VO_Shape& iShape,
                                    const VO_Shape& theSubshape,
                                    const std::vector<float>& iLine,
                                    cv::Mat& oImg,
                                    unsigned int dir,
                                    bool ws,
                                    unsigned int offset,
                                    unsigned int ci)
{
    switch(dir)
    {
    case VERTICAL:
    {
        float A = iLine[0];
        float B = iLine[1];
        float C = iLine[2];
        cv::Point2f ptf1, ptf2;
        if(ws)
        {
            ptf1.y = iShape.MinY() - offset;
            ptf2.y = iShape.MaxY() + offset;
        }
        else
        {
            ptf1.y = theSubshape.MinY() - offset;
            ptf2.y = theSubshape.MaxY() + offset;
        }
        ptf1.x = -(C + B*ptf1.y)/A;
        ptf2.x = -(C + B*ptf2.y)/A;
        cv::Point pt1 = cvPointFrom32f( ptf1 );
        cv::Point pt2 = cvPointFrom32f( ptf2 );
        cv::line( oImg, pt1, pt2, colors[ci], 2, 0, 0);
    }
    break;
    case HORIZONTAL:
    default:
    {
        float A = iLine[0];
        float B = iLine[1];
        float C = iLine[2];
        cv::Point2f ptf1, ptf2;
        if(ws)
        {
            ptf1.x = iShape.MinX() - offset;
            ptf2.x = iShape.MaxX() + offset;
        }
        else
        {
            ptf1.x = theSubshape.MinX() - offset;
            ptf2.x = theSubshape.MaxX() + offset;
        }
        ptf1.y = -(C + A*ptf1.x)/B;
        ptf2.y = -(C + A*ptf2.x)/B;
        cv::Point pt1 = cvPointFrom32f( ptf1 );
        cv::Point pt2 = cvPointFrom32f( ptf2 );
        cv::line( oImg, pt1, pt2, colors[ci], 2, 0, 0);
    }
    break;
    }
}
Developer: kod3r, Project: VOSM, Lines: 74, Source: VO_Fitting2DSM.cpp


Example 7: EvaluateFaceTrackedByProbabilityImage

/**
* @param    trackalg    - input and output  the tracking algorithm,
*                         which will record some information for every frame
* @param    iImg        - input     input image
* @param    iShape      - input     the current tracked shape
* @param    smallSize   - input     small window size passed to the tracker
* @param    bigSize     - input     big window size passed to the tracker
* @return   bool        whether the tracked shape is acceptable
*/
bool CRecognitionAlgs::EvaluateFaceTrackedByProbabilityImage(
    CTrackingAlgs* trackalg,
    const Mat& iImg,
    const VO_Shape& iShape,
    Size smallSize,
    Size bigSize)
{
    double t = (double)cvGetTickCount();

    Rect rect = iShape.GetShapeBoundRect();

    trackalg->SetConfiguration( CTrackingAlgs::CAMSHIFT,
                                CTrackingAlgs::PROBABILITYIMAGE);
    trackalg->Tracking( rect,
                        iImg,
                        smallSize,
                        bigSize );

    bool res = false;
    if( !trackalg->IsObjectTracked() )
        res = false;
    else if ( ((double)rect.height/(double)rect.width <= 0.75)
        || ((double)rect.height/(double)rect.width >= 2.5) )
        res = false;
    else
        res = true;

    t = ((double)cvGetTickCount() -  t )
        / (cvGetTickFrequency()*1000.);
    cout << "Camshift Tracking time cost: " << t << "millisec" << endl;

    return res;
}
Developer: HVisionSensing, Project: mc-vosm, Lines: 40, Source: VO_RecognitionAlgs.cpp


Example 8: SplitShapeTextureParams

/**
 * @author      JIA Pei
 * @version     2016-08-24
 * @brief       a pair of shape and texture, respectively decomposed to a shape and a texture
 * @param       iPairShapeTexture   Input - the pair of shape and texture
 * @param       oShapeParams        Output - shape parameters
 * @param       oTextureParams      Output - texture parameters
 * @return      void
*/
void VO_AXM::SplitShapeTextureParams(const std::pair<VO_Shape, VO_Texture>& iPairShapeTexture,
                                     cv::Mat_<float>& oShapeParams,
                                     cv::Mat_<float>& oTextureParams )
{
    VO_Shape iShape = iPairShapeTexture.first;
    VO_Texture iTexture = iPairShapeTexture.second;

    unsigned int NbOfShapeDim = iShape.GetNbOfDim();
    float tempNorm = 0.0f;
    std::vector<float> tempTheta;
    tempTheta.resize(NbOfShapeDim == 2? 1:3);
    cv::Mat_<float> tempCOG = cv::Mat_<float>::zeros(1, NbOfShapeDim);

    this->VO_CalcAllParams4AnyShapeWithConstrain(iShape, oShapeParams, tempNorm, tempTheta, tempCOG);
    this->VO_CalcAllParams4AnyTexture(iTexture, oTextureParams);
}
Developer: jiapei100, Project: VOSM, Lines: 25, Source: VO_AXM.cpp


Example 9: VO_CMUInverseCompositional

/**
 * @author      Yao Wei
 * @brief       CMU Inverse Compositional !!
 * @param       matDeltaP       Input -- delta p
 * @param       matDeltaQ       Input -- delta q
 * @param       s               Input -- the shape
 * @param       estShape        Output -- newly estimated shape by inverse compositional
 */
void VO_FittingAAMInverseIA::VO_CMUInverseCompositional(const Mat_<float>& matDeltaP,
                                                        const Mat_<float>& matDeltaQ,
                                                        const VO_Shape& s,
                                                        VO_Shape& estShape)
{
    VO_Shape S0;
    this->VO_PParamQParam2ModelAlignedShape( matDeltaP, matDeltaQ, S0);
//    cvConvertScale(dpq, __inv_pq, -1);
//    __shape.CalcShape(__inv_pq, __update_s0);    // __update_s0 = N.W(s0, -delta_p, -delta_q)

    //Secondly: Composing the Incremental Warp with the Current Warp Estimate.
    Point2f res, tmp;
    int count = 0;
    vector<unsigned int> vertexIdxes;

    for(unsigned int i = 0; i < this->m_VOAAMInverseIA->m_iNbOfPoints; i++)
    {
        res.x = 0.0;    res.y = 0.0;
        count = 0;
        //The only problem with this approach is which triangle do we use?
        //In general there will be several triangles that share the i-th vertex.
        for(unsigned j = 0; j < this->m_VOAAMInverseIA->m_iNbOfTriangles; j++)    // see Figure (11)
        {
            if ( this->m_vTriangle2D[j].HasNode(i) )
            {
                vertexIdxes = this->m_vTriangle2D[j].GetVertexIndexes();

                VO_WarpingPoint::WarpOnePoint(  S0.GetA2DPoint(i),
                                                this->m_vTriangle2D[j], 
                                                tmp,
                                                s.GetA2DPoint(vertexIdxes[0]),
                                                s.GetA2DPoint(vertexIdxes[1]),
                                                s.GetA2DPoint(vertexIdxes[2]) );
                res.x += tmp.x;
                res.y += tmp.y;
                count++;
            }
        }
        // average the result so as to smooth the warp at each vertex
        if(count == 0)
            cerr << "There must be something wrong when CMU Inverse Compositional !" << endl;
        res.x /= count;
        res.y /= count;
        estShape.SetA2DPoint(res, i);
    }
}
Developer: cDoru, Project: face, Lines: 54, Source: VO_FittingAAMInverseIA.cpp


Example 10: CalcFaceKeyline

/**
* @brief    Calculate one of the key lines on the face
* @param    oLine       Output  the output line (A, B, C) of A*x + B*y + C = 0
* @param    iShape      Input   the known shape
* @param    iFaceParts  Input   the face parts
* @param    oSubshape   Output  the output subshape, namely, the points that the line is fitted to
* @param    partIdx     Input   which part is it
* @return   void
 */
void VO_KeyPoint::CalcFaceKeyline(
    std::vector<float>& oLine,
    const VO_Shape& iShape,
    const VO_FaceParts& iFaceParts,
    VO_Shape& oSubshape,
    unsigned int partIdx)
{
    oLine.resize(3);
    int dim = iShape.GetNbOfDim();

    cv::Vec4f line;
    std::vector<unsigned int> linePoints;

    switch(partIdx)
    {
    case VO_FacePart::NOSTRIL:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
        break;
    case VO_FacePart::MOUTHCORNERPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::MOUTHCORNERPOINTS).GetIndexes();
        break;
    case VO_FacePart::PITCHAXISLINEPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::PITCHAXISLINEPOINTS).GetIndexes();
        break;
    case VO_FacePart::EYECORNERPOINTS:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::EYECORNERPOINTS).GetIndexes();
        break;
    case VO_FacePart::MIDLINEPOINTS:
    default:
        linePoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::EYECORNERPOINTS).GetIndexes();
        break;
    }
    
    oSubshape = iShape.GetSubShape(linePoints);

    // Explained by JIA Pei: sometimes there are no linePoints, which means the specified parts are not available in a particular database
    if(linePoints.size() >= 2 )
    {
        cv::fitLine( oSubshape.GetTheShape(), line, CV_DIST_L2, 0, 0.001, 0.001 );

        // Ax+By+C = 0
        oLine[0] = -line[1];
        oLine[1] = line[0];
        oLine[2] = line[1]*line[2]-line[0]*line[3];
    }
}
Developer: , Project: , Lines: 55, Source:
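
A quick standalone check (not part of VOSM) of the conversion used above: cv::fitLine returns (vx, vy, x0, y0), i.e. a unit direction and a point on the line, and CalcFaceKeyline re-expresses it as A*x + B*y + C = 0 with A = -vy, B = vx, C = vy*x0 - vx*y0:

#include <cstdio>

int main()
{
    // Hypothetical fitLine output: direction (vx, vy), point (x0, y0) on the line.
    const float vx = 0.6f, vy = 0.8f, x0 = 10.0f, y0 = 20.0f;
    const float A = -vy, B = vx, C = vy * x0 - vx * y0;

    // The point (x0, y0) satisfies the implicit equation ...
    std::printf("A*x0 + B*y0 + C = %f (expected 0)\n", A * x0 + B * y0 + C);
    // ... and the direction (vx, vy) is perpendicular to the normal (A, B), so it lies along the line.
    std::printf("A*vx + B*vy     = %f (expected 0)\n", A * vx + B * vy);
    return 0;
}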


Example 11: ReadPTS

/**
 * @author      JIA Pei
 * @version     2010-02-07
 * @brief       Read a file and obtain all annotation data in VO_Shape
 * @param       filename    input parameter, which .pts annotation file to read
 * @param       oShape      output parameter, save annotation data to AAM shape data structure
*/
void CAnnotationDBIO::ReadPTS(  const std::string &filename,
                                VO_Shape& oShape)
{
    oShape.SetAnnotationFileName(filename);

    std::fstream fp;
    fp.open(filename.c_str (), std::ios::in);

    std::string temp, oneLine;
    std::stringstream ss;
    float tempFloat = 0.0f;

    do
    {
        fp >> temp;
    }while (temp!="n_points:");

    fp >> temp;
    ss << temp;
    unsigned int NbOfPoints;
    ss >> NbOfPoints;
    ss.clear();
    oShape.Resize(2, NbOfPoints);

    fp >> temp;

    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        fp >> temp;
        // x runs from left to right
        ss << temp;
        ss >> tempFloat;
        ss.clear();
        oShape(0, i) = tempFloat;
        fp >> temp;
        // y runs from top to bottom
        ss << temp;
        ss >> tempFloat;
        ss.clear();
        // In sum, topleft is (0,0), right bottom is (720,576)
        oShape(1, i) = tempFloat;
    }

    fp.close ();fp.clear();
}
Developer: jiapei100, Project: VOSM, Lines: 52, Source: VO_AnnotationDBIO.cpp


Example 12: CalcShapeFittingEffect

/**
* @brief    Calculate face fitting effect
* @param    refShape    - input     reference shape
* @param    fittedShape - input     fitting result
* @param    deviation   - output    deviation from refShape to fittedShape
* @param    ptErrorFreq - output    point error frequency
* @param    nb          - input     number of evaluation levels to use
* @param    ptErrPerPoint - output  optional per-point error distances (may be NULL)
* @return   void
*/
void CRecognitionAlgs::CalcShapeFittingEffect(	const VO_Shape& refShape,
												const VO_Shape& fittedShape,
												float& deviation,
												vector<float>& ptErrorFreq,
												int nb,
												vector<float>* ptErrPerPoint)
{
    assert(refShape.GetNbOfDim() == fittedShape.GetNbOfDim());
	assert(refShape.GetNbOfPoints() == fittedShape.GetNbOfPoints());
    unsigned int NbOfShapeDim   = refShape.GetNbOfDim();
    unsigned int NbOfPoints     = refShape.GetNbOfPoints();
	ptErrorFreq.resize(nb);

	vector<float> ptDists(NbOfPoints, 0.0f);
	
	for(unsigned int i = 0; i < NbOfPoints; i++)
	{
		ptDists[i] = 0.0f;
		for(unsigned int j = 0; j < NbOfShapeDim; j++)
		{
			ptDists[i] += pow(refShape.GetAShape(j*NbOfPoints+i) - fittedShape.GetAShape(j*NbOfPoints+i), 2.0f);
		}
		ptDists[i] = sqrt(ptDists[i]);
	}
	
	ptErrorFreq.resize(nb);
	for(int i = 0; i < nb; i++)
	{
		for (unsigned int j = 0; j < NbOfPoints; j++)
		{
			if (ptDists[j] < i)
			{
				ptErrorFreq[i]++;
			}
		}
		ptErrorFreq[i] /= static_cast<float>(NbOfPoints);
	}
	float sumPtDist = 0.0;
	for(unsigned int i = 0; i<NbOfPoints;++i){
		sumPtDist += ptDists[i];
	}
	printf("Avg ptDists = %f\n",sumPtDist/NbOfPoints);

    deviation = CRecognitionAlgs::ShapeDistance(refShape, fittedShape);
	if(ptErrPerPoint != 0){
		(*ptErrPerPoint) = ptDists;
	}
}
Developer: HVisionSensing, Project: mc-vosm, Lines: 57, Source: VO_RecognitionAlgs.cpp
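
The ptErrorFreq vector computed above is a cumulative histogram: entry i is the fraction of landmarks whose distance to the ground truth is below i pixels. A minimal sketch on made-up distances (not VOSM code):

#include <cstddef>
#include <cstdio>
#include <vector>

int main()
{
    const std::vector<float> ptDists = { 0.4f, 1.2f, 2.7f, 3.9f };  // hypothetical per-point errors
    const int nb = 5;                                               // evaluation levels
    std::vector<float> ptErrorFreq(nb, 0.0f);

    for (int i = 0; i < nb; i++)
    {
        for (std::size_t j = 0; j < ptDists.size(); j++)
        {
            if (ptDists[j] < i)
                ptErrorFreq[i]++;
        }
        ptErrorFreq[i] /= static_cast<float>(ptDists.size());
    }

    for (int i = 0; i < nb; i++)
        std::printf("fraction of points with error < %d px: %.2f\n", i, ptErrorFreq[i]);
    return 0;
}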


Example 13: WritePTS

/**
 * @author      JIA Pei
 * @version     2010-02-07
 * @brief       Write all annotation data in VO_Shape to a file
 * @param       filename    output parameter, which .pts annotation file to write
 * @param       iAAMShape   input parameter, save annotation data from AAM shape data structure
*/
void CAnnotationDBIO::WritePTS( const std::string &filename,
                                const VO_Shape& iAAMShape)
{
    std::fstream fp;
    fp.open(filename.c_str (), std::ios::out);

    std::string temp, oneLine;
    std::stringstream ss;
    float tempFloat = 0.0f;
    unsigned int NbOfPoints = iAAMShape.GetNbOfPoints();

    fp << "version: 1" << std::endl
    << "n_points: " << NbOfPoints << std::endl
    << "{" << std::endl;

    for (unsigned int i = 0; i < NbOfPoints; i++)
    {
        fp << iAAMShape.GetA2DPoint(i).x << " " << iAAMShape.GetA2DPoint(i).y << std::endl;
    }

    fp << "}" << std::endl << std::endl;

    fp.close ();
}
Developer: jiapei100, Project: VOSM, Lines: 31, Source: VO_AnnotationDBIO.cpp
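
The .pts layout written above (and parsed back by ReadPTS in Example 11) looks like this; the three coordinate pairs are made-up values for illustration:

version: 1
n_points: 3
{
112.5 203.25
118 210.75
125.5 207
}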


Example 14: iImg

/**
 * @author      JIA Pei
 * @version     2010-02-22
 * @brief       Load the LTC (local texture constraint) features for one annotated point
 * @param       iImg            Input    -- the concerned image
 * @param       theShape        Input    -- the concerned shape
 * @param       ptIdx           Input    -- which point?
 * @param       imgSize         Input    -- the size of the image patch around the point
 * @param       vofeatures      Input    -- the feature extractor used to generate the LTC
 * @param       shiftX          Input    -- shift in X direction
 * @param       shiftY          Input    -- shift in Y direction
 * @return      void
 */
void VO_ASMLTCs::VO_LoadLTC4OneAnnotatedPoint(  const cv::Mat& iImg,
                                                const VO_Shape& theShape,
                                                unsigned int ptIdx,
                                                cv::Size imgSize,
                                                VO_Features* vofeatures,
                                                int shiftX,
                                                int shiftY)
{
    cv::Point2f pt                 = theShape.GetA2DPoint(ptIdx);
    pt.x                        += shiftX;
    pt.y                        += shiftY;
    cv::Rect rect                 = VO_ASMLTCs::VO_CalcImagePatchRect(iImg, pt, imgSize);
    cv::Mat imgPatch             = iImg(rect);
    vofeatures->VO_GenerateAllFeatures(imgPatch);
}
Developer: , Project: , Lines: 28, Source:


Example 15:

/**
 * @author      JIA Pei
 * @version     2010-05-07
 * @brief       draw the shape mesh (edges and key points) on the image
 * @param       iShape          Input -- the input shape
 * @param       iModel          Input -- the model
 * @param       ioImg           Input and Output -- the image
 * @return      void
 */
void VO_Fitting2DSM::VO_DrawMesh(const VO_Shape& iShape, const VO_AXM* iModel, cv::Mat& ioImg)
{
    cv::Point iorg,idst;
    std::vector<VO_Edge> edges = iModel->GetEdge();
    unsigned int NbOfEdges = iModel->GetNbOfEdges();

    for (unsigned int i = 0; i < NbOfEdges; i++)
    {
        iorg = cvPointFrom32f( iShape.GetA2DPoint( edges[i].GetIndex1() ) );
        idst = cvPointFrom32f( iShape.GetA2DPoint( edges[i].GetIndex2() ) );
        // Edge
        cv::line( ioImg, iorg, idst, colors[8], 1, 0, 0 );
        // Key points
        cv::circle( ioImg, iorg, 2, colors[0], -1, 8, 0 );
        cv::circle( ioImg, idst, 2, colors[0], -1, 8, 0 );
    }
}
Developer: kod3r, Project: VOSM, Lines: 26, Source: VO_Fitting2DSM.cpp


Example 16: translation

/**
 * @brief First estimation of the fitted shape, by scaling and recentring only
 * @param iShape -- input shape
 * @param rect   -- the detected rectangle used to compute the scale factors
 * @return VO_Shape -- the scaled and recentred shape
 */
VO_Shape VO_Fitting2DSM::VO_FirstEstimationByScaling(   const VO_Shape& iShape,
        const cv::Rect& rect )
{
    VO_Shape res = iShape;
    cv::Rect_<float> rect0 = iShape.GetShapeRect();
    float fScaleX = (float)rect.width/rect0.width * 0.80f;
    float fScaleY = (float)rect.height/rect0.height * 0.80f;
    res.ScaleX(fScaleX);
    res.ScaleY(fScaleY);
    rect0 = iShape.GetShapeBoundRect();
    cv::Mat_<float> translation = cv::Mat_<float>::zeros(2, 1);
    float centerX = (float)rect.x + (float)rect.width/2.0f;
    float centerY = (float)rect.y + (float)rect.height/2.0f;
    float center0X = (float)rect0.x + (float)rect0.width/2.0f;
    float center0Y = (float)rect0.y + (float)rect0.height/2.0f;
    translation(0,0) = centerX - center0X;
    translation(1,0) = centerY - center0Y;
    res.Translate( translation );
    return res;
}
Developer: kod3r, Project: VOSM, Lines: 26, Source: VO_Fitting2DSM.cpp


Example 17: switch

/**
 * @brief       Calculate some key points on the face
 * @param       oPoint      output  point list
 * @param       iShape      input   shape
 * @param       iFaceParts  input   face parts
 * @param       ptType      input   point type
 * @return      void
 */
void VO_KeyPoint::CalcFaceKeyPoint( cv::Point2f& oPoint,
                                    const VO_Shape& iShape,
                                    const VO_FaceParts& iFaceParts,
                                    unsigned int ptType)
{
    std::vector<unsigned int> facePartsPoints;
    VO_Shape subiShape;
    // Very very very very important.
    // Explained by JIA Pei.
    // "resize()" is just for resize;
    // it doesn't always reset what's already inside the std::vector to "0".
    // Therefore, clear() is a must before resize().

    switch(ptType)
    {
    case CENTEROFGRAVITY:
        if (iShape.GetNbOfPoints() > 0)
            oPoint = iShape.GetA2DPoint( VO_Shape::CENTER);
        break;
    case LEFTEYELEFTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::LEFTMOST);
            }
        }
        break;
    case LEFTEYERIGHTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::RIGHTMOST);
            }
        }
        break;
    case LEFTEYECENTER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint( VO_Shape::CENTER);
            }
        }
        break;
    case RIGHTEYELEFTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::LEFTMOST);
            }
        }
        break;
    case RIGHTEYERIGHTCORNER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint(VO_Shape::RIGHTMOST);
            }
        }
        break;
    case RIGHTEYECENTER:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
                oPoint = subiShape.GetA2DPoint( VO_Shape::CENTER);
            }
        }
        break;
    case NOSETIPKEY:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSETIP).GetIndexes();    // Just one point
            if (facePartsPoints.size() == 1)
                oPoint = iShape.GetA2DPoint(facePartsPoints[0]);
        }
        break;
    case NOSTRILLEFT:
        {
            facePartsPoints = iFaceParts.VO_GetOneFacePart(VO_FacePart::NOSTRIL).GetIndexes();
            if (facePartsPoints.size() > 0)
            {
                subiShape = iShape.GetSubShape(facePartsPoints);
//......... the rest of the code is omitted here .........
Developer: , Project: , Lines: 101, Source:


Example 18: ConstrainShapeInImage

/**
 * @author     	JIA Pei
 * @version    	2010-05-20
 * @brief      	Basic AAM Fitting, for dynamic image sequence
 * @param      	iImg			Input - image to be fitted
 * @param      	ioShape         Input and Output - the fitted shape
 * @param      	oImg            Output - the fitted image
 * @param		epoch			Input - the iteration epoch
*/
float VO_FittingAAMBasic::VO_BasicAAMFitting(const Mat& iImg,
											VO_Shape& ioShape,
											Mat& oImg,
											unsigned int epoch)
{
	this->m_VOFittingShape.clone(ioShape);
double t = (double)cvGetTickCount();

    this->SetProcessingImage(iImg, this->m_VOAAMBasic);
    this->m_iIteration = 0;

    // Get m_MatModelAlignedShapeParam and m_fScale, m_vRotateAngles, m_MatCenterOfGravity
    this->m_VOAAMBasic->VO_CalcAllParams4AnyShapeWithConstrain(	this->m_VOFittingShape,
																this->m_MatModelAlignedShapeParam,
																this->m_fScale,
																this->m_vRotateAngles,
																this->m_MatCenterOfGravity);
	this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

	// Get m_MatModelNormalizedTextureParam
    VO_TextureModel::VO_LoadOneTextureFromShape(this->m_VOFittingShape,
												this->m_ImageProcessing,
												this->m_vTriangle2D,
												this->m_vPointWarpInfo,
												this->m_VOFittingTexture );
	// estimate the texture model parameters
    this->m_VOAAMBasic->VO_CalcAllParams4AnyTexture(this->m_VOFittingTexture, this->m_MatModelNormalizedTextureParam);

    // Calculate m_MatCurrentC
    this->m_VOAAMBasic->VO_SParamTParamProjectToCParam(	this->m_MatModelAlignedShapeParam,
														this->m_MatModelNormalizedTextureParam,
														this->m_MatCurrentC );
    // Set m_MatCurrentT, m_MatDeltaT, m_MatEstimatedT, m_MatDeltaC, m_MatEstimatedC, etc.
	this->m_MatCurrentT 	= Mat_<float>::zeros(this->m_MatCurrentT.size());
	this->m_MatDeltaT 		= Mat_<float>::zeros(this->m_MatDeltaT.size());
	this->m_MatEstimatedT 	= Mat_<float>::zeros(this->m_MatEstimatedT.size());
	this->m_MatDeltaC 		= Mat_<float>::zeros(this->m_MatDeltaC.size());
	this->m_MatEstimatedC 	= Mat_<float>::zeros(this->m_MatEstimatedC.size());
	
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	// explained by JIA Pei. 2010-05-20
	// For the first round, this->m_VOFittingShape should not change after calling "VO_CParamTParam2FittingShape"
	// But this is not the case. why?
	// Before calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by 
	// a) assigning m_VOTemplateAlignedShape
	// b) align to the real-size face using detected eyes and mouth
	// c) constrain the shape within the image
	// d) constrain the shape parameters and calculate those rigid transform parameters
	// cout << this->m_VOFittingShape << endl;
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	// Estimate m_VOFittingShape and m_VOFittingTexture
	this->VO_CParamTParam2FittingShape(	this->m_MatCurrentC,
										this->m_MatCurrentT,
										this->m_VOModelNormalizedTexture,
										this->m_VOFittingShape,
										this->m_fScale,
										this->m_vRotateAngles,
										this->m_MatCenterOfGravity );
	this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);		// Remember to call ConstrainShapeInImage() whenever you update m_VOFittingShape
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	// When calling VO_CParamTParam2FittingShape, this->m_VOFittingShape is calculated by
	// a) c parameters to reconstruct shape parameters
	// b) shape parameters to reconstruct shape
	// c) align to the real-size face by global shape normalization
	// cout << this->m_VOFittingShape << endl;
	//////////////////////////////////////////////////////////////////////////////////////////////////////
	
	this->m_E_previous = this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
															this->m_VOFittingShape,
															this->m_VOModelNormalizedTexture,
															this->m_VOTextureError);

    do
    {
		float estScale = this->m_fScale;
		vector<float> estRotateAngles = this->m_vRotateAngles;
		Mat_<float> estCOG = this->m_MatCenterOfGravity.clone();
		bool cBetter 	= false;
		bool poseBetter = false;

        /**First shape parameters, c parameters. refer to equation (9.3)
		* Cootes "Statistical Model of Appearance for Computer Vision" */
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMBasic->m_MatRc, -1, Mat(), 0.0, this->m_MatDeltaC, GEMM_2_T);

        // damp -- C
        for(unsigned int i = 0; i < k_values.size(); i++)
        {
            // make damped c prediction
            cv::scaleAdd(this->m_MatDeltaC, k_values[i], this->m_MatCurrentC, this->m_MatEstimatedC);

            // make sure m_MatEstimatedC are constrained
//......... the rest of the code is omitted here .........
Developer: haifaben, Project: vosm, Lines: 101, Source: VO_FittingAAMBasic.cpp


Example 19: VO_ICIAAAMFitting

/**
 * @author      JIA Pei
 * @version     2010-05-20
 * @brief       CMU ICIA AAM Fitting, for dynamic image sequence
 * @param       iImg            Input - image to be fitted
 * @param       ioShape         Input and Output - the fitted shape
 * @param       oImg            Output - the fitted image
 * @param       epoch           Input - the iteration epoch
*/
float VO_FittingAAMInverseIA::VO_ICIAAAMFitting(const Mat& iImg,
                                                VO_Shape& ioShape,
                                                Mat& oImg,
                                                unsigned int epoch)
{
    this->m_VOFittingShape.clone(ioShape);
    this->m_VOEstimatedShape.clone(this->m_VOFittingShape);
double t = (double)cvGetTickCount();

    this->SetProcessingImage(iImg, this->m_VOAAMInverseIA);
    this->m_iIteration = 0;

    // Get m_MatCurrentP and m_MatCurrentQ
    this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithCo
//......... the rest of the code is omitted here .........
