C# CV.MemStorage Class Code Examples


This article collects and summarizes typical usage examples of the Emgu.CV.MemStorage class in C#. If you have been wondering what the C# MemStorage class is for, how to use it, or what working examples look like, the selected class code examples below should help.



The MemStorage class belongs to the Emgu.CV namespace. A total of 20 code examples of the MemStorage class are shown below, ordered by popularity by default.
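Before the individual examples, a minimal sketch of the pattern most of them share may be helpful: allocate a MemStorage, pass it to an operation that produces sequences (FindContours in this sketch), and dispose the storage to release every sequence allocated from it at once. This is only a sketch under the assumptions of the Emgu CV 2.x API used by the examples below; the method name and the binaryImage parameter are illustrative placeholders.

        // requires: using System.Drawing; using Emgu.CV; using Emgu.CV.Structure;
        static int CountContours(Image<Gray, byte> binaryImage)
        {
            int count = 0;
            using (MemStorage storage = new MemStorage()) // owns the memory of every contour found below
            {
                for (Contour<Point> contour = binaryImage.FindContours(
                         Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                         Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
                         storage);
                     contour != null;
                     contour = contour.HNext)
                {
                    count++;
                }
            } // disposing the storage frees all contours in one step
            return count;
        }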

Example 1: FindRectangles

		private void FindRectangles(Image<Gray, Byte> blackAndWhiteImage)
		{
			m_FoundRectangles.Clear();

			using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
			{
				for (Contour<Point> contours = blackAndWhiteImage.FindContours(
					Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
					Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
					storage);
					contours != null;
					contours = contours.HNext)
				{
					Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);
					//Debug.WriteLine(currentContour.Area);

					if (currentContour.Area > 250) //only consider contours with area greater than 250
					{
						if (currentContour.Total == 4) //The contour has 4 vertices.
						{
							if (IsRectangle(currentContour))
							{
								m_FoundRectangles.Add(currentContour.GetMinAreaRect());
							}
						}
					}
				}
			}
		}
Developer: eldb2, Project: robotic-tic-tac-toe-lynxmotion, Lines of code: 29, Source file: MainFormModel.cs


Example 2: ContourCoordinates

        public void ContourCoordinates()
        {
            Image<Bgr, Byte> img = this.ShowCamera();
            Image<Gray, Byte> g_img = this.FilterImage(img);
            Image<Gray, Byte> r_img = new Image<Gray, Byte>(new Size(g_img.Width, g_img.Height));
            this.h = g_img.Width;
            this.w = g_img.Height;

            using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
            {
                for (Contour<Point> contours = g_img.FindContours(
                    Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE,
                    Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_CCOMP,
                    storage);
                    contours != null;
                    contours = contours.HNext)
                {
                    Contour<Point> contour = contours.ApproxPoly(contours.Perimeter * 0.0005, storage);

                    Point[] pts = contour.ToArray();
                    LineSegment2D[] edges = PointCollection.PolyLine(pts, false);

                    CvInvoke.cvDrawContours(r_img, contour, new MCvScalar(200), new MCvScalar(0, 200,0), 5, -1, LINE_TYPE.FOUR_CONNECTED, new Point(0, 0));
                    for (int k = 0; k < pts.Length; k++)
                    {
                        //r_img.Draw(new CircleF(pts[k], 2), new Gray(255), 1);
                        this.showimg = r_img;
                        //this.Coord2d.Add(pts[k]);
                        List<Point> p = new List<Point>();
                        p.Add(pts[k]);
                        matrix.Add(p);
                    }
                }
            }
        }
Developer: ceverdeen7, Project: scanner, Lines of code: 35, Source file: iProcess.cs


Example 3: HOGDescriptor

        /// <summary>
        /// Create a new HOGDescriptor using the specific parameters
        /// </summary>
        public HOGDescriptor(
            Size winSize,
            Size blockSize,
            Size blockStride,
            Size cellSize,
            int nbins,
            int derivAperture,
            double winSigma,
            double L2HysThreshold,
            bool gammaCorrection)
        {
            _ptr = CvHOGDescriptorCreate(
            ref winSize,
            ref blockSize,
            ref blockStride,
            ref cellSize,
            nbins,
            derivAperture,
            winSigma,
            0,
            L2HysThreshold,
            gammaCorrection);

             _rectStorage = new MemStorage();
             _rectSeq = new Seq<Rectangle>(_rectStorage);
        }
Developer: samuto, Project: UnityOpenCV, Lines of code: 29, Source file: HOGDescriptor.cs


Example 4: CreateChildMemStorage

 /// <summary>
 /// Creates a child memory storage that is similar to simple memory storage except for the differences in the memory allocation/deallocation mechanism. When a child storage needs a new block to add to the block list, it tries to get this block from the parent. The first unoccupied parent block available is taken and excluded from the parent block list. If no blocks are available, the parent either allocates a block or borrows one from its own parent, if any. In other words, the chain, or a more complex structure, of memory storages where every storage is a child/parent of another is possible. When a child storage is released or even cleared, it returns all blocks to the parent. In other aspects, the child storage is the same as the simple storage
 /// </summary>
 /// <returns>Child MemStorage</returns>
 public MemStorage CreateChildMemStorage()
 {
     IntPtr childStoragePtr = CvInvoke.cvCreateChildMemStorage(_ptr);
      MemStorage childStorage = new MemStorage(childStoragePtr);
      //_childStorageList.Add(childStorage);
      return childStorage;
 }
Developer: fajoy, Project: RTSPExample, Lines of code: 11, Source file: MemStorage.cs
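To make the child/parent mechanism described above concrete, here is a minimal usage sketch; it is not taken from any of the projects quoted in this article, and the Seq<Point> is only a placeholder for whatever short-lived data the child storage would hold.

     // requires: using System.Drawing; using Emgu.CV;
     using (MemStorage parent = new MemStorage())
     {
         using (MemStorage child = parent.CreateChildMemStorage())
         {
             Seq<Point> temp = new Seq<Point>(child); // allocated from the child's blocks
             temp.Push(new Point(0, 0));
         } // disposing the child returns its blocks to 'parent', not to the system
         // 'parent' can reuse those blocks for further allocations here
     } // disposing the parent finally releases the memory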


Example 5: SignDetector

 public SignDetector(Image<Bgr, Byte> stopSignModel)
 {
     _detector2 = new SURFDetector(500, false);
     using (Image<Gray, Byte> redMask = GetColorPixelMask(stopSignModel))
     {
         try
         {
             _tracker2 = new Features2DTracker<float>(_detector2.DetectFeatures(redMask, null));
         }
         catch { }
     }
     _octagonStorage2 = new MemStorage();
     _octagon2 = new Contour<Point>(_octagonStorage2);
     _octagon2.PushMulti(new Point[] { 
         //hexagon
         new Point(1, 0),
         new Point(2, 0),
         new Point(3, 1),
         new Point(2, 2),
         new Point(1, 2),
         new Point(0, 1)},
         //octagon
     //new Point(1, 0),
     //new Point(2, 0),
     //new Point(3, 1),
     //new Point(3, 2),
     //new Point(2, 3),
     //new Point(1, 3),
     //new Point(0, 2),
     //new Point(0, 1)},
        Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
 }
Developer: petrind, Project: SRTesis2, Lines of code: 32, Source file: SignDetector.cs


Example 6: Form1

        public Form1()
        {
            InitializeComponent();

            grabber = new Emgu.CV.Capture("C:/Users/L33549.CITI/Desktop/a.avi");
            grabber.QueryFrame();
            frameWidth = grabber.Width;
            frameHeight = grabber.Height;
            //detector = new AdaptiveSkinDetector(1, AdaptiveSkinDetector.MorphingMethod.NONE);
            hsv_min = new Hsv(0, 45, 0);
            hsv_max = new Hsv(20, 255, 255);
            YCrCb_min = new Ycc(0, 129, 40);
            YCrCb_max = new Ycc(255, 185, 135);
            box = new MCvBox2D();
            ellip = new Ellipse();

            contourStorage = new MemStorage();
            approxStorage = new MemStorage();
            hullStorage = new MemStorage();
            defectsStorage = new MemStorage();

            tipPts = new Point[MAX_POINTS];   // coords of the finger tips
            foldPts = new Point[MAX_POINTS];  // coords of the skin folds between fingers
            depths = new float[MAX_POINTS];   // distances from tips to folds
            cogPt = new Point();
            fingerTips = new List<Point>();
            face = new CascadeClassifier("C:/Users/L33549.CITI/Desktop/AbuseAnalysis/HandGestureRecognition/HandGestureRecognition/HandGestureRecognition/haar/Original/haarcascade_hand.xml");

            Application.Idle += new EventHandler(FrameGrabber);

            /*foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (null != this.sensor)
            {
                // Turn on the color stream to receive color frames
                this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

                // Allocate space to put the pixels we'll receive
                this.colorPixels = new byte[this.sensor.ColorStream.FramePixelDataLength];

                // This is the bitmap we'll display on-screen
                this.colorBitmap = new WriteableBitmap(this.sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);

                // Set the image we display to point to the bitmap where we'll put the image data
                //this.Image.Source = this.colorBitmap;

                // Add an event handler to be called whenever there is new color frame data
                this.sensor.ColorFrameReady += this.SensorColorFrameReady;

                // Start the sensor!
                this.sensor.Start();
            }*/
        }
Developer: hzhiguang, Project: AbuseAnalysis, Lines of code: 60, Source file: Form1.cs


Example 7: button1_Click

        private void button1_Click(object sender, EventArgs e)
        {
            OpenFileDialog Openfile = new OpenFileDialog();
            if (Openfile.ShowDialog() == DialogResult.OK)
            {
                Image<Bgr, byte> My_Image = new Image<Bgr, byte>(Openfile.FileName);
                Image<Gray, byte> gray_image = My_Image.Convert<Gray, byte>();
                Image<Gray, byte> eh_gray_image = My_Image.Convert<Gray, byte>();
                Image<Gray, byte> smooth_gray_image = My_Image.Convert<Gray, byte>();
                Image<Gray, byte> ed_gray_image = new Image<Gray, byte>(gray_image.Size);
                Image<Bgr, byte> final_image = new Image<Bgr, byte>(Openfile.FileName);
                MemStorage stor = new MemStorage();
                List<MCvBox2D> detectedLicensePlateRegionList = new List<MCvBox2D>();

                CvInvoke.cvEqualizeHist(gray_image, eh_gray_image);
                CvInvoke.cvSmooth(eh_gray_image, smooth_gray_image, Emgu.CV.CvEnum.SMOOTH_TYPE.CV_GAUSSIAN, 3, 3, 0, 0);
                //CvInvoke.cvAdaptiveThreshold(smooth_gray_image, bi_gray_image, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_GAUSSIAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, 71, 1);
                CvInvoke.cvCanny(smooth_gray_image, ed_gray_image, 100, 50, 3);
                Contour<Point> contours = ed_gray_image.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, stor);
                DetectPlate(contours, detectedLicensePlateRegionList);

                for (int i = 0; i < detectedLicensePlateRegionList.Count; i++)
                {
                    final_image.Draw(detectedLicensePlateRegionList[i], new Bgr(Color.Red), 2);
                }
                imageBox1.Image = My_Image;
                imageBox2.Image = gray_image;
                imageBox3.Image = eh_gray_image;
                imageBox4.Image = smooth_gray_image;
                imageBox5.Image = ed_gray_image;
                imageBox6.Image = final_image;
            }
        }
Developer: Rokeer, Project: Car_Plate_Recognition_Demo_1, Lines of code: 33, Source file: Form1.cs


Example 8: DetectLicensePlate

        /// <summary>
        /// Detect license plate from the given image
        /// </summary>
        /// <param name="img">The image to search license plate from</param>
        /// <param name="licensePlateImagesList">A list of images where the detected license plate regions are stored</param>
        /// <param name="filteredLicensePlateImagesList">A list of images where the detected license plate regions (with noise removed) are stored</param>
        /// <param name="detectedLicensePlateRegionList">A list where the regions of license plate (defined by an MCvBox2D) are stored</param>
        /// <returns>The list of words for each license plate</returns>
        public List<List<Word>> DetectLicensePlate(
            Image<Bgr, byte> img,
            List<Image<Gray, Byte>> licensePlateImagesList,
            List<Image<Gray, Byte>> filteredLicensePlateImagesList,
            List<MCvBox2D> detectedLicensePlateRegionList)
        {
            List<List<Word>> licenses = new List<List<Word>>();

            // Convert image to gray
            using (Image<Gray, byte> gray = img.Convert<Gray, Byte>())

            // Create Canny image
            using (Image<Gray, Byte> canny = new Image<Gray, byte>(gray.Size))

            //Create MemStorage
            using (MemStorage stor = new MemStorage())
            {
                //Convert gray with Canny Algorithm
                CvInvoke.cvCanny(gray, canny, 130, 70, 3);

                //List all Contour
                Contour<Point> contours = canny.FindContours(
                     Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                     Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE,
                     stor);

                //Check Contour
                FindLicensePlate(contours, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
            }

            return licenses;
        }
Developer: thuyenvinh, Project: qlvx, Lines of code: 40, Source file: LicensePlates.cs


Example 9: HOGDescriptor

 /// <summary>
 /// Create a new HOGDescriptor
 /// </summary>
 public HOGDescriptor()
 {
    _ptr = CvHOGDescriptorCreateDefault();
    _rectStorage = new MemStorage();
    _rectSeq = new Seq<Rectangle>(_rectStorage);
    _vector = new VectorOfFloat();
 }
Developer: Rustemt, Project: emgu_openCV, Lines of code: 10, Source file: HOGDescriptor.cs


Example 10: ArrowSignDetector

 //public Bitmap _grayImg;
 //public Bitmap _canndyImg;
 public ArrowSignDetector()
 {
     _stor = new MemStorage();
     _defaultStor = new MemStorage();
     _tempStor = new MemStorage();
     //   _defaultContour = FindDefault();
 }
Developer: theone4ever, Project: PCPS, Lines of code: 9, Source file: ObjectDetector.cs


Example 11: FilterPlate

        private static Image<Gray, Byte> FilterPlate(Image<Gray, Byte> plate)
        {
            Image<Gray, Byte> thresh = plate.ThresholdBinaryInv(new Gray(120), new Gray(255));

            Image<Gray, Byte> plateMask = new Image<Gray, byte>(plate.Size);
            Image<Gray, Byte> plateCanny = plate.Canny(new Gray(100), new Gray(50));
            MemStorage stor = new MemStorage();
            {
                plateMask.SetValue(255.0);
                for (
                   Contour<Point> contours = plateCanny.FindContours(
                      Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                      Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                      stor);
                   contours != null; contours = contours.HNext)
                {
                    Rectangle rect = contours.BoundingRectangle;
                    if (rect.Height > (plate.Height >> 1))
                    {
                        rect.X -= 1; rect.Y -= 1; rect.Width += 2; rect.Height += 2;
                        rect.Intersect(plate.ROI);

                        plateMask.Draw(rect, new Gray(0.0), -1);
                    }
                }

                thresh.SetValue(0, plateMask);
            }

            thresh._Erode(1);
            thresh._Dilate(1);

            return thresh;
        }
Developer: donie81, Project: ia-licence-plate-detection, Lines of code: 34, Source file: Form1.cs


Example 12: HandDetector

        public HandDetector(string hsvFnm, int width, int height)
        {
            Size scale = new Size(width/IMG_SCALE, height/IMG_SCALE);
            scaleImg = new Image<Bgr, Byte>(scale);
            hsvImg = new Image<Hsv, Byte>(scale);
            imgThreshed = new Image<Gray, Byte>(scale);

            // storage for contour, hull, and defect calculations by OpenCV
            contourStorage = new MemStorage();
            approxStorage = new MemStorage();
            hullStorage = new MemStorage();
            defectsStorage = new MemStorage();

            msgFont = new Font("SansSerif", 18, FontStyle.Bold, GraphicsUnit.Pixel);

            cogPt = new Point();
            fingerTips = new List<Point>();
            namedFingers = new List<FingerNameClass.FingerName>();

            tipPts = new Point[MAX_POINTS];   // coords of the finger tips
            foldPts = new Point[MAX_POINTS];  // coords of the skin folds between fingers
            depths = new float[MAX_POINTS];   // distances from tips to folds

            hueLower = 0;
            hueUpper = 20;
            satLower = 50;
            satUpper = 255;
            briLower = 0;
            briUpper = 255;
        }
Developer: hzhiguang, Project: AbuseAnalysis, Lines of code: 30, Source file: HandDetector.cs


Example 13: ProcessFrame

      private void ProcessFrame(object sender, EventArgs e)
      {
         using (MemStorage storage = new MemStorage()) //create storage for motion components
         {
            Image<Bgr, Byte> image = _capture.QuerySmallFrame().PyrUp(); //reduce noise from the image
            capturedImageBox.Image = image;

            //update the motion history
            _motionHistory.Update(image.Convert<Gray, Byte>());

            #region get a copy of the motion mask and enhance its color
            Image<Gray, Byte> motionMask = _motionHistory.Mask;
            double[] minValues, maxValues;
            System.Drawing.Point[] minLoc, maxLoc;
            motionMask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
            motionMask._Mul(255.0 / maxValues[0]);
            #endregion

            //create the motion image 
            Image<Bgr, Byte> motionImage = new Image<Bgr, byte>(motionMask.Size);
            //display the motion pixels in blue (first channel)
            motionImage[0] = motionMask;

            //Threshold to define a motion area, reduce the value to detect smaller motion
            double minArea = 100;

            storage.Clear(); //clear the storage
            Seq<MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);

            //iterate through each of the motion component
            foreach (MCvConnectedComp comp in motionComponents)
            {
               //reject the components that have small area;
               if (comp.area < minArea) continue;

               // find the angle and motion pixel count of the specific area
               double angle, motionPixelCount;
               _motionHistory.MotionInfo(comp.rect, out angle, out motionPixelCount);

               //reject the area that contains too few motion
               if (motionPixelCount < comp.area * 0.05) continue;

               //Draw each individual motion in red
               DrawMotion(motionImage, comp.rect, angle, new Bgr(Color.Red));
            }

            // find and draw the overall motion angle
            double overallAngle, overallMotionPixelCount;
            _motionHistory.MotionInfo(motionMask.ROI, out overallAngle, out overallMotionPixelCount);
            DrawMotion(motionImage, motionMask.ROI, overallAngle, new Bgr(Color.Green));

            //Display the amount of motions found on the current image
            UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", motionComponents.Total, overallMotionPixelCount));

            //Display the image of the motion
            motionImageBox.Image = motionImage;
         }
      }
Developer: AnthonyNystrom, Project: Pikling, Lines of code: 58, Source file: Form1.cs


Example 14: GetModelPoints

 /// <summary>
 /// Get the model points stored in this detector
 /// </summary>
 /// <returns>The model points stored in this detector</returns>
 public MKeyPoint[] GetModelPoints()
 {
     using (MemStorage stor = new MemStorage())
      {
     Seq<MKeyPoint> modelPoints = new Seq<MKeyPoint>(stor);
     CvPlanarObjectDetectorGetModelPoints(_ptr, modelPoints);
     return modelPoints.ToArray();
      }
 }
Developer: samuto, Project: UnityOpenCV, Lines of code: 13, Source file: PlanarObjectDetector.cs


Example 15: Detect

 /// <summary>
 /// Detect planar object from the specific image
 /// </summary>
 /// <param name="image">The image where the planar object will be detected</param>
 /// <param name="h">The homography matrix which will be updated</param>
 /// <returns>The four corners of the detected region</returns>
 public PointF[] Detect(Image<Gray, Byte> image, HomographyMatrix h)
 {
     using (MemStorage stor = new MemStorage())
      {
     Seq<PointF> corners = new Seq<PointF>(stor);
     CvPlanarObjectDetectorDetect(_ptr, image, h, corners);
     return corners.ToArray();
      }
 }
Developer: samuto, Project: UnityOpenCV, Lines of code: 15, Source file: PlanarObjectDetector.cs


Example 16: getContours

 private void getContours()
 {
     using (MemStorage store = new MemStorage())
         for (Contour<Point> contours1 = this.img.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, store); contours1 != null; contours1 = contours1.HNext)
         {
             Rectangle r = CvInvoke.cvBoundingRect(contours1, 1);
             this.rects.Add(r);
         }
 }
Developer: phong-steph, Project: IA_OCR, Lines of code: 9, Source file: Class1.cs


Example 17: HoughLineTransform

 /// <summary>
 /// Hough Line Transform, as in OpenCV (EmguCv does not wrap this function as it should be)
 /// </summary>
 /// <param name="img">Binary image</param>
 /// <param name="type">type of hough transform</param>
 /// <param name="threshold">how many votes is needed to accept line</param>
 /// <returns>Lines in theta/rho format</returns>
 public static PointF[] HoughLineTransform(Image<Gray, byte> img, Emgu.CV.CvEnum.HOUGH_TYPE type, int threshold)
 {
     using (MemStorage stor = new MemStorage())
     {
         IntPtr linePtr = CvInvoke.cvHoughLines2(img, stor.Ptr, type, 5, Math.PI / 180 * 15, threshold, 0, 0);
         Seq<PointF> seq = new Seq<PointF>(linePtr, stor);
          return seq.ToArray();
     }
 }
Developer: rAum, Project: auton_net, Lines of code: 16, Source file: VisionToolkit.cs
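The PointF values returned by HoughLineTransform encode each detected line in polar form. A common follow-up step, sketched here under the assumption that X holds rho and Y holds theta (the layout cvHoughLines2 produces for the standard Hough transform), is to convert each pair into a segment that can be drawn on the image; the helper name and the length parameter are illustrative.

     // Hypothetical helper, not part of VisionToolkit: converts one (rho, theta)
     // pair into a segment of roughly 2 * length pixels centred on the point of
     // the line closest to the origin.
     static LineSegment2D PolarToSegment(PointF line, int length)
     {
         double rho = line.X, theta = line.Y;
         double a = Math.Cos(theta), b = Math.Sin(theta);
         double x0 = a * rho, y0 = b * rho;                // closest point to the origin
         Point p1 = new Point((int)(x0 - length * b), (int)(y0 + length * a));
         Point p2 = new Point((int)(x0 + length * b), (int)(y0 - length * a));
         return new LineSegment2D(p1, p2);
     }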


Example 18: GetDefaultPeopleDetector

 /// <summary>
 /// Return the default people detector
 /// </summary>
 /// <returns>the default people detector</returns>
 public static float[] GetDefaultPeopleDetector()
 {
     using (MemStorage stor = new MemStorage())
      {
     Seq<float> desc = new Seq<float>(stor);
     CvHOGDescriptorPeopleDetectorCreate(desc);
     return desc.ToArray();
      }
 }
Developer: samuto, Project: UnityOpenCV, Lines of code: 13, Source file: HOGDescriptor.cs


Example 19: DetectKeyPoints

 /// <summary>
 /// Detect STAR key points from the image
 /// </summary>
 /// <param name="image">The image to extract key points from</param>
 /// <returns>The STAR key points of the image</returns>
 public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image)
 {
     using (MemStorage stor = new MemStorage())
      {
     Seq<MKeyPoint> seq = new Seq<MKeyPoint>(stor);
     CvStarDetectorDetectKeyPoints(ref this, image, seq.Ptr);
     return seq.ToArray();
      }
 }
Developer: samuto, Project: UnityOpenCV, Lines of code: 14, Source file: StarDetector.cs


Example 20: DetectKeyPoints

 /// <summary>
 /// Detect the Lepetit keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract Lepetit keypoints</param>
 /// <param name="maxCount">The maximum number of keypoints to be extracted</param>
 /// <param name="scaleCoords">Indicates if the coordinates should be scaled</param>
 /// <returns>The array of Lepetit keypoints</returns>
 public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image, int maxCount, bool scaleCoords)
 {
     using (MemStorage stor = new MemStorage())
      {
     Seq<MKeyPoint> seq = new Seq<MKeyPoint>(stor);
     CvLDetectorDetectKeyPoints(ref this, image, seq.Ptr, maxCount, scaleCoords);
     return seq.ToArray();
      }
 }
Developer: samuto, Project: UnityOpenCV, Lines of code: 16, Source file: LDetector.cs



Note: The Emgu.CV.MemStorage examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their respective authors, who retain copyright; please refer to each project's license before redistributing or reusing the code. Do not republish without permission.

