OpenCvSharp 3入门

分享于 

19分钟阅读

多媒体

  繁體

本文演示了如何将C++代码转换为OpenCvSharp3代码;你可以试验代码的参数,并查阅C++文档以了解各API的用法。

我考虑过C#封装库Emgu CV和OpenCV.NET……但我觉得OpenCvSharp3是最简单、支持也最好的平台。

我必须至少编译https://github.com/shimat/opencvsharp/releases示例代码几次才能获得干净的构建。

OpenCV是一个流行的计算机视觉C++库,它通过处理图像像素来寻找感兴趣的特征。但是,C++是非托管代码平台,与C#相比使用起来有点笨拙。

使用代码

要运行demo,创建一个新的控制台应用程序,根据需要更改API参数并重新编译并运行,注意,你需要使用Visual Studio nuget manager插件工具来安装OpenCvSharp3库,基本包将包括OpenCv C++ DLL。

/* * More Samples are available at: https://github.com/shimat/opencvsharp/releases
 * Class NameSpace listing is at: http://shimat.github.io/opencvsharp/
*/using System;using System.Collections.Generic;using System.Windows.Forms;using OpenCvSharp;using OpenCvSharp.ML;class Program
{
 /// <summary>
 /// Entry point. Each #region below is a self-contained OpenCvSharp demo:
 /// uncomment one region at a time, adjust the API parameters, and recompile.
 /// Only the "Machine Learning" SVM demo is active by default.
 /// NOTE(review): the SVM demo uses an 'unsafe' block, so the project must be
 /// built with "Allow unsafe code" enabled.
 /// </summary>
 static void Main()
 {
     #region Canny Edge Detection
     //Mat source = new Mat("../../Images/lena.jpg", ImreadModes.Color);
     //Mat grayFiltered = new Mat();
     //Mat ClearEdge = new Mat();
     //Mat filtered = new Mat();
     //Mat d = createADiamond();
     //Mat x = createAXShape();
     //Mat Canny = new Mat();
     //Cv2.Canny(source, Canny, 32, 192);
     //binImage(source, out filtered);
     ////A discussion on erode and dilate is at:
     ////http://docs.opencv.org/2.4/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.html
     //Cv2.Erode(filtered, ClearEdge, x);
     //Cv2.Dilate(ClearEdge, ClearEdge, x);
     //Cv2.Erode(ClearEdge, ClearEdge, d);
     //Cv2.Dilate(ClearEdge, ClearEdge, d);
     //grayFiltered = Mat.FromImageData(filtered.ToBytes(), ImreadModes.GrayScale);
     //new Window("source image", source);
     //new Window("CannyEdge image", Canny);
     //new Window("filtered image", filtered);
     //new Window("SharpEdge image", ClearEdge);
     //new Window("grayFiltered image", grayFiltered);
     //Cv2.WaitKey();
     #endregion

     #region Sobel Edge Detection
     //Mat dst = new Mat();
     //Mat grad_x = new Mat(), grad_y = new Mat();
     //Mat abs_grad_x = new Mat(), abs_grad_y = new Mat();
     //Mat src_gray = new Mat("../../Images/lena.jpg", ImreadModes.GrayScale);
     ////try to reduce image noises.
     //Cv2.GaussianBlur(src_gray, src_gray, new Size(3, 3), 1.5);
     ////Gradient X, ddepth is set to CV_16S to avoid overflow
     //Cv2.Sobel(src_gray, grad_x, MatType.CV_16U, 1, 0, 3);
     //Cv2.ConvertScaleAbs(grad_x, abs_grad_x);
     ////Gradient Y
     //Cv2.Sobel(src_gray, grad_y, MatType.CV_16U, 0, 1, 3);
     //Cv2.ConvertScaleAbs(grad_y, abs_grad_y);
     ////Total Gradient (approximate)
     //Cv2.AddWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, dst);
     //Cv2.ImShow("Sobel Edge", dst);
     //Cv2.WaitKey(0);
     #endregion

     #region Camcorder Capture
     //VideoCapture capture;
     //Mat frame;
     //capture = new VideoCapture();
     //frame = new Mat();
     //capture.Open(0);
     ////Read the video stream
     //{
     //    Cv2.NamedWindow("Video", WindowMode.AutoSize);
     //    while (true)
     //    {
     //        if (capture.Read(frame))
     //        {
     //            Cv2.ImShow("Video", frame);
     //            ////press a key to end execution
     //            int c = Cv2.WaitKey(10);
     //            if (c != -1) { break; } //Assuming image has focus
     //        }
     //    }
     //}
     #endregion

     #region Assigning pixel values
     //Mat src = new Mat("../../Images/lena.jpg", ImreadModes.Color);
     ////place a green box at the upper left hand corner
     //for (int i = 10; i < 99; i++)
     //    for (int j = 10; j < 99; j++)
     //        src.Set<Vec3b>(i, j, new Vec3b(0, 255, 0));
     //using (new Window("dot image", src))
     //{
     //    MessageBox.Show("Depth:" + src.Depth());
     //    MessageBox.Show("Channels:" + src.Channels());
     //    Cv2.WaitKey();
     //}
     #endregion

     #region Erode/Dilate Morphing
     //Mat src, dst;
     //src = Cv2.ImRead("../../Images/lena.jpg", ImreadModes.Color);
     //dst = new Mat();
     //// Create a structuring element
     //int erosion_size = 6;
     //Mat element = Cv2.GetStructuringElement(MorphShapes.Cross,
     //    new Size(2 * erosion_size + 1, 2 * erosion_size + 1),
     //    new Point(erosion_size, erosion_size));
     //// Apply erosion "OR" dilation on the image
     //Cv2.Erode(src, dst, element);
     //Cv2.Dilate(src, dst, element);
     //using (new Window("Display window", src))
     //using (new Window("Result window", dst))
     //    Cv2.WaitKey(0);
     #endregion

     #region draw a line
     ////Create black empty images
     //Mat image = Mat.Zeros(400, 400, MatType.CV_8UC3);
     ////Draw a line
     //Cv2.Line(image, new Point(15, 20), new Point(375, 375),
     //    new Scalar(255, 128, 0), 2);
     //using (new Window("Image", image))
     //    Cv2.WaitKey(0);
     #endregion

     #region draw a circle
     ////Create black empty images
     //Mat image = Mat.Zeros(400, 400, MatType.CV_8UC3);
     ////Draw a circle
     //Cv2.Circle(image, new Point(200, 200), 100, new Scalar(255, 128, 0), 2);
     //using (new Window("Image", image))
     //    Cv2.WaitKey(0);
     #endregion

     #region draw a Polygon
     //Mat src = new Mat(new Size(400, 400), MatType.CV_8U, new Scalar(1));
     //src.SetTo(Scalar.Black);
     //List<List<Point>> ListOfListOfPoint = new List<List<Point>>();
     //List<Point> points = new List<Point>();
     //points.Add(new Point(100, 100));
     //points.Add(new Point(100, 300));
     //points.Add(new Point(300, 300));
     //points.Add(new Point(300, 100));
     //ListOfListOfPoint.Add(points);
     //src.FillPoly(ListOfListOfPoint, Scalar.White);
     //Cv2.ImShow("Square", src);
     //Cv2.WaitKey();
     #endregion

     #region draw a text string
     ////Create black empty images
     //Mat src = Mat.Zeros(400, 400, MatType.CV_8UC3);
     //Cv2.PutText(src, "Hi all...", new Point(50, 100),
     //    HersheyFonts.HersheySimplex, 1, new Scalar(0, 200, 200), 4);
     //using (new Window("Image", src))
     //    Cv2.WaitKey(0);
     #endregion

     #region weighed filter
     //Mat src = new Mat("../../Images/lena.jpg", ImreadModes.GrayScale);
     //Mat kernel = new Mat(3, 3, MatType.CV_32F, new Scalar(0));
     //Mat dst = new Mat();
     //// Sharpening kernel: strong center, subtract the 4-neighborhood.
     //kernel.Set<float>(1, 1, 5.0f);
     //kernel.Set<float>(0, 1, -1.0f);
     //kernel.Set<float>(2, 1, -1.0f);
     //kernel.Set<float>(1, 0, -1.0f);
     //kernel.Set<float>(1, 2, -1.0f);
     //Cv2.Filter2D(src, dst, MatType.CV_32F, kernel);
     //using (new Window("src image", src))
     //using (new Window("dst image", dst))
     //{
     //    Cv2.WaitKey();
     //}
     #endregion

     #region find circles in image
     //Mat train = new Mat("../../Images/cartoon-train.png", ImreadModes.GrayScale);
     //CircleSegment[] circles;
     //Mat dst = new Mat();
     //Cv2.GaussianBlur(train, dst, new Size(5, 5), 1.5, 1.5);
     ////Note, the minimum distance between concentric circles is 25. Otherwise
     ////false circles are detected as a result of the circle's thickness.
     //circles = Cv2.HoughCircles(dst, HoughMethods.Gradient, 1, 25, 75, 60, 5, 200);
     //for (int i = 0; i < circles.Length; i++)
     //{
     //    Cv2.Circle(dst, circles[i].Center, (int)circles[i].Radius, new Scalar(0), 2);
     //}
     //using (new Window("Circles", dst))
     //{
     //    Cv2.WaitKey();
     //}
     #endregion

     #region Get corners on image
     //Mat src = new Mat("../../Images/building.jpg", ImreadModes.GrayScale);
     ////Show Edges
     //Mat edges = getEdges(src, 50);
     //new Window("Edges", edges);
     ////Corner detection
     ////Get All Processing Images
     //Mat cross = createACross();
     //Mat diamond = createADiamond();
     //Mat square = createASquare();
     //Mat x = createAXShape();
     //Mat dst = new Mat();
     ////Dilate with a cross
     //Cv2.Dilate(src, dst, cross);
     ////Erode with a diamond
     //Cv2.Erode(dst, dst, diamond);
     //Mat dst2 = new Mat();
     ////Dilate with a X
     //Cv2.Dilate(src, dst2, x);
     ////Erode with a square
     //Cv2.Erode(dst2, dst2, square);
     ////Corners are obtained by differencing the two closed images
     //Cv2.Absdiff(dst, dst2, dst);
     //applyThreshold(dst, 45);
     ////The following code identifies the found corners by
     ////drawing circles on the src image.
     //IDTheCorners(dst, src);
     //new Window("Corner on Image", src);
     //Cv2.WaitKey();
     #endregion

     #region Machine Learning
     // Translated from C++ article:
     // http://docs.opencv.org/3.1.0/d1/d73/tutorial_introduction_to_svm.html

     // Data for visual representation
     int width = 512, height = 512;
     Mat image = Mat.Zeros(height, width, MatType.CV_8UC3);

     // Set up training data: one point of class +1, three of class -1.
     int[] labels = new int[] { 1, -1, -1, -1 };
     float[,] trainingData = new float[,] { { 501, 10 }, { 255, 10 }, { 501, 255 }, { 10, 501 } };
     Mat trainingDataMat = new Mat(4, 2, MatType.CV_32FC1, trainingData);
     Mat labelsMat = new Mat(4, 1, MatType.CV_32SC1, labels);

     // Train the SVM (linear kernel, C-support vector classification).
     SVM svm = SVM.Create();
     svm.Type = SVM.Types.CSvc;
     svm.KernelType = SVM.KernelTypes.Linear;
     svm.TermCriteria = new TermCriteria(CriteriaType.MaxIter, 100, 1e-6);
     svm.Train(trainingDataMat, SampleTypes.RowSample, labelsMat);

     // Show the decision regions given by the SVM:
     // classify every pixel (x=j, y=i) and color it by predicted class.
     Vec3b green = new Vec3b(0, 255, 0), blue = new Vec3b(255, 0, 0);
     for (int i = 0; i < image.Rows; ++i)
         for (int j = 0; j < image.Cols; ++j)
         {
             Mat sampleMat = new Mat(1, 2, MatType.CV_32F, new float[] { j, i });
             float response = svm.Predict(sampleMat);
             if (response == 1)
                 image.Set<Vec3b>(i, j, green);
             else if (response == -1)
                 image.Set<Vec3b>(i, j, blue);
         }

     // Show the training data (thickness -1 = filled circle).
     int thickness = -1;
     Cv2.Circle(image, new Point(501, 10), 5, Scalar.Black, thickness);
     Cv2.Circle(image, new Point(255, 10), 5, Scalar.White, thickness);
     Cv2.Circle(image, new Point(501, 255), 5, Scalar.White, thickness);
     Cv2.Circle(image, new Point(10, 501), 5, Scalar.White, thickness);

     // Show support vectors.
     thickness = 2;
     Mat sv = svm.GetSupportVectors();
     for (int i = 0; i < sv.Rows; ++i)
     {
         unsafe
         {
             // Each support-vector row is a pair of raw floats (x, y).
             float* v = (float*)sv.Ptr(i);
             Cv2.Circle(image, new Point((int)v[0], (int)v[1]), 6, Scalar.Gray, thickness);
             Console.WriteLine("{0:d}, {1:d}", (int)v[0], (int)v[1]);
         }
     }
     Cv2.ImWrite("result.png", image);        // save the image
     Cv2.ImShow("SVM Simple Example", image); // show it to the user
     Cv2.WaitKey(0);
     #endregion

     #region Feature SURF flann
     ////Taken from C++ example:
     ////http://docs.opencv.org/3.1.0/d5/d6f/tutorial_feature_flann_matcher.html
     ////As of the writing of this routine,
     ////SIFT and SURF are non-free code moved to the
     ////contrib repository and then linked via the xfeatures2d library.
     ////So, you will get runtime error:
     ////Unable to find an entry point named 'xfeatures2d_SURF_create'
     ////in DLL 'OpenCvSharpExtern'.
     ////See these 2 links for a method on how to install the contrib library,
     ////it's a bit tricky but do-able:
     ////https://github.com/shimat/opencvsharp/issues/146
     ////https://github.com/shimat/opencvsharp/issues/180
     //Mat img_1 = Cv2.ImRead("../../Images/icons.png", ImreadModes.GrayScale);
     //Mat img_2 = Cv2.ImRead("../../Images/subIcons.png", ImreadModes.GrayScale);
     ////-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
     //int minHessian = 400;
     //SURF detector = SURF.Create(minHessian);
     //KeyPoint[] keypoints_1, keypoints_2;
     //Mat descriptors_1 = new Mat(), descriptors_2 = new Mat();
     //detector.DetectAndCompute(img_1, new Mat(), out keypoints_1, descriptors_1);
     //detector.DetectAndCompute(img_2, new Mat(), out keypoints_2, descriptors_2);
     ////-- Step 2: Matching descriptor vectors using FLANN matcher
     //FlannBasedMatcher matcher = new FlannBasedMatcher();
     //DMatch[] matches;
     //matches = matcher.Match(descriptors_1, descriptors_2);
     //double max_dist = 0; double min_dist = 100;
     ////-- Quick calculation of max and min distances between keypoints
     //for (int i = 0; i < descriptors_1.Rows; i++)
     //{
     //    double dist = matches[i].Distance;
     //    if (dist < min_dist) min_dist = dist;
     //    if (dist > max_dist) max_dist = dist;
     //}
     //Console.WriteLine("-- Max dist : {0}", max_dist);
     //Console.WriteLine("-- Min dist : {0}", min_dist);
     ////-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
     ////-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
     ////-- small) -- PS. radiusMatch can also be used here.
     //List<DMatch> good_matches = new List<DMatch>();
     //for (int i = 0; i < descriptors_1.Rows; i++)
     //{
     //    if (matches[i].Distance <= Math.Max(2 * min_dist, 0.02))
     //    {
     //        good_matches.Add(matches[i]);
     //    }
     //}
     ////-- Draw only "good" matches
     //Mat img_matches = new Mat();
     //Cv2.DrawMatches(img_1, keypoints_1, img_2, keypoints_2,
     //    good_matches, img_matches, Scalar.All(-1), Scalar.All(-1),
     //    new List<byte>(), DrawMatchesFlags.NotDrawSinglePoints);
     ////-- Show detected matches
     //Cv2.ImShow("Good Matches", img_matches);
     //for (int i = 0; i < good_matches.Count; i++)
     //{
     //    Console.WriteLine("-- Good Match [{0}] Keypoint 1: {1} -- Keypoint 2: {2}", i,
     //        good_matches[i].QueryIdx, good_matches[i].TrainIdx);
     //}
     //Cv2.WaitKey(0);
     #endregion
 }
 /***********************************************************************
  ************************** SUPPORT ROUTINES ***************************
  ***********************************************************************/
 #region Support code for Get Corners on image region
 // Computes a morphological-gradient (edge) image from 'image' and
 // binarizes it in place at 'threshold'. Returns the binary edge map.
 static Mat getEdges(Mat image, int threshold)
 {
     // Get the gradient image: dilation minus erosion highlights edges.
     Mat result = new Mat();
     Cv2.MorphologyEx(image, result, MorphTypes.Gradient, new Mat());
     applyThreshold(result, threshold);
     return result;
 }
 // Builds a 5x5 cross-shaped structuring element:
 // center row and center column set to 1, everything else 0.
 static Mat createACross()
 {
     Mat cross = new Mat(5, 5, MatType.CV_8U, new Scalar(0));
     // creating the cross-shaped structuring element
     for (int i = 0; i < 5; i++)
     {
         cross.Set<byte>(2, i, 1);
         cross.Set<byte>(i, 2, 1);
     }
     return cross;
 }
 // Builds a 5x5 diamond-shaped structuring element by starting from an
 // all-ones kernel and zeroing the three cells in each corner that fall
 // outside the diamond.
 static Mat createADiamond()
 {
     Mat diamond = new Mat(5, 5, MatType.CV_8U, new Scalar(1));
     // Creating the diamond-shaped structuring element
     diamond.Set<byte>(0, 0, 0);
     diamond.Set<byte>(1, 0, 0);
     diamond.Set<byte>(3, 0, 0);
     diamond.Set<byte>(4, 0, 0);
     diamond.Set<byte>(0, 1, 0);
     diamond.Set<byte>(4, 1, 0);
     diamond.Set<byte>(0, 3, 0);
     diamond.Set<byte>(4, 3, 0);
     diamond.Set<byte>(4, 4, 0);
     diamond.Set<byte>(0, 4, 0);
     diamond.Set<byte>(1, 4, 0);
     diamond.Set<byte>(3, 4, 0);
     return diamond;
 }
 // Builds a 5x5 square structuring element: simply an all-ones kernel.
 static Mat createASquare()
 {
     return new Mat(5, 5, MatType.CV_8U, new Scalar(1));
 }
 // Builds a 5x5 X-shaped structuring element: both diagonals set to 1.
 static Mat createAXShape()
 {
     Mat x = new Mat(5, 5, MatType.CV_8U, new Scalar(0));
     // Creating the x-shaped structuring element
     for (int i = 0; i < 5; i++)
     {
         x.Set<byte>(i, i, 1);
         x.Set<byte>(4 - i, i, 1);
     }
     return x;
 }
 // Binarizes 'result' in place: pixels above 'threshold' become 255, the rest 0.
 static void applyThreshold(Mat result, int threshold)
 {
     Cv2.Threshold(result, result, threshold, 255, ThresholdTypes.Binary);
 }
 // Marks every nonzero pixel of the binary corner mask by drawing a
 // radius-5 circle at that position on 'image'.
 static void IDTheCorners(Mat binary, Mat image)
 {
     for (int r = 0; r < binary.Rows; r++)
         for (int c = 0; c < binary.Cols; c++)
             if (binary.At<byte>(r, c) != 0)
                 Cv2.Circle(image, c, r, 5, new Scalar(255));
 }
 #endregion
 #region nearest palette color averaging
 /// <summary>
 /// Quantizes each pixel of a 3-channel image to the nearest palette color.
 /// </summary>
 /// <param name="src">input image</param>
 /// <param name="dst">output image (a clone of src with quantized channels)</param>
 /// <param name="binSize">color channel bin size.
 /// i.e., Number of Red shades = floor(255/binSize),
 /// where binSize is between 1 and 128.</param>
 static void binImage(Mat src, out Mat dst, int binSize = 51)
 {
     dst = src.Clone();
     // Reject nonsensical bin sizes; the unmodified clone is returned.
     if (binSize <= 0 || binSize > 255) return;
     // Hoisted out of the loop: the rounding offset never changes.
     int binCenter = binSize / 2;
     for (int r = 0; r < src.Rows; r++)
         for (int c = 0; c < src.Cols; c++)
         {
             Vec3b color = src.Get<Vec3b>(r, c);
             // Round each channel to the nearest multiple of binSize.
             // Note the black and white bins are only half the size of the
             // other bins, and rounding to the nearest palette color can
             // "sharply change" a gradient color in a region.
             // Clamp at 255 so a large binSize (e.g. 128) cannot wrap the
             // byte cast around to 0 for bright channels.
             dst.Set<Vec3b>(r, c, new Vec3b(
                 (byte)Math.Min(255, ((color[0] + binCenter) / binSize) * binSize),
                 (byte)Math.Min(255, ((color[1] + binCenter) / binSize) * binSize),
                 (byte)Math.Min(255, ((color[2] + binCenter) / binSize) * binSize)
             ));
         }
 }
 #endregion
}

STA  GET  Opencv  Opencvsharp  
相关文章