[C# learning] How to use OpenCvSharp for feature point recognition? How to use the SIFT and SURF algorithms?

First, enter the following command in the NuGet Package Manager Console:

Install-Package OpenCvSharp4.Windows -Version 4.0.0.20190108
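
If you use the dotnet CLI rather than the Package Manager Console, the equivalent command should be:

dotnet add package OpenCvSharp4.Windows --version 4.0.0.20190108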

Then, use the following two methods:

public static Bitmap MatchPicBySift(Bitmap imgSrc, Bitmap imgSub)
{
    using (Mat matSrc = imgSrc.ToMat())
    using (Mat matTo = imgSub.ToMat())
    using (Mat matSrcRet = new Mat())
    using (Mat matToRet = new Mat())
    {
        KeyPoint[] keyPointsSrc, keyPointsTo;
        using (var sift = OpenCvSharp.XFeatures2D.SIFT.Create())
        {
            sift.DetectAndCompute(matSrc, null, out keyPointsSrc, matSrcRet);
            sift.DetectAndCompute(matTo, null, out keyPointsTo, matToRet);
        }
        using (var bfMatcher = new OpenCvSharp.BFMatcher())
        {
            var matches = bfMatcher.KnnMatch(matSrcRet, matToRet, k: 2);

            var pointsSrc = new List<Point2f>();
            var pointsDst = new List<Point2f>();
            var goodMatches = new List<DMatch>();
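            // Lowe's ratio test: keep a match only when the best candidate is clearly closer than the second best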
            foreach (DMatch[] items in matches.Where(x => x.Length > 1))
            {
                if (items[0].Distance < 0.5 * items[1].Distance)
                {
                    pointsSrc.Add(keyPointsSrc[items[0].QueryIdx].Pt);
                    pointsDst.Add(keyPointsTo[items[0].TrainIdx].Pt);
                    goodMatches.Add(items[0]);
                    Console.WriteLine($"{keyPointsSrc[items[0].QueryIdx].Pt.X}, {keyPointsSrc[items[0].QueryIdx].Pt.Y}");
                }
            }

            var outMat = new Mat();

            // Filter the matched results with the RANSAC algorithm
            var pSrc = pointsSrc.ConvertAll(Point2fToPoint2d);
            var pDst = pointsDst.ConvertAll(Point2fToPoint2d);
            var outMask = new Mat();
            // If there are no raw matches, skip the filtering step
            if (pSrc.Count > 0 && pDst.Count > 0)
                Cv2.FindHomography(pSrc, pDst, HomographyMethods.Ransac, mask: outMask);
            // If more than 10 matching points survive RANSAC, draw only the filtered matches; otherwise fall back to the original matches (with very few points, RANSAC can reduce the result to 0 matches).
            if (outMask.Rows > 10)
            {                                              
                byte[] maskBytes = new byte[outMask.Rows * outMask.Cols];
                outMask.GetArray(0, 0, maskBytes);
                Cv2.DrawMatches(matSrc, keyPointsSrc, matTo, keyPointsTo, goodMatches, outMat, matchesMask: maskBytes, flags: DrawMatchesFlags.NotDrawSinglePoints);
            }
            else
                Cv2.DrawMatches(matSrc, keyPointsSrc, matTo, keyPointsTo, goodMatches, outMat, flags: DrawMatchesFlags.NotDrawSinglePoints);
            return OpenCvSharp.Extensions.BitmapConverter.ToBitmap(outMat);
        }
    }
}

// This helper method is easy to miss; many blog posts that cover this topic leave it out.
private static Point2d Point2fToPoint2d(Point2f input)
{
    Point2d p2 = new Point2d(input.X, input.Y);
    return p2;
}


public static Bitmap MatchPicBySurf(Bitmap imgSrc, Bitmap imgSub, double threshold = 400)
{
    using (Mat matSrc = imgSrc.ToMat())
    using (Mat matTo = imgSub.ToMat())
    using (Mat matSrcRet = new Mat())
    using (Mat matToRet = new Mat())
    {
        KeyPoint[] keyPointsSrc, keyPointsTo;
        using (var surf = OpenCvSharp.XFeatures2D.SURF.Create(threshold, 4, 3, true, true))
        {
            surf.DetectAndCompute(matSrc, null, out keyPointsSrc, matSrcRet);
            surf.DetectAndCompute(matTo, null, out keyPointsTo, matToRet);
        }

        using (var flnMatcher = new OpenCvSharp.FlannBasedMatcher())
        {
            var matches = flnMatcher.Match(matSrcRet, matToRet);
            // Find the minimum and maximum match distances
            double minDistance = 1000; // start high; shrunk toward the true minimum below
            double maxDistance = 0;
            for (int i = 0; i < matSrcRet.Rows; i++)
            {
                double distance = matches[i].Distance;
                if (distance > maxDistance)
                {
                    maxDistance = distance;
                }
                if (distance < minDistance)
                {
                    minDistance = distance;
                }
            }
            Console.WriteLine($"max distance : {maxDistance}");
            Console.WriteLine($"min distance : {minDistance}");

            var pointsSrc = new List<Point2f>();
            var pointsDst = new List<Point2f>();
            // Keep only the better matching points
            var goodMatches = new List<DMatch>();
            for (int i = 0; i < matSrcRet.Rows; i++)
            {
                double distance = matches[i].Distance;
                if (distance < Math.Max(minDistance * 2, 0.02))
                {
                    pointsSrc.Add(keyPointsSrc[matches[i].QueryIdx].Pt);
                    pointsDst.Add(keyPointsTo[matches[i].TrainIdx].Pt);
                    // Collect the DMatch entries whose distance is below the threshold
                    goodMatches.Add(matches[i]);
                }
            }

            var outMat = new Mat();

            // Filter the matched results with the RANSAC algorithm
            var pSrc = pointsSrc.ConvertAll(Point2fToPoint2d);
            var pDst = pointsDst.ConvertAll(Point2fToPoint2d);
            var outMask = new Mat();
            // If there are no raw matches, skip the filtering step
            if (pSrc.Count > 0 && pDst.Count > 0)
                Cv2.FindHomography(pSrc, pDst, HomographyMethods.Ransac, mask: outMask);
            // If more than 10 matching points survive RANSAC, draw only the filtered matches; otherwise fall back to the original matches (with very few points, RANSAC can reduce the result to 0 matches).
            if (outMask.Rows > 10)
            {                                               
                byte[] maskBytes = new byte[outMask.Rows * outMask.Cols];
                outMask.GetArray(0, 0, maskBytes);
                Cv2.DrawMatches(matSrc, keyPointsSrc, matTo, keyPointsTo, goodMatches, outMat, matchesMask: maskBytes, flags: DrawMatchesFlags.NotDrawSinglePoints);
            }
            else
                Cv2.DrawMatches(matSrc, keyPointsSrc, matTo, keyPointsTo, goodMatches, outMat, flags: DrawMatchesFlags.NotDrawSinglePoints);
            return OpenCvSharp.Extensions.BitmapConverter.ToBitmap(outMat);
        }
    }
}
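
Here is a minimal usage sketch of how the two methods above might be called (the image paths are placeholders, and the methods are assumed to live in the same class):

Bitmap src = new Bitmap("source.png");    // scene image (placeholder path)
Bitmap sub = new Bitmap("template.png");  // template image to look for (placeholder path)

// SIFT-based matching; save the visualization of the matches
Bitmap siftResult = MatchPicBySift(src, sub);
siftResult.Save("sift_matches.png");

// SURF-based matching with the default Hessian threshold of 400
Bitmap surfResult = MatchPicBySurf(src, sub);
surfResult.Save("surf_matches.png");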

What is the difference between SIFT and SURF?

SIFT performs best under scale and rotation changes, while SURF performs best under changes in brightness.

The SIFT approach to object recognition has three main stages (mapped onto OpenCvSharp in the sketch after this list):
1. Extract key points;
2. Attach detailed local-feature information to each key point, forming the so-called descriptor;
3. Compare the feature points (key points together with their feature vectors) of the two images pairwise to find several pairs of matching feature points, which establishes the correspondence between the scenes.
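
In OpenCvSharp terms, steps 1 and 2 are handled together by a single DetectAndCompute call (it returns the key points and fills a descriptor Mat), and step 3 is the matcher stage. A condensed sketch, assuming img1 and img2 are already-loaded Mat images:

using (var sift = OpenCvSharp.XFeatures2D.SIFT.Create())
using (var descriptors1 = new Mat())
using (var descriptors2 = new Mat())
using (var matcher = new BFMatcher())
{
    // Steps 1 + 2: extract key points and compute their descriptors
    sift.DetectAndCompute(img1, null, out KeyPoint[] keyPoints1, descriptors1);
    sift.DetectAndCompute(img2, null, out KeyPoint[] keyPoints2, descriptors2);

    // Step 3: pairwise comparison of descriptors to find matching feature points
    DMatch[][] matches = matcher.KnnMatch(descriptors1, descriptors2, k: 2);
}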

SURF improves on SIFT: it is not only faster to compute but also more robust. Under moderate conditions, OpenCV's SURF algorithm can match objects between two images essentially in real time.

The implementation principles of the two are very similar. 

This article was last edited at 2020-11-28 18:14:24
