// 数字图像处理实验代码 (Digital Image Processing lab code)
// Published April 16, 2019 (blog export; page chrome removed)

//
// Created by XQ on 2019-03-28.
//

#include<iostream>
#include<string>

#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;


bool loadImage(string &, int);
bool binaryImage(string &, int flag = 1, int r = 80);
bool logTransformImage(string &, int flag = 1, double r = 5);
bool gammaImage(string &, int flag = 1, double r = 1.2);
bool complementColorImage(string &, int flag = 1);
/*int main(){
    string str = "/Volumes/数据/图片/2k/lostwall.jpg";
    cout << loadImage(str,1) << endl;
    cout << binaryImage(str,0) << endl;
    cout << logTransformImage(str,0) <<endl;
    cout << gammaImage(str,0) << endl;
    cout << complementColorImage(str) << endl;
}*/

/* Read an image with OpenCV and display it in a window.
 *
 * @param src  path of the image file
 * @param flag imread flag (1 = color, 0 = grayscale); default 1
 * @return false when the file could not be read, true otherwise
 * */
bool loadImage(string &src, int flag = 1){
    Mat img = imread(src, flag);
    if (img.empty()) return false;   // imread returns an empty Mat on failure
    imshow("simpleOpenImage", img);
    waitKey(0);
    destroyAllWindows();
    return true;
}

/* Binarize a grayscale image with an adjustable threshold.
 *
 * @param src  path of the image file
 * @param flag imread flag; the declaration defaults it to 1 (color)
 * @param r    threshold in [0,255]; pixels above r become 255, the rest 0
 * @return false when the file could not be read, true otherwise
 * */
bool binaryImage(string &src, int flag, int r){
    Mat image = imread(src, flag);
    if (image.empty()) return false;        // guard against a bad path
    imshow("inputImage", image);

    // The thresholding loop indexes the pixels as single-channel uchar, so a
    // color load (flag != 0, which is the declared default) must be converted
    // to grayscale first — indexing a 3-channel Mat with at<uchar> is invalid.
    Mat res;
    if (image.channels() == 3)
        cvtColor(image, res, COLOR_BGR2GRAY);
    else
        res = image.clone();

    // Binarize: every pixel above the threshold becomes white, the rest black.
    for (int i = 0; i < res.rows; i++){
        for (int j = 0; j < res.cols; j++){
            res.at<uchar>(i, j) = (res.at<uchar>(i, j) > r) ? (uchar)255 : (uchar)0;
        }
    }
    imshow("binaryImage", res);
    waitKey(0);
    destroyAllWindows();
    return true;
}

/* Log transform of a grayscale image: s = r * log(1 + g).
 *
 * @param src  path of the image file
 * @param flag imread flag
 * @param r    gain of the log transform
 * @return false when the file could not be read, true otherwise
 * */
bool logTransformImage(string &src, int flag, double r){
    Mat image = imread(src, flag);
    if (image.empty()) return false;
    imshow("inputImage", image);

    // Work in double precision. The original per-pixel uchar loop quantized
    // the log response to 8 bits *before* normalization, throwing away the
    // dynamic range the transform is meant to expand.
    Mat work;
    image.convertTo(work, CV_64F);
    work += 1.0;            // 1 + g
    log(work, work);        // log(1 + g)
    work *= r;              // r * log(1 + g)

    normalize(work, work, 0, 255, NORM_MINMAX);
    Mat res;
    convertScaleAbs(work, res);   // back to 8-bit for display
    imshow("logTransformImage", res);
    waitKey(0);
    destroyAllWindows();
    return true;
}

/* Gamma (power-law) transform of a grayscale image: s = g^r.
 *
 * @param src  path of the image file
 * @param flag imread flag
 * @param r    gamma exponent
 * @return false when the file could not be read, true otherwise
 * */
bool gammaImage(string &src, int flag, double r){
    Mat image = imread(src, flag);
    if (image.empty()) return false;
    imshow("inputImage", image);

    // Apply s = g^r in double precision. The original per-pixel uchar loop
    // saturated at 255 before normalization (pow(255, 1.2) ≈ 777), which
    // clipped every bright pixel to the same value for any r > 1.
    Mat work;
    image.convertTo(work, CV_64F);
    pow(work, r, work);

    normalize(work, work, 0, 255, NORM_MINMAX);
    Mat res;
    convertScaleAbs(work, res);   // back to 8-bit for display
    imshow("gammaImage", res);
    waitKey(0);
    destroyAllWindows();
    return true;
}

/* Complement (negative) transform of a color image.
 *
 * Details:
 *  Replaces each BGR pixel by its CMY complement and displays the result.
 * */
bool complementColorImage(string &src, int flag){
    Mat image, res;
    image = imread(src, flag);
    imshow("inputImage", image);

    // Destination image for the complement transform.
    res = image.clone();

    // Complement transform, pixel by pixel.
    Vec3b pixel, temp;

    for(int i = 0; i < image.rows; i++){
        for(int j = 0; j < image.cols; j++){
            pixel = image.at<Vec3b>(i, j);          // BGR -> CMY

            /*
            The three channels of `pixel` are B,G,R; the complement lives in
            the CMY space:
              C = 255 - R;  M = 255 - G;  Y = 255 - B;
            NOTE(review): storing C into channel 0 and Y into channel 2 also
            swaps B and R relative to a plain per-channel negative
            (temp[k] = 255 - pixel[k]); presumably intentional so the result
            is ordered C,M,Y — confirm this is the desired display.
            */

            temp[0] = 255 - pixel[2];       // C = 255 - R
            temp[1] = 255 - pixel[1];       // M = 255 - G
            temp[2] = 255 - pixel[0];       // Y = 255 - B

            res.at <Vec3b>(i, j) = temp;

        }
    }
    imshow("complementColorImage", res);
    waitKey(0);
    destroyAllWindows();
    return true;

}
//
// Created by XQ on 2019-03-28.
//
#include<iostream>
#include<string>
#include<cmath>

#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;



bool normalizeHistogramImage(string &, int flag = 0);
bool equalizeHistogramImage(string &,int flag = 0);
Mat equalizeHistogram(Mat&);
bool HisRGB(Mat &);
bool HisGray(Mat &);
bool His2D(Mat &);


/*int main(){
    string str = "/Volumes/数据/图片/2k/lostwall.jpg";
    cout << "1.normalizeHistogramImage:" << normalizeHistogramImage(str) << endl;
    cout << "2.equalizeHistogramImage-gray:" << equalizeHistogramImage(str,0) << endl;;
    cout << "3.equalizeHistogramImage-color:" << equalizeHistogramImage(str,1) << endl;

}*/

/* Normalized histogram of an image.
 *
 * Details:
 *  Computes and displays the grayscale histogram of the image loaded with
 *  `flag`, then reloads in color for the per-channel RGB histogram and the
 *  2D histogram over channels 0 and 1.
 *
 * @return false when the file could not be read, true otherwise
 * */
bool normalizeHistogramImage(string &src, int flag){
    Mat image = imread(src, flag);
    if (image.empty()) return false;   // unreadable file: nothing to plot
    HisGray(image);

    image = imread(src, 1);            // reload in color for the RGB/2D plots
    HisRGB(image);
    His2D(image);

    return true;
}

/* Histogram equalization.
 *
 * Details:
 *  flag == 0: equalize a grayscale image and plot its histogram.
 *  flag == 1: equalize each BGR channel independently, merge, and plot.
 *
 * @return false when the file could not be read, true otherwise
 * */
bool equalizeHistogramImage(string &src, int flag){
    Mat image = imread(src, flag);          // both branches loaded with `flag`
    if (image.empty()) return false;        // guard against an unreadable file

    if (flag == 0){
        imshow("input", image);

        Mat res = equalizeHistogram(image);
        imshow("equalizeHistogramImageGray", res);
        waitKey(0);
        destroyAllWindows();
        HisGray(res);
    } else if (flag == 1){
        imshow("input", image);

        // Equalize the three BGR channels separately, then recombine.
        Mat res;
        vector<Mat> t;
        split(image, t);
        for (int i = 0; i < 3; i++){
            t[i] = equalizeHistogram(t[i]);
        }
        merge(t, res);

        imshow("equalizeHistogramImageColor", res);
        waitKey(0);
        destroyAllWindows();
        HisRGB(res);
    }
    return true;
}


/* Helper: classic histogram equalization of a single-channel 8-bit image.
 *
 * @param input single-channel (CV_8U) image
 * @return equalized copy of the input (input itself is not modified)
 * */
Mat equalizeHistogram(Mat& input){
    Mat res = input.clone();

    int gray[256] = {0};            // pixel count per gray level
    double gray_prob[256] = {0};    // gray-level frequency
    double gray_count[256] = {0};   // cumulative distribution
    int gray_equalized[256] = {0};  // remapped gray value per level

    int gray_sum = res.rows * res.cols;  // total number of pixels
    if (gray_sum == 0) return res;       // empty image: avoid division by zero

    // Count how many pixels fall on each gray level.
    for (int i = 0; i < res.rows; i++){
        auto * p = res.ptr<uchar>(i);
        for (int j = 0; j < res.cols; j++){
            gray[p[j]]++;
        }
    }

    // Frequency of each gray level.
    for (int i = 0; i < 256; i++){
        gray_prob[i] = (double)gray[i] / gray_sum;
    }

    // Cumulative distribution function.
    gray_count[0] = gray_prob[0];
    for (int i = 1; i < 256; i++){
        gray_count[i] = gray_count[i-1] + gray_prob[i];
    }

    // Remap each level: round((N-1) * CDF), i.e. CDF * 255 + 0.5 truncated.
    for (int i = 0; i < 256; i++){
        gray_equalized[i] = (int)(gray_count[i] * 255 + 0.5);
    }

    // Apply the mapping to every pixel of the copy.
    for (int i = 0; i < res.rows; i++){
        auto * p = res.ptr<uchar>(i);
        for (int j = 0; j < res.cols; j++){
            p[j] = gray_equalized[p[j]];
        }
    }
    return res;
}

/* Draw the three channel histograms of a BGR image side by side
 * (red bars | green bars | blue bars), each normalized to its own peak. */
bool HisRGB(Mat &image)
{
    imshow("input", image);
    int bins = 256;

    int hist_size[] = { bins };
    float range[] = { 0, 256 };
    const float* ranges[] = { range };

    // OpenCV stores images as BGR: channel 2 is red, 1 green, 0 blue.
    // (The original computed channel 0 into hist_r, i.e. it plotted the
    // blue histogram in red and the red histogram in blue.)
    MatND hist_r, hist_g, hist_b;
    int channels_r[] = { 2 };
    calcHist(&image, 1, channels_r, Mat(), // do not use mask
             hist_r, 1, hist_size, ranges,
             true, // the histogram is uniform
             false);

    int channels_g[] = { 1 };
    calcHist(&image, 1, channels_g, Mat(),
             hist_g, 1, hist_size, ranges,
             true,
             false);

    int channels_b[] = { 0 };
    calcHist(&image, 1, channels_b, Mat(),
             hist_b, 1, hist_size, ranges,
             true,
             false);

    double max_val_r, max_val_g, max_val_b;
    minMaxLoc(hist_r, 0, &max_val_r, 0, 0);
    minMaxLoc(hist_g, 0, &max_val_g, 0, 0);
    minMaxLoc(hist_b, 0, &max_val_b, 0, 0);
    int scale = 1;

    int hist_height = 256;
    Mat colorHis = Mat::zeros(hist_height, bins * 3, CV_8UC3);
    for (int i = 0; i < bins; i++)
    {
        float bin_val_r = hist_r.at<float>(i);
        float bin_val_g = hist_g.at<float>(i);
        float bin_val_b = hist_b.at<float>(i);
        int intensity_r = cvRound(bin_val_r*hist_height / max_val_r);  // bar height
        int intensity_g = cvRound(bin_val_g*hist_height / max_val_g);  // bar height
        int intensity_b = cvRound(bin_val_b*hist_height / max_val_b);  // bar height
        rectangle(colorHis, Point(i*scale, hist_height - 1),
                  Point((i + 1)*scale - 1, hist_height - intensity_r),
                  CV_RGB(255, 0, 0));

        rectangle(colorHis, Point((i + bins)*scale, hist_height - 1),
                  Point((i + bins + 1)*scale - 1, hist_height - intensity_g),
                  CV_RGB(0, 255, 0));

        rectangle(colorHis, Point((i + bins * 2)*scale, hist_height - 1),
                  Point((i + bins * 2 + 1)*scale - 1, hist_height - intensity_b),
                  CV_RGB(0, 0, 255));

    }
    namedWindow("HisRGB", WINDOW_AUTOSIZE); // Create a window for display.
    imshow("HisRGB", colorHis);
    waitKey(0);
    destroyAllWindows();
    return true;

}

/* Render the histogram of a single-channel image as white bars,
 * normalized so the tallest bar spans the full plot height. */
bool HisGray(Mat &image){

    imshow("input", image);
    int bins = 256;

    int dims[] = { bins };
    float span[] = { 0, 256 };
    const float* histRanges[] = { span };

    MatND counts;
    int chan[] = { 0 };
    calcHist(&image, 1, chan, Mat(), // no mask
             counts, 1, dims, histRanges,
             true,   // uniform bins
             false); // do not accumulate

    double peak;
    minMaxLoc(counts, 0, &peak, 0, 0);

    const int barWidth = 2;
    const int plotHeight = 256;
    Mat plot = Mat::zeros(plotHeight, bins * barWidth, CV_8UC3);
    for (int b = 0; b < bins; b++)
    {
        float c = counts.at<float>(b);
        int barTop = cvRound(c * plotHeight / peak);   // bar height in pixels
        rectangle(plot, Point(b * barWidth, plotHeight - 1),
                  Point((b + 1) * barWidth - 1, plotHeight - barTop),
                  CV_RGB(255, 255, 255));
    }

    imshow("HistogramGray", plot);
    waitKey(0);
    destroyAllWindows();
    return true;
}

/* 2D histogram over channels 0 and 1 of the image, rendered as a
 * grayscale intensity map (brighter cell = more pixels in that bin). */
bool His2D(Mat &image){
    imshow("input", image);

    const int xBins = 256, yBins = 256;
    int dims[] = { xBins, yBins };

    float xSpan[] = { 0, 256 };
    float ySpan[] = { 0, 256 };
    const float* histRanges[] = { xSpan, ySpan };
    MatND counts;

    int chans[] = { 0, 1 };
    calcHist(&image, 1, chans, Mat(), // no mask
             counts, 2, dims, histRanges,
             true,   // uniform bins
             false); // do not accumulate
    double peak = 0;
    minMaxLoc(counts, 0, &peak, 0, 0);

    const int cell = 2;
    Mat plot = Mat::zeros(yBins * cell, xBins * cell, CV_8UC3);
    for (int x = 0; x < xBins; x++) {
        for (int y = 0; y < yBins; y++) {
            float c = counts.at<float>(x, y);
            int shade = cvRound(c * 255 / peak);   // map count to [0,255]
            rectangle(plot, Point(x * cell, y * cell),
                      Point((x + 1) * cell - 1, (y + 1) * cell - 1),
                      Scalar::all(shade),
                      FILLED);
        }
    }
    imshow("His2D", plot);
    waitKey(0);
    destroyAllWindows();
    return true;
}


//
// Created by XQ on 2019-03-28.
//

#include<iostream>
#include<string>


#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;


bool Filter(string &, int, int, int, string);
bool enhanceFilter(string &, int flag = 0);

/*int main(){
    string str = "/Volumes/数据/图片/2k/lostwall.jpg";
    cout << "3*3:" << Filter(str, 3, 3, 0, "mean") << endl;
    cout << "5*5:" << Filter(str, 5, 5, 0, "mean") << endl;
    cout << "9*9:" << Filter(str, 9, 9, 0, "mean") << endl;
    cout << "3*3:" << Filter(str, 3, 3, 0, "Gaussi") << endl;
    cout << "5*5:" << Filter(str, 5, 5, 0, "Gaussi") << endl;
    cout << "9*9:" << Filter(str, 9, 9, 0, "Gaussi") << endl;
    cout << "Laplacian:" << Filter(str, 0, 0, 0, "Laplacian") << endl;
    cout << "Robert:" << Filter(str, 0, 0, 0, "Robert") << endl;
    cout << "Sobel:" << Filter(str, 0, 0, 0, "Sobel") << endl;

    cout << "enhanceFilter:" << enhanceFilter(str)<< endl;
    cout << "enhanceFilter:" << enhanceFilter(str,1)<< endl;

    cout << "3*3:" << Filter(str, 3, 3, 1, "mean") << endl;
    cout << "5*5:" << Filter(str, 5, 5, 1, "mean") << endl;
    cout << "9*9:" << Filter(str, 9, 9, 1, "mean") << endl;
    cout << "3*3:" << Filter(str, 3, 3, 1, "Gaussi") << endl;
    cout << "5*5:" << Filter(str, 5, 5, 1, "Gaussi") << endl;
    cout << "9*9:" << Filter(str, 9, 9, 1, "Gaussi") << endl;
    cout << "Laplacian:" << Filter(str, 0, 0, 1, "Laplacian") << endl;
    cout << "Robert:" << Filter(str, 0, 0, 1, "Robert") << endl;
    cout << "Sobel:" << Filter(str, 0, 0, 1, "Sobel") << endl;

}*/






/*
 * 利用均值模板平滑灰度图像。
 *  具体内容:利用 OpenCV 对图像像素进行操作,分别利用 3*3、5*5 和 9*9 尺寸的均值模板平滑灰度图像。
 * 利用高斯模板平滑灰度图像。
 *  具体内容:利用 OpenCV 对图像像素进行操作,分别利用 3*3、5*5 和 9*9 尺寸的高斯模板平滑灰度图像。
 * 利用 Laplacian、Robert、Sobel 模板锐化灰度图像。
 *  具体内容:利用 OpenCV 对图像像素进行操作,分别利用 Laplacian、Robert、 Sobel 模板锐化灰度图像。
 * 利用高提升滤波算法增强灰度图像。
 *  具体内容:利用 OpenCV 对图像像素进行操作,设计高提升滤波算法增 强图像。
 * 利用均值模板平滑彩色图像。
 *  具体内容:利用 OpenCV 分别对图像像素的 RGB 三个通道进行操作,利 用 3*3、5*5 和 9*9 尺寸的均值模板平滑彩色图像。
 * 利用高斯模板平滑彩色图像。
 *  具体内容:利用 OpenCV 分别对图像像素的 RGB 三个通道进行操作,分 别利用 3*3、5*5 和 9*9 尺寸的高斯模板平滑彩色图像。
 * 利用 Laplacian、Robert、Sobel 模板锐化彩色图像
 *  具体内容:利用 OpenCV 分别对图像像素的 RGB 三个通道进行操作,分 别利用 Laplacian、Robert、Sobel 模板锐化彩色图像。
 * */
// Smoothing (mean / Gaussian) and sharpening (Laplacian / Robert / Sobel)
// of a gray (flag == 0) or color (flag == 1) image.
//
// @param src   path of the image file
// @param a,b   kernel width/height for "mean" and "Gaussi"; unused otherwise
// @param flag  imread flag: 0 grayscale, 1 color
// @param model one of "mean", "Gaussi", "Laplacian", "Robert", "Sobel"
//
// NOTE(review): the forward declaration uses `string &`; the original
// definition took `cv::String &`, which does not match it — fixed here.
bool Filter(string &src, int a, int b, int flag, string model){
    Mat image = imread(src, flag);
    imshow("input", image);
    Mat res = image.clone();
    if (model == "mean"){
        blur(image, res, Size(a, b));
    } else if (model == "Gaussi"){
        GaussianBlur(image, res, Size(a, b), 1);
    } else if (model == "Laplacian"){
        // Sharpening kernel: identity plus (negative) Laplacian.
        Mat kernel = (Mat_<float>(3, 3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
        filter2D(image, res, image.depth(), kernel);
    } else if (model == "Robert"){
        if (flag == 0){
            for (int i = 0; i < image.rows - 1; i++) {
                for (int j = 0; j < image.cols - 1; j++) {
                    // Roberts cross gradient: g = sqrt(d1^2 + d2^2)
                    int t1 = (image.at<uchar>(i, j) -
                              image.at<uchar>(i + 1, j + 1))*
                             (image.at<uchar>(i, j) -
                              image.at<uchar>(i + 1, j + 1));
                    int t2 = (image.at<uchar>(i + 1, j) -
                              image.at<uchar>(i, j + 1))*
                             (image.at<uchar>(i + 1, j) -
                              image.at<uchar>(i, j + 1));
                    res.at<uchar>(i, j) = (uchar)sqrt(t1 + t2);
                }
            }
        } else if (flag == 1){
            // Per-channel Roberts gradient via the legacy C API.
            CvScalar t1, t2, t3, t4, t;
            auto res2 = IplImage(image);

            for (int i = 0; i < res2.height - 1; i++)
            {
                for (int j = 0; j < res2.width - 1; j++)
                {
                    t1 = cvGet2D(&res2, i, j);
                    t2 = cvGet2D(&res2, i + 1, j + 1);
                    t3 = cvGet2D(&res2, i, j + 1);
                    t4 = cvGet2D(&res2, i + 1, j);

                    for (int k = 0; k < 3; k++)
                    {
                        int t7 = (int)((t1.val[k] - t2.val[k])*(t1.val[k] - t2.val[k]) + (t4.val[k] - t3.val[k])*(t4.val[k] - t3.val[k]));
                        t.val[k] = sqrt(t7);
                    }
                    cvSet2D(&res2, i, j, t);
                }
            }
            res = cvarrToMat(&res2);
        }
    } else if (model == "Sobel"){
        Mat grad_x, grad_y;
        Mat abs_grad_x, abs_grad_y;

        Sobel(image, grad_x, image.depth(), 1, 0, 3, 1, 1, BORDER_DEFAULT);
        convertScaleAbs(grad_x, abs_grad_x);
        imshow("X-Sobel", abs_grad_x);

        Sobel(image, grad_y, image.depth(), 0, 1, 3, 1, 1, BORDER_DEFAULT);
        convertScaleAbs(grad_y, abs_grad_y);
        imshow("Y-Sobel", abs_grad_y);

        // Approximate gradient magnitude: equal blend of |Gx| and |Gy|.
        addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, res);
    }

    imshow(model + "Filter", res);
    waitKey(0);
    destroyAllWindows();
    return true;
}

/* High-boost filter: convolve img with a template, scale the response by
 * fCoef, blend it with the original at proportion dProportion, then
 * re-normalize each channel into [0,255] and write the result into dst.
 *
 * @param dProportion blend proportion of the original image
 * @param nTempH/nTempW template height/width
 * @param nTempMY/nTempMX template center element coordinates
 * @param pfArray template coefficients, row-major, nTempH*nTempW entries
 * @param fCoef scale factor applied to the template response
 *
 * Fixes vs. the original: the inner convolution read img.at<Vec3b>(i, j)
 * for every template tap (never the neighborhood) and indexed the template
 * with pfArray[k*nTempW + 1] (literal 1 instead of l). It now reads pixel
 * (i - nTempMY + k, j - nTempMX + l) weighted by pfArray[k*nTempW + l].
 * */
void EnhanceFilter(Mat img, Mat &dst, double dProportion, int nTempH, int nTempW, int nTempMY, int nTempMX, float *pfArray, float fCoef){

    int i, j, nHeight = img.rows, nWidth = img.cols;
    // Per-channel buffers for the blended response (values may leave [0,255]).
    vector<vector<int>> GrayMat1, GrayMat2, GrayMat3;
    vector<int> vecRow1(nWidth, 0), vecRow2(nWidth, 0), vecRow3(nWidth, 0);
    for (i = 0; i < nHeight; i++)
    {
        GrayMat1.push_back(vecRow1);
        GrayMat2.push_back(vecRow2);
        GrayMat3.push_back(vecRow3);
    }

    // Sharpen (signed response) and blend with the original image.
    for (i = nTempMY; i < nHeight - (nTempH - nTempMY) + 1; i++)
    {
        for (j = nTempMX; j < nWidth - (nTempW - nTempMX) + 1; j++)
        {
            float fResult1 = 0;
            float fResult2 = 0;
            float fResult3 = 0;
            for (int k = 0; k < nTempH; k++)
            {
                for (int l = 0; l < nTempW; l++)
                {
                    // Weighted sum over the neighborhood, per channel.
                    const Vec3b &pix = img.at<Vec3b>(i - nTempMY + k, j - nTempMX + l);
                    float w = pfArray[k*nTempW + l];
                    fResult1 += pix[0] * w;
                    fResult2 += pix[1] * w;
                    fResult3 += pix[2] * w;
                }
            }

            // Scale each channel's response, clamp it to [-255,255], then
            // blend with the original pixel (rounding via +0.5).
            fResult1 *= fCoef;
            if (fResult1 > 255)
                fResult1 = 255;
            if (fResult1 < -255)
                fResult1 = -255;
            GrayMat1[i][j] = dProportion * img.at<Vec3b>(i, j)[0] + fResult1 + 0.5;

            fResult2 *= fCoef;
            if (fResult2 > 255)
                fResult2 = 255;
            if (fResult2 < -255)
                fResult2 = -255;
            GrayMat2[i][j] = dProportion * img.at<Vec3b>(i, j)[1] + fResult2 + 0.5;

            fResult3 *= fCoef;
            if (fResult3 > 255)
                fResult3 = 255;
            if (fResult3 < -255)
                fResult3 = -255;
            GrayMat3[i][j] = dProportion * img.at<Vec3b>(i, j)[2] + fResult3 + 0.5;
        }
    }
    int nMax1 = 0, nMax2 = 0, nMax3 = 0;            // per-channel maximum
    int nMin1 = 65535, nMin2 = 65535, nMin3 = 65535; // per-channel minimum
    // Find the min/max of each channel's blended response.
    for (i = nTempMY; i < nHeight - (nTempH - nTempMY) + 1; i++)
    {
        for (j = nTempMX; j < nWidth - (nTempW - nTempMX) + 1; j++)
        {
            if (GrayMat1[i][j] > nMax1)
                nMax1 = GrayMat1[i][j];
            if (GrayMat1[i][j] < nMin1)
                nMin1 = GrayMat1[i][j];

            if (GrayMat2[i][j] > nMax2)
                nMax2 = GrayMat2[i][j];
            if (GrayMat2[i][j] < nMin2)
                nMin2 = GrayMat2[i][j];

            if (GrayMat3[i][j] > nMax3)
                nMax3 = GrayMat3[i][j];
            if (GrayMat3[i][j] < nMin3)
                nMin3 = GrayMat3[i][j];
        }
    }
    // Rescale each channel back into [0,255] and write into dst.
    int nSpan1 = nMax1 - nMin1, nSpan2 = nMax2 - nMin2, nSpan3 = nMax3 - nMin3;
    for (i = nTempMY; i < nHeight - (nTempH - nTempMY) + 1; i++)
    {
        for (j = nTempMX; j < nWidth - (nTempW - nTempMX) + 1; j++)
        {
            int br, bg, bb;
            if (nSpan1 > 0)
                br = (GrayMat1[i][j] - nMin1) * 255 / nSpan1;
            else if (GrayMat1[i][j] <= 255)
                br = GrayMat1[i][j];
            else
                br = 255;
            dst.at<Vec3b>(i, j)[0] = br;

            if (nSpan2 > 0)
                bg = (GrayMat2[i][j] - nMin2) * 255 / nSpan2;
            else if (GrayMat2[i][j] <= 255)
                bg = GrayMat2[i][j];
            else
                bg = 255;
            dst.at<Vec3b>(i, j)[1] = bg;

            if (nSpan3 > 0)
                bb = (GrayMat3[i][j] - nMin3) * 255 / nSpan3;
            else if (GrayMat3[i][j] <= 255)
                bb = GrayMat3[i][j];
            else
                bb = 255;
            dst.at<Vec3b>(i, j)[2] = bb;
        }
    }
}

/* High-boost filtering demo: sharpen with a 45-degree isotropic Laplacian
 * template and blend with the original image (proportion 1.8, coef 1).
 *
 * EnhanceFilter parameters, for reference:
 *   dProportion: blend proportion of the original image
 *   nTempH/nTempW: template height/width
 *   nTempMY/nTempMX: template center coordinates
 *   pfArray: pointer to the template array
 *   fCoef: template coefficient
 *
 * @return false when the file could not be read, true otherwise
 * */
bool enhanceFilter(string &src, int flag){
    Mat img = imread(src, flag);
    if (img.empty()) return false;        // unreadable file
    imshow("input", img);
    Mat dst = img.clone();

    // 45-degree isotropic Laplacian (the only template used here; the
    // original listed several other, unused, templates).
    float Template_Laplacian2[9] = { -1, -1, -1, -1, 8, -1, -1, -1, -1 };

    EnhanceFilter(img, dst, 1.8, 3, 3, 1, 1, Template_Laplacian2, 1);

    imshow("Enhance Laplacian", dst);
    waitKey(0);
    destroyAllWindows();
    return true;
}
//
// Created by XQ on 2019-03-29.
//

#include<iostream>
#include<string>

#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui.hpp>
#include <zconf.h>

using namespace std;
using namespace cv;


bool addSaltOrPepper(Mat &image, int flag,int n);
int GaussianNoise(double mu, double sigma);
bool addGaussianNoise(Mat& image);
Mat digitalMeanFilter(Mat& image, int size);
Mat geometryMeanFilter(Mat &image, int size);
Mat harmonicMeanFilter(Mat &image, int size);
Mat inverseHarmonicMeanFilter(Mat &image, int size, double Q);

Mat medianFilter(Mat &image, int size);
Mat selfAdaptMeanFilter(Mat &image, int size);
Mat selfAdaptMedianFilter(Mat &image, int size);

// 1. Mean filtering: add salt, pepper, Gaussian, and salt-and-pepper noise
//    in turn, and show four mean-filter variants (arithmetic, geometric,
//    harmonic, inverse harmonic, all 5x5) on each noisy image.
//    NOTE(review): addSaltOrPepper reseeds rand() with time(NULL) on every
//    call, and usleep(500) sleeps only 500 *microseconds*, so back-to-back
//    calls usually reuse the same seed and hit identical pixels — confirm
//    whether that is intended.
bool meanFilter(string &src, int flag){
    Mat image = imread(src,flag);
    imshow("input", image);

    // Salt noise only.
    Mat noise = image.clone();
    addSaltOrPepper(noise,0,1000);
    imshow("with 1000 salt",noise);
    imshow("digitalMeanFilter", digitalMeanFilter(noise, 5));
    imshow("geometryMeanFilter", geometryMeanFilter(noise, 5));
    imshow("harmonicMeanFilter", harmonicMeanFilter(noise, 5));
    imshow("inverseHarmonicMeanFilter", inverseHarmonicMeanFilter(noise, 5, 1));
    waitKey(0);
    destroyAllWindows();

    // Pepper noise only.
    imshow("input", image);
    noise = image.clone();
    addSaltOrPepper(noise,1,1000);
    imshow("with 1000 pepper",noise);
    imshow("digitalMeanFilter", digitalMeanFilter(noise, 5));
    imshow("geometryMeanFilter", geometryMeanFilter(noise, 5));
    imshow("harmonicMeanFilter", harmonicMeanFilter(noise, 5));
    imshow("inverseHarmonicMeanFilter", inverseHarmonicMeanFilter(noise, 5, 1));
    waitKey(0);
    destroyAllWindows();

    // Gaussian noise.
    imshow("input", image);
    noise = image.clone();
    addGaussianNoise(noise);
    imshow("with gaussi",noise);
    imshow("digitalMeanFilter", digitalMeanFilter(noise, 5));
    imshow("geometryMeanFilter", geometryMeanFilter(noise ,5));
    imshow("harmonicMeanFilter", harmonicMeanFilter(noise, 5));
    imshow("inverseHarmonicMeanFilter", inverseHarmonicMeanFilter(noise, 5, 1));
    waitKey(0);
    destroyAllWindows();

    // Salt and pepper combined.
    imshow("input", image);
    noise = image.clone();
    addSaltOrPepper(noise,0,1000);
    usleep(500);
    addSaltOrPepper(noise,1,1000);
    imshow("with 1000 pepper and 1000 salt",noise);
    imshow("digitalMeanFilter", digitalMeanFilter(noise, 5));
    imshow("geometryMeanFilter", geometryMeanFilter(noise, 5));
    imshow("harmonicMeanFilter", harmonicMeanFilter(noise, 5));
    imshow("inverseHarmonicMeanFilter", inverseHarmonicMeanFilter(noise, 5, 1));
    waitKey(0);
    destroyAllWindows();
    return true;
}
// 2. Median filtering: denoise salt / pepper / salt-and-pepper noise with
//    5*5 and 9*9 median filters.
//    Fixes vs. the original: the median filter is now applied to the noisy
//    image (`noise`) instead of the clean input, and the third section
//    really adds pepper (flag 1) after salt instead of salt twice.
bool medianFilter(string &src, int flag){
    Mat image = imread(src,flag);
    imshow("input", image);

    // Salt noise only.
    Mat noise = image.clone();
    addSaltOrPepper(noise,0,1000);
    imshow("with 1000 salt",noise);
    imshow("with 1000 salt, 5*5 median",medianFilter(noise,5));
    imshow("with 1000 salt, 9*9 median",medianFilter(noise,9));
    waitKey(0);
    destroyAllWindows();

    // Pepper noise only.
    imshow("input", image);
    noise = image.clone();
    addSaltOrPepper(noise,1,1000);
    imshow("with 1000 pepper",noise);
    imshow("with 1000 pepper, 5*5 median",medianFilter(noise,5));
    imshow("with 1000 pepper, 9*9 median",medianFilter(noise,9));
    waitKey(0);
    destroyAllWindows();

    // Salt and pepper combined.
    imshow("input", image);
    noise = image.clone();
    addSaltOrPepper(noise,0,1000);
    usleep(500);
    addSaltOrPepper(noise,1,1000);
    imshow("with 1000 salt and 1000 pepper",noise);
    imshow("with 1000 salt and 1000 pepper, 5*5 median",medianFilter(noise,5));
    imshow("with 1000 salt and 1000 pepper, 9*9 median",medianFilter(noise,9));
    waitKey(0);
    destroyAllWindows();

    return true;
}
// 3. Adaptive mean filtering: compare the adaptive mean filter with the
//    plain arithmetic mean filter on a salt-and-pepper corrupted image.
bool selfAdaptMeanFilter(string &src, int flag){
    Mat image = imread(src, flag);
    imshow("input", image);

    Mat noisy = image.clone();
    addSaltOrPepper(noisy, 0, 1000);
    usleep(500);                      // nudge the RNG seed between the two calls
    addSaltOrPepper(noisy, 1, 1000);
    imshow("with 1000 pepper and 1000 salt", noisy);

    imshow("selfAdaptMeanFilter", selfAdaptMeanFilter(noisy, 7));
    imshow("digitalMeanFilter", digitalMeanFilter(noisy, 7));

    waitKey(0);
    destroyAllWindows();
    return true;
}
// 4. Adaptive median filtering: compare the adaptive median filter with the
//    plain median filter on a salt-and-pepper corrupted image.
bool selfAdaptMedianFilter(string &src, int flag){
    Mat image = imread(src, flag);
    imshow("input", image);

    Mat noisy = image.clone();
    addSaltOrPepper(noisy, 0, 1000);
    usleep(500);                      // nudge the RNG seed between the two calls
    addSaltOrPepper(noisy, 1, 1000);
    imshow("with 1000 pepper and 1000 salt", noisy);

    imshow("selfAdaptMedianFilter", selfAdaptMedianFilter(noisy, 7));
    imshow("medianFilter", medianFilter(noisy, 7));

    waitKey(0);
    destroyAllWindows();
    return true;
}
// 5. Color mean filtering: add Gaussian noise, then show arithmetic and
//    geometric 5*5 mean filters on the color image.
bool colorMeanFilter(string &src, int flag){
    Mat image = imread(src, flag);
    imshow("input", image);

    Mat noisy = image.clone();
    addGaussianNoise(noisy);
    imshow("with gaussi", noisy);

    imshow("digitalMeanFilter", digitalMeanFilter(noisy, 5));
    imshow("geometryMeanFilter", geometryMeanFilter(noisy, 5));

    waitKey(0);
    destroyAllWindows();
    return true;
}

/*int main(){
    string str = "/Volumes/数据/图片/2k/lostwall.jpg";
    cout << "1.meanFilter:" << meanFilter(str,0) << endl;
    cout << "2.medianFilter:" << medianFilter(str,0) << endl;
    cout << "3.selfAdaptMeanFilter:" << selfAdaptMeanFilter(str,0) << endl;
    cout << "4.selfAdaptMedianFilter:" << selfAdaptMedianFilter(str,0) << endl;
    cout << "5.colorMeanFilter:" << colorMeanFilter(str,1) << endl;
}*/

/* Add salt or pepper noise to n randomly chosen pixels (in place).
 *
 * flag = 0: salt (white) noise
 * flag = 1: pepper (black) noise
 *
 * Fixes vs. the original:
 *  - rand() is now seeded once instead of on every call; reseeding with
 *    time(NULL) made consecutive calls within the same second replay the
 *    identical pixel sequence.
 *  - the color pepper branch indexed at<Vec3b>(j, i) — out of bounds for
 *    non-square images; it now uses (i, j).
 * */
bool addSaltOrPepper(Mat &image, int flag, int n){
    static bool seeded = false;
    if (!seeded){
        srand((unsigned)time(NULL));
        seeded = true;
    }
    for (int k = 0; k < n; k++)   // corrupt n randomly chosen pixels
    {
        int i = rand() % image.rows;
        int j = rand() % image.cols;
        if (image.channels() == 1){
            if (flag == 0)   image.at<uchar>(i, j) = 255;
            if (flag == 1)   image.at<uchar>(i, j) = 0;
        }
        else{
            for (int t = 0; t < image.channels(); t++){
                if (flag == 0)   image.at<Vec3b>(i, j)[t] = 255;
                if (flag == 1)   image.at<Vec3b>(i, j)[t] = 0;
            }
        }
    }
    return true;
}
/* One Gaussian noise sample (mean mu, std sigma) via Box-Muller, truncated
 * to int by the return type.
 *
 * Samples are generated in pairs: every second call returns the cached
 * second variate z1. Fix vs. the original: the fresh-pair branch also
 * returned z1, so both calls of each pair yielded the same number and z0
 * was never used; it now returns z0.
 * */
int GaussianNoise(double mu, double sigma){
    // Smallest positive double: guards log(0) below.
    const double epsilon = std::numeric_limits<double>::min();
    const double PI = 3.14159265358979323846;
    static double z0, z1;
    static bool flag = false;
    flag = !flag;
    // Every other call: hand out the cached second variate.
    if (!flag)  return z1 * sigma + mu;
    double u1, u2;
    // Draw two uniforms in [0,1]; u1 must stay > 0 for log().
    do{
        u1 = rand()*(1.0 / RAND_MAX);
        u2 = rand()*(1.0 / RAND_MAX);
    } while (u1 <= epsilon);
    z0 = sqrt(-2.0*log(u1))*cos(2 * PI * u2);
    z1 = sqrt(-2.0*log(u1))*sin(2 * PI * u2);
    return z0 * sigma + mu;
}
/* Corrupt an 8-bit image in place with Gaussian noise: each byte gets
 * GaussianNoise(2, 0.8) * 32 added, clamped to [0, 255]. */
bool addGaussianNoise(Mat &image){
    int rows = image.rows;
    int cols = image.cols * image.channels();   // treat each channel as a column

    // A continuous matrix can be walked as a single long row.
    if (image.isContinuous()){
        cols *= rows;
        rows = 1;
    }
    for (int r = 0; r < rows; r++){
        uchar *row = image.ptr<uchar>(r);
        for (int c = 0; c < cols; c++){
            int noisy = row[c] + GaussianNoise(2, 0.8) * 32;
            if (noisy < 0)    noisy = 0;
            if (noisy > 255)  noisy = 255;
            row[c] = (uchar)noisy;
        }
    }
    return true;
}

/* Mean filtering.
 *
 * Details: arithmetic mean filter with a size x size window; border pixels
 * (within size/2 of the edge) are left untouched. Works on single-channel
 * and 3-channel 8-bit images.
 *
 * Fix vs. the original: the window sum is taken from the unmodified source
 * image instead of the destination, which was being read back while it was
 * overwritten (already-filtered neighbors contaminated later windows).
 * */
//算数均值 (arithmetic mean)
Mat digitalMeanFilter(Mat &image, int size) {
    Mat dst = image.clone();
    int rows = image.rows, cols = image.cols;
    int start = size / 2;                       // window radius
    for (int m = start; m < rows - start; m++) {
        for (int n = start; n < cols - start; n++) {
            if (image.channels() == 1)          // grayscale
            {
                int sum = 0;
                for (int i = m - start; i <= m + start; i++) {
                    for (int j = n - start; j <= n + start; j++) {
                        sum += image.at<uchar>(i, j);
                    }
                }
                dst.at<uchar>(m, n) = uchar(sum / size / size);
            }
            else                                // color: average each channel
            {
                Vec3b pixel;
                int sum1[3] = { 0 };
                for (int i = m - start; i <= m + start; i++) {
                    for (int j = n - start; j <= n + start; j++) {
                        pixel = image.at<Vec3b>(i, j);
                        for (int k = 0; k < image.channels(); k++) {
                            sum1[k] += pixel[k];
                        }
                    }
                }
                for (int k = 0; k < image.channels(); k++) {
                    pixel[k] = sum1[k] / size / size;
                }
                dst.at<Vec3b>(m, n) = pixel;
            }
        }
    }
    return dst;
}

// Geometric mean filter: each pixel becomes (product of window values)^(1/count).
// Borders use only the in-bounds part of the window; zero pixels are counted
// as 1 so the product never collapses to zero.
Mat geometryMeanFilter(Mat &image, int size)
{
    Mat dst = image.clone();
    const int height = image.rows;
    const int width = image.cols;
    const int radius = size / 2;

    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            if (image.channels() == 1)              // grayscale
            {
                double product = 1.0;
                int count = 0;
                for (int dy = -radius; dy <= radius; dy++) {
                    const int ny = y + dy;
                    for (int dx = -radius; dx <= radius; dx++) {
                        const int nx = x + dx;
                        if (ny >= 0 && ny < height && nx >= 0 && nx < width) {
                            const int value = image.at<uchar>(ny, nx);
                            product *= (value == 0 ? 1 : value);  // skip zeros
                            count++;
                        }
                    }
                }
                // geometric mean = product^(1/count)
                dst.at<uchar>(y, x) = (int)pow(product, 1.0 / count);
            }
            else                                    // colour: per-channel product
            {
                double product[3] = { 1.0, 1.0, 1.0 };
                int count = 0;
                Vec3b pixel;
                for (int dy = -radius; dy <= radius; dy++) {
                    const int ny = y + dy;
                    for (int dx = -radius; dx <= radius; dx++) {
                        const int nx = x + dx;
                        if (ny >= 0 && ny < height && nx >= 0 && nx < width) {
                            pixel = image.at<Vec3b>(ny, nx);
                            for (int k = 0; k < image.channels(); k++) {
                                product[k] *= (pixel[k] == 0 ? 1 : pixel[k]);
                            }
                            count++;
                        }
                    }
                }
                for (int k = 0; k < image.channels(); k++) {
                    pixel[k] = (int)pow(product[k], 1.0 / count);
                }
                dst.at<Vec3b>(y, x) = pixel;
            }
        }
    }
    return dst;
}

// Harmonic mean filter (grayscale): count / sum(1/s), implemented with the
// 255-scaled form count*255 / sum(255/s). A zero pixel contributes 255 to the
// sum, i.e. it is treated like an intensity of 1.
Mat harmonicMeanFilter(Mat &image, int size)
{
    Mat dst = image.clone();
    const int height = image.rows;
    const int width = image.cols;
    const int radius = size / 2;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            double invSum = 0.0;   // sum of 255/s over the window
            int count = 0;         // in-bounds window pixels
            for (int dy = -radius; dy <= radius; dy++) {
                const int ny = y + dy;
                for (int dx = -radius; dx <= radius; dx++) {
                    const int nx = x + dx;
                    if (ny >= 0 && ny < height && nx >= 0 && nx < width) {
                        const int value = image.at<uchar>(ny, nx);
                        invSum += (value == 0 ? 255 : 255.0 / value);
                        count++;
                    }
                }
            }
            // harmonic mean, truncated to int as in the original
            dst.at<uchar>(y, x) = (int)(count * 255.0 / invSum);
        }
    }
    return dst;
}

// Contraharmonic mean filter: sum(s^(Q+1)) / sum(s^Q) over a size x size
// window. Q > 0 removes pepper noise, Q < 0 removes salt noise, Q == 0 is
// the arithmetic mean.
//
// BUG FIX: for Q < 0 the original evaluated pow(0, negative) on black
// pixels, which is +inf; the window result became NaN and the subsequent
// double->int cast was undefined behaviour. Zero pixels are now skipped
// when the exponent would be negative, and the quotient is clamped to
// [0, 255] before the uchar cast.
Mat inverseHarmonicMeanFilter(Mat &image, int size, double Q){
    Mat dst = image.clone();
    const int h = image.rows;
    const int w = image.cols;
    const int radius = size / 2;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            double num = 0.0;   // sum of s^(Q+1)
            double den = 0.0;   // sum of s^Q
            for (int m = -radius; m <= radius; m++) {
                const int row = i + m;
                for (int n = -radius; n <= radius; n++) {
                    const int col = j + n;
                    if (row >= 0 && row < h && col >= 0 && col < w) {
                        const int s = image.at<uchar>(row, col);
                        if (s == 0 && Q < 0) continue;  // pow(0, negative) -> inf
                        num += pow((double)s, Q + 1);
                        den += pow((double)s, Q);
                    }
                }
            }
            double dc = (den == 0) ? 0 : (num / den);
            if (dc < 0)   dc = 0;     // clamp before the narrowing cast
            if (dc > 255) dc = 255;
            dst.at<uchar>(i, j) = (uchar)(int)dc;
        }
    }
    return dst;
}
/*中值滤波
 *
 * 具体内容:利用 OpenCV 对灰度图像像素进行操作,分别利用 5*5 和 9*9 尺寸的模板对图像进行中值滤波。
 * (注:请分别为图像添加胡椒噪声、盐噪声和 椒盐噪声,并观察滤波效果)
 *
 * */
// Median filter with a size x size window (grayscale). Border pixels keep
// their original values.
//
// BUG FIX: the original collected window values from `dst` while also
// writing results into it, so already-filtered pixels polluted later
// windows. The window is now read from an untouched copy; nth_element
// replaces the full sort since only the median is needed.
Mat medianFilter(Mat &image, int size) {
    Mat src = image.clone();   // read-only source
    Mat dst = image.clone();   // output image
    int rows = dst.rows, cols = dst.cols;
    int start = size / 2;
    for (int m = start; m < rows - start; m++) {
        for (int n = start; n < cols - start; n++) {
            vector<uchar> window;
            window.reserve(size * size);
            for (int i = -start + m; i <= start + m; i++) {
                for (int j = -start + n; j <= start + n; j++) {
                    window.push_back(src.at<uchar>(i, j));
                }
            }
            // partial selection of the median element
            nth_element(window.begin(), window.begin() + size * size / 2, window.end());
            dst.at<uchar>(m, n) = window[size * size / 2];
        }
    }
    return dst;
}

/*自适应均值滤波
 *
 * 具体内容:利用 OpenCV 对灰度图像像素进行操作,设计自适应局部降 低噪声滤波器去噪算法。
 * 模板大小 7*7(对比该算法的效果和均值滤波器的效果)
 *
 * */
// Adaptive local noise-reduction filter:
//   f(x,y) = g(x,y) - (noiseVar / localVar) * (g(x,y) - localMean)
// with the ratio clamped to 1 when the local variance is at or below the
// assumed noise variance (also covers a flat window, localVar == 0).
//
// BUG FIX: the original used the centre pixel itself as the "local mean",
// so (Zxy - Zmed) was always zero and the filter returned the input
// unchanged; the blur() result it computed was immediately overwritten,
// and Sn/Sl could divide by zero on flat windows.
Mat selfAdaptMeanFilter(Mat &image, int size)
{
    Mat dst = image.clone();
    const int h = image.rows;
    const int w = image.cols;
    const int radius = size / 2;
    const double noiseVar = 100;   // assumed global noise variance (Sn)

    for (int i = 0; i < h; i++)
    {
        for (int j = 0; j < w; j++)
        {
            const int Zxy = image.at<uchar>(i, j);

            // First pass: local mean over the in-bounds window.
            double sum = 0;
            int mn = 0;
            for (int m = -radius; m <= radius; m++) {
                const int row = i + m;
                for (int n = -radius; n <= radius; n++) {
                    const int col = j + n;
                    if (row >= 0 && row < h && col >= 0 && col < w) {
                        sum += image.at<uchar>(row, col);
                        mn++;
                    }
                }
            }
            const double mean = sum / mn;

            // Second pass: local variance around that mean.
            double var = 0;
            for (int m = -radius; m <= radius; m++) {
                const int row = i + m;
                for (int n = -radius; n <= radius; n++) {
                    const int col = j + n;
                    if (row >= 0 && row < h && col >= 0 && col < w) {
                        const double dv = image.at<uchar>(row, col) - mean;
                        var += dv * dv;
                    }
                }
            }
            var /= mn;

            // Clamp Sn/Sl to 1 so the filter never over-corrects.
            const double ratio = (var <= noiseVar) ? 1.0 : noiseVar / var;
            int d = (int)(Zxy - ratio * (Zxy - mean));
            if (d < 0)   d = 0;
            if (d > 255) d = 255;
            dst.at<uchar>(i, j) = (uchar)d;
        }
    }
    return dst;
}


/*自适应中值滤波
 *
 * 具体内容:利用 OpenCV 对灰度图像像素进行操作,设计自适应中值滤波算 法对椒盐图像进行去噪。
 * 模板大小 7*7(对比中值滤波器的效果)
 *
 *
 * */
// Adaptive median filter. Starting from a 3x3 window, the window grows until
// the median is not an impulse (Zmin < Zmed < Zmax) or the maximum size is
// reached; the centre pixel is kept whenever it is itself not an impulse.
//
// BUG FIX: the original computed the window MEAN and called it Zmed. An
// adaptive *median* filter needs the true median, otherwise salt-and-pepper
// impulses are smeared instead of removed.
Mat selfAdaptMedianFilter(Mat &image, int size) {
    Mat dst = image.clone();
    const int h = image.rows;
    const int w = image.cols;
    const int maxRadius = size / 2;

    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            const int Zxy = image.at<uchar>(i, j);
            int out = Zxy;
            int radius = 1;                       // start with a 3x3 window
            while (radius <= maxRadius) {
                // Collect the in-bounds window values.
                vector<uchar> window;
                for (int m = -radius; m <= radius; m++) {
                    const int row = i + m;
                    if (row < 0 || row >= h) continue;
                    for (int n = -radius; n <= radius; n++) {
                        const int col = j + n;
                        if (col >= 0 && col < w)
                            window.push_back(image.at<uchar>(row, col));
                    }
                }
                sort(window.begin(), window.end());
                const int Zmin = window.front();
                const int Zmax = window.back();
                const int Zmed = window[window.size() / 2];  // true median

                if (Zmed > Zmin && Zmed < Zmax) {
                    // Stage B: keep the pixel unless it is itself an impulse.
                    out = (Zxy > Zmin && Zxy < Zmax) ? Zxy : Zmed;
                    break;
                }
                // Stage A failed: grow the window; at maximum size output Zmed.
                radius++;
                if (radius > maxRadius) {
                    out = Zmed;
                    break;
                }
            }
            dst.at<uchar>(i, j) = (uchar)out;
        }
    }
    return dst;
}


/*彩色图像均值滤波
 *
 * 具体内容:利用 OpenCV 对彩色图像 RGB 三个通道的像素进行操作,利用算 术均值滤波器和几何均值滤波器进行彩色图像去噪。
 * 模板大小为 5*5。
 *
 *
 * */
//
// Created by XQ on 2019-03-29.
//

//https://www.jianshu.com/p/1c9ddc9a7b38
//https://blog.csdn.net/qq_37059483/article/details/77979910
//https://blog.csdn.net/gxiaoyaya/article/details/72460483

#include<iostream>
#include<string>


#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;

bool DFTAndIDFT(string &src, int flag);
bool idealLowPassFilterOrHighPassFilter(string &src, int flag, double f,int model);
bool idealHighPassFilter(string &src, int flag, double f);
bool butterworthLowPassFilterOrHighPassFilter(string &src, int flag, double f, int n, int model);





/*int main(){
    string str = "/Volumes/数据/图片/2k/lostwall.jpg";
    cout << "1.DFTAndIDFT:" << DFTAndIDFT(str,0) << endl;
    double f;
    int n;

    while (cout << "输入截止频率f 与 n:" && cin >> f >> n){
        cout << "2.1 idealLowPassFilter:" << idealLowPassFilterOrHighPassFilter(str, 0, f, 0) << endl;
        cout << "2.2 idealHighPassFilter:" << idealLowPassFilterOrHighPassFilter(str, 0, f, 1) << endl;
        cout << "3.1 ButterworthLowPassFilter:" << butterworthLowPassFilterOrHighPassFilter(str, 0, f, n, 0) << endl;
        cout << "3.2 ButterworthHighPassFilter:" << butterworthLowPassFilterOrHighPassFilter(str, 0, f, n, 1) << endl;
    }

    return 0;
}*/





/*灰度图像的 DFT 和 IDFT。
 *
 *具体内容:利用 OpenCV 提供的 cvDFT 函数对图像进行 DFT 和 IDFT 变换.
 * 在图像处理领域,通过DFT可以将图像转换到频域,实现高通和低通滤波;
 * 还可以利用矩阵的卷积运算等同于其在频域的乘法运算从而优化算法降低运算量,即先将图像转换到频域,然后做完乘法运算后,
 * 再转换到图像域,opencv中的模板匹配就利用了这一特性降低运算量。
 *
 * */

// Forward and inverse DFT demo for a grayscale image: pads the image to an
// optimal DFT size, shows the centred log-magnitude spectrum ("DFT") and the
// reconstruction via the inverse transform ("IDFT").
bool DFTAndIDFT(string &src, int flag){
    Mat image = imread(src,flag);
    imshow("input", image);
    int w = getOptimalDFTSize(image.cols);
    int h = getOptimalDFTSize(image.rows);
    Mat padded;
    copyMakeBorder(image, padded, 0, h - image.rows, 0, w - image.cols, BORDER_CONSTANT, Scalar::all(0));// zero-pad to the optimal size, result in `padded`
    Mat plane[] = { Mat_<float>(padded),Mat::zeros(padded.size(),CV_32F) };// real and imaginary planes
    Mat complexIm;
    merge(plane, 2, complexIm);// merge into a single 2-channel complex image
    dft(complexIm, complexIm);// forward DFT, in place
    split(complexIm, plane);// split back: channel 0 = real, channel 1 = imaginary
    magnitude(plane[0], plane[1], plane[0]);// magnitude of the complex spectrum (DFT output is complex)
    int cx = padded.cols / 2;
    int cy = padded.rows / 2;// centre; below, swap quadrants: top-left<->bottom-right, top-right<->bottom-left
    Mat temp;
    Mat part1(plane[0], Rect(0, 0, cx, cy));
    Mat part2(plane[0], Rect(cx, 0, cx, cy));
    Mat part3(plane[0], Rect(0, cy, cx, cy));
    Mat part4(plane[0], Rect(cx, cy, cx, cy));
    part1.copyTo(temp);
    part4.copyTo(part1);
    temp.copyTo(part4);
    part2.copyTo(temp);
    part3.copyTo(part2);
    temp.copyTo(part3);
    /**/
    Mat _complexim;
    complexIm.copyTo(_complexim);// copy the spectrum and invert it to recover the original image
    Mat iDft[] = { Mat::zeros(plane[0].size(),CV_32F),Mat::zeros(plane[0].size(),CV_32F) };// two CV_32F planes at the padded size
    idft(_complexim, _complexim);// inverse DFT (result is still complex)
    split(_complexim, iDft);// split the complex result
    magnitude(iDft[0], iDft[1], iDft[0]);// magnitude of the reconstruction (channel 0 is what we display)
    normalize(iDft[0], iDft[0], 1, 0, CV_MINMAX);// normalize: float images display in [0,1] (>1 white, <0 black)
    imshow("IDFT", iDft[0]);// show the reconstructed image
    /**/
    plane[0] += Scalar::all(1);// log scale, log(1+|F|): the raw spectrum is hard to inspect visually
    log(plane[0], plane[0]);
    normalize(plane[0], plane[0], 1, 0, CV_MINMAX);
    imshow("DFT", plane[0]);
    waitKey(0);
    destroyAllWindows();


    return true;
}

/*利用理想高通和低通滤波器对灰度图像进行频域滤波
 *
 * 具体内容:利用 cvDFT 函数实现 DFT,在频域上利用理想高通和低通滤波器进行滤波,并把滤波过后的图像显示在屏幕上(观察振铃现象),要求截止频率可输入。
 *
 * */
// Ideal low-pass (model == 0) / high-pass (model == 1) frequency-domain
// filtering of a grayscale image. `f` is the cutoff radius measured from the
// centred spectrum. Ringing artefacts are expected (sharp cutoff).
// `flag` is unused (the image is always loaded as grayscale).
bool idealLowPassFilterOrHighPassFilter(string &src, int flag, double f, int model){
    Mat image = imread(src, 0); // Read the file
    imshow("input", image);
    Mat img = image.clone();
    if(model == 0) cout << "低通 ";
    if(model == 1) cout << "高通 ";
    // Pad to an optimal DFT size to speed up the transform.
    int M = getOptimalDFTSize(img.rows);
    int N = getOptimalDFTSize(img.cols);
    Mat padded;
    copyMakeBorder(img, padded, 0, M - img.rows, 0, N - img.cols, BORDER_CONSTANT, Scalar::all(0));
    // Real and imaginary planes of the transform.
    Mat planes[] = { Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F) };
    Mat complexImg;
    merge(planes, 2, complexImg);
    dft(complexImg, complexImg);
    // Crop to even dimensions (& -2 clears the lowest bit). The ROI shares
    // storage with complexImg, so the filtering below edits the spectrum
    // in place.
    Mat mag = complexImg;
    mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));
    int cx = mag.cols / 2;
    int cy = mag.rows / 2;
    // Swap quadrants so the zero frequency sits at the centre.
    Mat tmp;
    Mat q0(mag, Rect(0, 0, cx, cy));
    Mat q1(mag, Rect(cx, 0, cx, cy));
    Mat q2(mag, Rect(0, cy, cx, cy));
    Mat q3(mag, Rect(cx, cy, cx, cy));

    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);

    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);

    // Zero out the rejected frequency band.
    // BUG FIX: the original iterated with mag.ptr<double>() although the
    // matrix is 2-channel CV_32F (float pairs) -- reading it as double is
    // undefined behaviour and only part of each row was touched. Access the
    // (re, im) pair with Vec2f and zero both components.
    for (int y = 0; y < mag.rows; y++) {
        for (int x = 0; x < mag.cols; x++) {
            double d = sqrt(pow((y - cy), 2) + pow((x - cx), 2));
            bool reject = (model == 0) ? (d > f)                    // low-pass
                                       : (model == 1 && d < f);     // high-pass
            if (reject) mag.at<Vec2f>(y, x) = Vec2f(0.f, 0.f);
        }
    }
    // Swap the quadrants back before the inverse transform.
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);
    // Inverse DFT straight to a real image, then to 8-bit for display.
    Mat invDFT, invDFTcvt;
    idft(mag, invDFT, DFT_SCALE | DFT_REAL_OUTPUT); // Applying IDFT
    invDFT.convertTo(invDFTcvt, CV_8U);

    imshow("idealLowPassFilterOrHighPassFilter", invDFTcvt);
    waitKey(0);
    destroyAllWindows();


    return true;
}

/*利用布特沃斯高通和低通滤波器对灰度图像进行频域滤波。
 *
 * 具体内容:利用 cvDFT 函数实现 DFT,在频域上进行利用布特沃斯高通和低通滤波器进行滤波,并把滤波过后的图像显示在屏幕上(观察振铃现象),要求截止频率和 n 可输入。
 *
 * */
// Butterworth low-pass (model == 0) / high-pass (model == 1) filtering in
// the frequency domain:
//   H_lp(u,v) = 1 / (1 + (D/f)^(2n))     H_hp(u,v) = 1 / (1 + (f/D)^(2n))
// where D is the distance from the spectrum centre, f the cutoff frequency
// and n the filter order. The smooth transfer function avoids the ringing
// of the ideal filter.
bool butterworthLowPassFilterOrHighPassFilter(string &src, int flag, double f, int n, int model){
    Mat image = imread(src, 0); // Read the file
    imshow("原始图像", image);

    Mat img = image.clone();
    if(model == 0) cout << "低通 ";
    if(model == 1) cout << "高通 ";
    // Pad to an optimal DFT size to speed up the transform.
    int M = getOptimalDFTSize(img.rows);
    int N = getOptimalDFTSize(img.cols);
    Mat padded;
    copyMakeBorder(img, padded, 0, M - img.rows, 0, N - img.cols, BORDER_CONSTANT, Scalar::all(0));

    Mat planes[] = { Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F) };
    Mat complexImg;
    merge(planes, 2, complexImg);

    dft(complexImg, complexImg);

    // Crop to even dimensions; the ROI shares storage with complexImg, so
    // the filtering below modifies the spectrum in place.
    Mat mag = complexImg;
    mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));

    int cx = mag.cols / 2;
    int cy = mag.rows / 2;

    // Centre the zero frequency by swapping quadrants.
    Mat tmp;
    Mat q0(mag, Rect(0, 0, cx, cy));
    Mat q1(mag, Rect(cx, 0, cx, cy));
    Mat q2(mag, Rect(0, cy, cx, cy));
    Mat q3(mag, Rect(cx, cy, cx, cy));

    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);

    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);

    // Apply the transfer function.
    // BUG FIXES vs the original:
    //  * the spectrum was traversed with mag.ptr<double>() although it is a
    //    2-channel CV_32F matrix -- undefined behaviour; use Vec2f access.
    //  * H was used as a hard 0.5 cutoff (degenerating into an ideal
    //    filter); a Butterworth filter multiplies each sample by H instead.
    for (int y = 0; y < mag.rows; y++)
    {
        for (int x = 0; x < mag.cols; x++)
        {
            double d = sqrt(pow((y - cy), 2) + pow((x - cx), 2));
            double h;
            if (model == 0) {
                h = 1.0 / (1 + pow(d / f, 2 * n));                   // low-pass
            } else {
                // guard d == 0 at the exact centre for the high-pass branch
                h = (d == 0) ? 0.0 : 1.0 / (1 + pow(f / d, 2 * n));  // high-pass
            }
            Vec2f &px = mag.at<Vec2f>(y, x);
            px[0] = (float)(px[0] * h);   // real part
            px[1] = (float)(px[1] * h);   // imaginary part
        }
    }
    // Undo the quadrant swap.
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);
    // Inverse transform back to a displayable 8-bit image.
    Mat invDFT, invDFTcvt;
    idft(complexImg, invDFT, DFT_SCALE | DFT_REAL_OUTPUT); // Applying IDFT
    invDFT.convertTo(invDFTcvt, CV_8U);
    imshow("butterworthLowPassFilterOrHighPassFilter", invDFTcvt);

    waitKey(0);
    destroyAllWindows();
    return true;
}


//
// Created by XQ on 2019-04-05.
//

//https://www.kancloud.cn/digest/herbertopencv/100797

#include<iostream>
#include<string>


#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;

// Morphology-based feature detectors: a demo of the four basic operators,
// a morphological-gradient edge detector, and a corner detector built from
// four 5x5 structuring elements (cross / diamond / x / square).
class MorphoFeatures{
public:
    bool fourFunctions(string &src, int flag);                // erode / dilate / close / open demo
    bool getEdges(const string &src,int flag, int threshold); // morphological gradient + optional threshold
    Mat getCorners(const string &src, int flag);              // corner map from the two asymmetric dilate/erode passes
    void drawOnImage(const Mat &binary, Mat &image);          // overlay circles at non-zero pixels of `binary`
    // 5x5 structuring elements used by getCorners()
    Mat cross();
    Mat diamond();
    Mat x();
    Mat squre();  // (sic: "square") all-ones 5x5 element
};



/*int main(){

    //第一步,先用十字形的结构元素膨胀原图像,这种情况下只会在边缘处“扩张”,角点不发生变化。接着用菱形的结构元素腐蚀原图像,只有拐角处才会被“收缩”,而直线边缘不发生变化。
    //第二步,用X型的结构元素膨胀原图像,角点膨胀的比边要多。这样第二次用方块腐蚀时,角点恢复原状,而边要腐蚀的更多。
    //第三步,将一二步的两幅输出图像相减,结果只保留了各个拐角处的细节。
    //string str = "/Volumes/数据/图片/2k/lostwall.jpg";
    string str = "/Volumes/数据/图片/2k/Lattice.png";
    MorphoFeatures input;
    //
    //input.fourFunctions(str,0);
    //
    input.getEdges(str,0,80);
    //
    Mat corner = input.getCorners(str,0);
    Mat image = imread(str,0);
    input.drawOnImage(corner,image);
    imshow("Corners On Image", image);

    //
    waitKey(0);
    destroyAllWindows();

    return 0;
}*/


// Demonstrate the four basic morphology operators on one image:
// erosion, dilation, closing and opening.
bool MorphoFeatures::fourFunctions(string &src, int flag){

    Mat image = imread(src, flag);
    imshow("Default Image", image);

    // Erosion and dilation with the default 3x3 kernel.
    Mat eroded, dilated;
    erode(image, eroded, Mat());
    dilate(image, dilated, Mat());

    // The same all-ones 3x3 element serves both closing and opening.
    Mat element(3, 3, CV_8U, Scalar(1));
    Mat closed, opened;
    morphologyEx(image, closed, MORPH_CLOSE, element, Point(-1, -1), 1);
    morphologyEx(image, opened, MORPH_OPEN, element, Point(-1, -1), 1);

    imshow("Eroded Image", eroded);
    imshow("Dilated Image", dilated);
    imshow("Closed Image", closed);
    imshow("Opened Image", opened);

    waitKey(0);
    destroyAllWindows();

    return true;
}

bool MorphoFeatures::getEdges(const string &src,int flag, int threshold){
    Mat image = imread(src,flag);
    imshow("Default image", image);
    // 得到梯度图
    Mat result;
    morphologyEx(image, result, MORPH_GRADIENT, Mat());
    // 阈值化以得到二值图像

    // 使用阈值化
    if(threshold > 0){
        cv::threshold(result, result, threshold, 255, THRESH_BINARY);
    }
    imshow("Edge Image",result);

    waitKey(0);
    destroyAllWindows();

    return true;
}

// 5x5 cross-shaped structuring element: middle row and column set to 1.
Mat MorphoFeatures::cross() {
    Mat kernel(5, 5, CV_8U, Scalar(0));
    kernel.row(2).setTo(Scalar(1));
    kernel.col(2).setTo(Scalar(1));
    return kernel;
}
// 5x5 diamond-shaped structuring element: cells whose Manhattan distance
// from the centre exceeds 2 (i.e. the three corner cells in each corner)
// are cleared; everything else stays 1.
Mat MorphoFeatures::diamond() {
    Mat kernel(5, 5, CV_8U, Scalar(1));
    for (int r = 0; r < 5; r++) {
        for (int c = 0; c < 5; c++) {
            if (abs(r - 2) + abs(c - 2) > 2) {
                kernel.at<uchar>(r, c) = 0;
            }
        }
    }
    return kernel;
}
// 5x5 X-shaped structuring element: both diagonals set to 1.
Mat MorphoFeatures::x() {
    Mat kernel(5, 5, CV_8U, Scalar(0));
    for (int r = 0; r < 5; r++) {
        kernel.at<uchar>(r, r) = 1;       // main diagonal
        kernel.at<uchar>(4 - r, r) = 1;   // anti-diagonal
    }
    return kernel;
}
// 5x5 all-ones (square) structuring element.
Mat MorphoFeatures::squre() {
    Mat kernel = Mat::ones(5, 5, CV_8U);
    return kernel;
}


// Corner detection built purely from morphology:
//   pass 1: dilate(cross) then erode(diamond)  -- edges preserved, corners rounded
//   pass 2: dilate(x)     then erode(square)   -- corners grown, edges preserved
// The absolute difference of the two passes leaves only the corners, which
// are then binarized.
Mat MorphoFeatures::getCorners(const string& src,int flag){

    Mat image = imread(src, flag);

    Mat first;
    dilate(image, first, this->cross());
    erode(first, first, this->diamond());

    Mat second;
    dilate(image, second, this->x());
    erode(second, second, this->squre());

    Mat result;
    absdiff(second, first, result);
    cv::threshold(result, result, 80, 255, cv::THRESH_BINARY);

    imshow("Corners", result);
    waitKey(0);
    destroyAllWindows();
    return result;
}

// Draw a white circle on `image` at every non-zero pixel of `binary`.
//
// BUG FIX: the original converted a flat iterator index to (x, y) using
// image.step, which is the row stride in BYTES (it may include padding),
// and it derived coordinates from `image` while the index walked `binary`.
// Iterate with explicit row/column indices over `binary` instead.
void MorphoFeatures::drawOnImage(const cv::Mat &binary, cv::Mat &image)
{
    for (int r = 0; r < binary.rows; ++r)
    {
        for (int c = 0; c < binary.cols; ++c)
        {
            if (binary.at<uchar>(r, c)) // pixel flagged (e.g. as a corner)
            {
                cv::circle(image, cv::Point(c, r), 5, cv::Scalar(255, 255, 255));
            }
        }
    }
}

//
// Created by XQ on 2019-04-05.
//

//https://www.kancloud.cn/digest/herbertopencv/100801

#include<iostream>
#include<string>


#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;


const double PI =  3.1415926;

// Sobel-based edge analysis helper (magnitude / orientation / thresholded
// edge maps), used as a comparison against cv::Canny.
class MyCanny{
private:
    Mat image;
    Mat sobel;
    // Sobel aperture size (must be odd: 1, 3, 5 or 7). Despite the name it
    // is NOT a binarization threshold -- it is passed to Sobel() as ksize.
    // BUG FIX: this member was uninitialized, so computeSobel() read an
    // indeterminate value whenever setthreshold() had not been called first.
    int threshold = 3;
    Mat sobelMagnitude;
    Mat sobelOrientation;

public:
    // Set the Sobel aperture size.
    void setthreshold(int a){
        threshold= a;
    }

    // Get the Sobel aperture size.
    int getthreshold() const{
        return threshold;
    }

    // Run Sobel in x and y, then convert the pair of derivative images to
    // polar form (magnitude + orientation).
    void computeSobel(const Mat &image){
        Mat sobelX;
        Mat sobelY;
        Sobel(image,sobelX,CV_32F,1,0,threshold);
        Sobel(image,sobelY,CV_32F,0,1,threshold);
        cartToPolar(sobelX,sobelY,sobelMagnitude,sobelOrientation);
    }

    // Gradient magnitude (CV_32F).
    Mat getMagnitude(){
        return sobelMagnitude;
    }

    // Gradient orientation in radians (CV_32F).
    Mat getOrientation(){
        return sobelOrientation;
    }

    // Threshold the magnitude; pixels BELOW the threshold come out white
    // (THRESH_BINARY_INV) for an inverted edge-map display.
    Mat getBinaryMap(double Threhhold){
        Mat bgImage;
        cv::threshold(sobelMagnitude,bgImage,Threhhold,255,THRESH_BINARY_INV);
        return bgImage;
    }

    // Scale the magnitude into a displayable CV_8U image (max -> 255).
    Mat getSobelImage(){
        Mat bgImage;
        double minval,maxval;
        minMaxLoc(sobelMagnitude,&minval,&maxval);
        sobelMagnitude.convertTo(bgImage,CV_8U,255/maxval);
        return bgImage;
    }

    // Map the orientation to CV_8U with factor 90/PI for visualization.
    Mat getSobelOrientationImage(){
        Mat bgImage;
        sobelOrientation.convertTo(bgImage,CV_8U,90/PI);
        return bgImage;
    }
};

/*int main(){

    string str = "/Volumes/数据/图片/2k/Lattice.png";
    Mat image = imread(str,0);
    imshow("Default Image",image);
    auto * mycanny = new MyCanny();
    mycanny->computeSobel(image);
    // 获取sobel的大小和方向
    imshow("Orientation of sobel",mycanny->getSobelOrientationImage());
    imshow("Magnitude of sobel",mycanny->getSobelImage());

    // 使用两种阈值的检测结果
    imshow("The Lower Threshold",mycanny->getBinaryMap(125));
    imshow("The Higher Threshold",mycanny->getBinaryMap(225));

    // 使用canny算法
    Mat contours;
    Canny(image,contours,125,225);
    Mat contoursInv;
    cv::threshold(contours,contoursInv,128,255,cv::THRESH_BINARY_INV);
    imshow("Edge Image",contoursInv);
    waitKey(0);
    destroyAllWindows();

    return 0;
}*/
//图像灰度变换
#include<iostream>
#include<string>

#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui.hpp>


using namespace std;
using namespace cv;

// Resize the input image onto a fixed 256x256 canvas and display both.
bool Demo1(string & src){
    Mat image = imread(src, 1); // Read the file
    imshow("Default Image", image);                // Show our image inside it.

    // Target buffer fixes the output size at 256x256.
    Mat resized = Mat::zeros(256, 256, CV_8UC3);
    resize(image, resized, resized.size());

    imshow("After Modify", resized);

    waitKey(0); // Wait for a keystroke in the window
    destroyAllWindows();
    return true;
}

// Scale the image by factors (a, b) along x and y and display both.
bool Demo2(string & src,double a, double b){
    Mat image = imread(src, 1);
    imshow("Default Image", image);

    // Empty Size() means the output size comes from the scale factors.
    Mat scaled;
    resize(image, scaled, Size(), a, b);

    imshow("After Modify", scaled);

    waitKey(0);
    destroyAllWindows();
    return true;
}


// Gaussian pyramid demo: one level up (2x) and one level down (0.5x).
bool Demo3(string & src){
    Mat image = imread(src, 1);
    imshow("Default Image", image);

    Mat up, down;
    pyrUp(image, up, Size(image.cols * 2, image.rows * 2));         // double the size
    pyrDown(image, down, Size(image.cols * 0.5, image.rows * 0.5)); // halve the size

    imshow("After Modify Up", up);
    imshow("After Modify Down", down);

    waitKey(0);
    destroyAllWindows();
    return true;
}

/*
int main()
{
    string str = "/Volumes/数据/图片/2k/6.jpg";
    cout << Demo1(str) << endl;
    cout << Demo2(str, 0.3, 0.3) << endl;
    cout << Demo3(str) << endl;
    return 0;
}*/
//
// Created by XQ on 2019-04-12.
//
//https://www.kancloud.cn/digest/herbertopencv/100802
#include<iostream>
#include<string>


#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/core/core_c.h>
#include <opencv2/highgui/highgui.hpp>

using namespace std;
using namespace cv;


const double PI =  3.1415926;

// Wrapper around the probabilistic Hough transform (HoughLinesP) that keeps
// the detection parameters and the last set of detected segments.
class LineFinder
{

private:

    // source image (not used by the current implementation)
    Mat img;
    // end points of the detected line segments
    vector<Vec4i> lines;
    // accumulator resolution
    double deltaRho;   // distance step
    double deltaTheta; // angle step
    // minimum number of votes before a line is accepted
    int minVote;
    // minimum accepted line length
    double minLength;
    // maximum allowed gap along a line
    double maxGap;

public:

    // Defaults: one-pixel distance resolution, one-degree angle resolution,
    // no minimum length and no gap constraint.
    LineFinder() : deltaRho(1), deltaTheta(PI/180), minVote(10), minLength(0.), maxGap(0.) {}
    // set the accumulator resolution
    void setAccResolution(double dRho, double dTheta);
    // set the minimum vote count
    void setMinVote(int minv);
    // set the minimum length and maximum gap
    void setLineLengthAndGap(double length, double gap);
    // run the probabilistic Hough transform on a binary edge image
    vector<Vec4i> findLines(Mat& binary);
    // draw the detected segments onto an image
    void drawDetectedLines(Mat &image, Scalar color=Scalar(255,255,255));
};
// Set the accumulator resolution (distance and angle steps).
void LineFinder::setAccResolution(double dRho, double dTheta) {
    deltaRho   = dRho;
    deltaTheta = dTheta;
}

// Set the minimum number of votes required to accept a line.
void LineFinder::setMinVote(int minv) {
    minVote = minv;
}

// Set the minimum segment length and the maximum allowed gap along a line.
void LineFinder::setLineLengthAndGap(double length, double gap) {
    minLength = length;
    maxGap    = gap;
}

// Run the probabilistic Hough transform on a binary edge image, cache the
// detected segments and return them.
vector<Vec4i> LineFinder::findLines(Mat& binary)
{
    lines.clear();
    HoughLinesP(binary, lines, deltaRho, deltaTheta, minVote, minLength, maxGap);
    return lines;
}

// Draw every cached segment onto the image in the given colour.
void LineFinder::drawDetectedLines(Mat &image, Scalar color)
{
    for (const Vec4i &seg : lines) {
        // seg = (x1, y1, x2, y2)
        line(image, Point(seg[0], seg[1]), Point(seg[2], seg[3]), color);
    }
}

// Detect straight lines in an image with Canny + probabilistic Hough and
// draw them on top of the original.
void findLine(string & src){
    Mat image = imread(src, 0);
    imshow("Default Image", image);

    // Hough works on a binary edge image, so run Canny first.
    Mat edges;
    Canny(image, edges, 125, 350);

    LineFinder detector;

    // Probabilistic Hough parameters.
    detector.setLineLengthAndGap(100, 20);
    detector.setMinVote(80); // minimum vote count

    // Detect and overlay the segments.
    vector<Vec4i> segments = detector.findLines(edges);
    detector.drawDetectedLines(image);

    imshow("Detected Lines", image);

    waitKey(0);
    destroyAllWindows();
}

// Detect circles with the Hough gradient method and draw them on the image.
void findCircle(string & src) {
    Mat image = imread(src, 0);

    // Smooth before HoughCircles to reduce spurious detections.
    GaussianBlur(image, image, Size(7, 7), 1.5);
    vector<Vec3f> circles;
    cv::HoughCircles(image, circles, HOUGH_GRADIENT,
                     2,   // accumulator resolution (image size / 2)
                     50,  // minimum distance between circle centres
                     200, // high Canny threshold
                     100, // minimum number of votes
                     20, 80); // smallest and largest valid radius

    // Each Vec3f holds (centre_x, centre_y, radius).
    for (const Vec3f &found : circles)
    {
        circle(image,
               Point(found[0], found[1]), // centre
               found[2],                  // radius
               Scalar(255),               // colour
               6);                        // line thickness
    }

    imshow("Detected Circles", image);
    waitKey(0);
    destroyAllWindows();
}
/*
int main(){
        string str = "/Volumes/数据/图片/2k/Lattice.png";
        findLine(str);

        //findCircle(str);
        return 0;
}*/
Archives QR Code Tip
QR Code for this page
Tipping QR Code