OpenCV3历程(5)——裂缝的检测与测量

一、开始先介绍几个即将用到的函数及知识点

1、LUT函数

函数简介:

void LUT(InputArray src,   //原始图像
         InputArray lut,   //查找表:对于多通道图像的查找,它可以只有一个通道,也可以与原始图像有相同的通道数
         OutputArray dst   //输出图像
        );

函数介绍(单通道为例):

对于8位单通道图片,其像素灰度为0-255,假如我们想将图像某一灰度值换成其他灰度值,用查找表就很方便。

  例如:我们想将一张图片灰度为0-100的像素的灰度变成0,101-200的变成100,201-255的变成255。我们就可以建立如下的一张表格:

 

当把此表格应用到图片时,图片0-100灰度的像素灰度就变成0,101-200的变成100,201-255的就变成255。映射表差不多就是这个意思。

典型用法(借助图像取反示例说明)是:

虽然手动遍历可以达到同样效果,但尽量使用 OpenCV 内置函数。调用LUT 函数可以获得最快的速度,这是因为OpenCV库可以通过英特尔线程架构启用多线程。

    //建立查找表
    Mat lookUpTable(1, 256, CV_8U);
    uchar *p = lookUpTable.data;
    for (int i = 0; i < 256; i++)
        p[i] = 255 - i;
    //通过LUT函数实现图像取反
    LUT(img1, lookUpTable, img1);

2、saturate_cast防止数据溢出

在OpenCV学习中经常看见saturate_cast的使用。为什么会用到saturate_cast呢?因为无论是加、减还是乘、除,运算结果都可能超出一个像素灰度值的范围(0~255)。所以当运算完之后,如果结果为负,则转为0;如果结果超出255,则为255。另外在梯度锐化的函数里,也会涉及到saturate_cast。示例如下:

代码来自:https://blog.csdn.net/mjlsuccess/article/details/12401839

//使用图像混合例子中的C语言版本演示
for (int i = 0; i < src1.rows; i++) {
    const uchar* src1_ptr = src1.ptr<uchar>(i);
    const uchar* src2_ptr = src2.ptr<uchar>(i);
    uchar* dst_ptr = dst.ptr<uchar>(i);
    for (int j = 0; j < src1.cols; j++) {
        dst_ptr[j] = saturate_cast<uchar>(src1_ptr[j]*alpha + src2_ptr[j]*beta + gama); //gama = -100, alpha = beta = 0.5
        //不加溢出保护
        //dst_ptr[j] = (src1_ptr[j]*alpha + src2_ptr[j]*beta + gama);
    }
}
imshow("output", dst);

 

加了溢出保护
没加溢出保护
没加溢出保护

 

 大致的原理应该如下:

if(data<0)data=0;
else if(data>255)data=255;

3、std::stack 基本操作

C++ Stack(堆栈)是一个容器适配器,为程序员提供了堆栈的全部功能——也就是说实现了一个先进后出(FILO,即后进先出 LIFO)的数据结构。

c++ stl栈stack的头文件为

#include <stack>

c++ stl栈stack的成员函数介绍

//操作:比较和分配堆栈
empty()  //堆栈为空则返回真
pop()    //移除栈顶元素
push()   //在栈顶增加元素
size()   //返回栈中元素数目
top()    //返回栈顶元素

4、C++中在一个类中定义另一个只有带参数构造函数的类的对象

此处参考网址:https://www.cnblogs.com/rednodel/p/5148156.html

#include <iostream>
using namespace std;

class A
{
public:
    A( int i ){}      //只有带参数的构造函数,没有默认构造函数
};

class B
{
public:
    B():a(1){}
    //或:B( int i ):a( i ){ }。对a提供参数一定要按这种形式,在冒号后,不能在花括号里面!
private:
    A a;              //成员 a 没有默认构造函数,必须在初始化列表中构造
};

int main()            //main 必须返回 int,void main 不是标准 C++
{
    B b;
    return 0;
}

 5、为什么要定义Mat_类

在读取矩阵元素时,以及获取矩阵某行的地址时,需要指定数据类型。这样首先需要不停地写“<uchar>”之类的模板参数,让人感觉很繁琐,在繁琐和烦躁中容易犯错,如下面代码中的错误:用at<double>()获取uchar矩阵元素时错误地使用了double类型。这种错误不是语法错误,因此在编译时编译器不会提醒。在程序运行时,at()函数获取到的不是期望的(i,j)位置处的元素,数据已经越界,但是运行时也未必会报错。这样的错误使得你的程序忽而看上去正常,忽而弹出“段错误”,特别是在代码规模很大时,难以查错。

如果使用Mat_类,那么就可以在变量声明时确定元素的类型,访问元素时不再需要指定元素类型,既使得代码简洁,又减少了出错的可能性。上面代码可以用Mat_<uchar>实现,实现代码如下面例程里的第二个双重for循环。

#include <iostream>
#include "opencv2/opencv.hpp"
using namespace std;
using namespace cv;

// NOTE(review):原始代码被网页抓取截断,以下按上文描述重建 — TODO 与原文核对
int main(int argc, char* argv[])
{
    Mat M(600, 800, CV_8UC1);
    //用at访问时必须手写元素类型,写错类型编译器不会报错
    for (int i = 0; i < M.rows; ++i) {
        uchar* p = M.ptr<uchar>(i);
        for (int j = 0; j < M.cols; ++j) {
            double d1 = (double)((i + j) % 255);
            M.at<uchar>(i, j) = d1;
            //错误示例:元素类型是uchar,用double读取会越界(编译期不报错)
            double d2 = M.at<double>(i, j);
        }
    }
    //在变量声明时,指定矩阵元素类型
    Mat_<uchar> M1 = (Mat_<uchar>&)M;
    for (int i = 0; i < M1.rows; ++i) {
        for (int j = 0; j < M1.cols; ++j) {
            M1(i, j) = (i + j) % 255;   //访问元素不再需要指定类型
        }
    }
    return 0;
}

二、实例裂缝检测源码

这里的代码来自(表示感谢):https://blog.csdn.net/FunnyWhiteCat/article/details/81387561

首先看原图:

 处理流程思路:

  1. 图像灰度化
  2. 增加对比度
  3. Canny边缘检测
  4. 用形态学连接临近裂缝
  5. 找出所有连通域,删除非裂缝噪点区域
  6. 对每个连通域提取骨架,测量长度和宽度

源码:

#include <iostream>
#include <stack>
#include <utility>
#include <vector>
#include "opencv2/opencv.hpp"

using namespace cv;
using namespace std;
//c++中在一个类中定义另一个只有带参数构造函数的类的对象
class CrackInfo
{public:CrackInfo(Point& position, long length, float width) {};
};/* 增加对比度 */
void addContrast(Mat & srcImg);
/* 交换两个Mat */
void swapMat(Mat & srcImg, Mat & dstImg);
/* 二值化图像。0->0,非0->255 */
void binaryzation(Mat & srcImg);
/* 检测连通域,并删除不符合条件的连通域 */
void findConnectedDomain(Mat & srcImg, vector>& connectedDomains, int area, int WHRatio);
/* 提取连通域的骨架 */
void thinImage(Mat & srcImg);
/* 获取图像中白点的数量 */
void getWhitePoints(Mat &srcImg, vector& domain);
/* 计算宽高信息的放置位置 */
Point calInfoPosition(int imgRows, int imgCols, int padding, const std::vector& domain);int main(int argc, char** argv) {Mat srcImg = imread("./image/20180803215201452.jpg");Mat dstImg, dstImg2;//灰度化cvtColor(srcImg, dstImg, CV_BGR2GRAY, 1);//增加对比度addContrast(dstImg);//图像交换swapMat(srcImg, dstImg);//边缘检测Canny(srcImg, dstImg, 50, 150);//形态学变换Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));dilate(dstImg, dstImg, kernel);//膨胀morphologyEx(dstImg, dstImg, CV_MOP_CLOSE, kernel, Point(-1, -1), 3);morphologyEx(dstImg, dstImg, CV_MOP_CLOSE, kernel);//寻找连通域vector> connectedDomains;findConnectedDomain(dstImg, connectedDomains, 20, 3);kernel = getStructuringElement(MORPH_ELLIPSE, Size(7, 7));morphologyEx(dstImg, dstImg, CV_MOP_CLOSE, kernel, Point(-1, -1), 5);connectedDomains.clear();findConnectedDomain(dstImg, connectedDomains, 20, 3);kernel = getStructuringElement(MORPH_CROSS, Size(3, 3));morphologyEx(dstImg, dstImg, CV_MOP_OPEN, kernel);kernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));erode(dstImg, dstImg, kernel);connectedDomains.clear();findConnectedDomain(dstImg, connectedDomains, 20, 3);cout << "开始测量" << endl;cout << "连通域数量:" << connectedDomains.size() << endl;Mat lookUpTable(1, 256, CV_8U, Scalar(0));vector crackInfos;for (auto domain_it = connectedDomains.begin(); domain_it != connectedDomains.end(); ++domain_it) {LUT(dstImg, lookUpTable, dstImg);for (auto point_it = domain_it->cbegin(); point_it != domain_it->cend(); ++point_it) {dstImg.ptr(point_it->y)[point_it->x] = 255;}double area = (double)domain_it->size();thinImage(dstImg);getWhitePoints(dstImg, *domain_it);long length = (long)domain_it->size();Point position = calInfoPosition(dstImg.rows, dstImg.cols, 50, *domain_it);crackInfos.push_back(CrackInfo(position, length, (float)(area / length)));}cout << "开始绘制信息" << endl;cout << "信息数量:" << crackInfos.size() << endl;LUT(dstImg, lookUpTable, dstImg);for (auto domain_it = connectedDomains.cbegin(); domain_it != connectedDomains.cend(); 
++domain_it) {for (auto point_it = domain_it->cbegin(); point_it != domain_it->cend(); ++point_it) {dstImg.ptr(point_it->y)[point_it->x] = 255;}}//ostringstream info;//for (auto it = crackInfos.cbegin(); it != crackInfos.cend(); ++it) {//	info.str("");//	info << *it;//	putText(dstImg, info.str(), it->Position, FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255));//}imwrite("result1.png", dstImg);cout << "保存图像完成" << endl;return 0;
}/*利用查找表(Look-up table)增加图像对比度*/
// 就地增加对比度:每个灰度值乘以固定增益 1.1^5 ≈ 1.61,
// 用 saturate_cast<uchar> 把超过 255 的结果截断,再经 LUT 一次性映射整幅图。
void addContrast(Mat & srcImg) {
    Mat lookUpTable(1, 256, CV_8U);
    double temp = pow(1.1, 5);          //固定增益,loop 外计算一次
    uchar* p = lookUpTable.data;
    for (int i = 0; i < 256; ++i)
        p[i] = saturate_cast<uchar>(i * temp);   //溢出保护:>255 截断为 255
    LUT(srcImg, lookUpTable, srcImg);
}
/*图像交换:只交换 Mat 头(数据指针等),不复制像素数据*/
void swapMat(Mat & srcImg, Mat & dstImg) {
    std::swap(srcImg, dstImg);
}
/* 检测连通域,并删除不符合条件的连通域 */
void findConnectedDomain(Mat & srcImg, vector>& connectedDomains, int area, int WHRatio) {Mat_ tempImg = (Mat_ &)srcImg;for (int i = 0; i < tempImg.rows; ++i) {uchar* row = tempImg.ptr(i);    调取存储图像内存的第i行的指针for (int j = 0; j < tempImg.cols; ++j) {if (row[j] == 255) {stack connectedPoints;vector domain;connectedPoints.push(Point(j, i));while (!connectedPoints.empty()) {Point currentPoint = connectedPoints.top();domain.push_back(currentPoint);int colNum = currentPoint.x;int rowNum = currentPoint.y;tempImg.ptr(rowNum)[colNum] = 0;connectedPoints.pop();if (rowNum - 1 >= 0 && colNum - 1 >= 0 && tempImg.ptr(rowNum - 1)[colNum - 1] == 255) {tempImg.ptr(rowNum - 1)[colNum - 1] = 0;connectedPoints.push(Point(colNum - 1, rowNum - 1));}if (rowNum - 1 >= 0 && tempImg.ptr(rowNum - 1)[colNum] == 255) {tempImg.ptr(rowNum - 1)[colNum] = 0;connectedPoints.push(Point(colNum, rowNum - 1));}if (rowNum - 1 >= 0 && colNum + 1 < tempImg.cols && tempImg.ptr(rowNum - 1)[colNum + 1] == 255) {tempImg.ptr(rowNum - 1)[colNum + 1] = 0;connectedPoints.push(Point(colNum + 1, rowNum - 1));}if (colNum - 1 >= 0 && tempImg.ptr(rowNum)[colNum - 1] == 255) {tempImg.ptr(rowNum)[colNum - 1] = 0;connectedPoints.push(Point(colNum - 1, rowNum));}if (colNum + 1 < tempImg.cols && tempImg.ptr(rowNum)[colNum + 1] == 255) {tempImg.ptr(rowNum)[colNum + 1] = 0;connectedPoints.push(Point(colNum + 1, rowNum));}if (rowNum + 1 < tempImg.rows && colNum - 1 > 0 && tempImg.ptr(rowNum + 1)[colNum - 1] == 255) {tempImg.ptr(rowNum + 1)[colNum - 1] = 0;connectedPoints.push(Point(colNum - 1, rowNum + 1));}if (rowNum + 1 < tempImg.rows && tempImg.ptr(rowNum + 1)[colNum] == 255) {tempImg.ptr(rowNum + 1)[colNum] = 0;connectedPoints.push(Point(colNum, rowNum + 1));}if (rowNum + 1 < tempImg.rows && colNum + 1 < tempImg.cols && tempImg.ptr(rowNum + 1)[colNum + 1] == 255) {tempImg.ptr(rowNum + 1)[colNum + 1] = 0;connectedPoints.push(Point(colNum + 1, rowNum + 1));}}if (domain.size() > area) {RotatedRect rect = 
minAreaRect(domain);float width = rect.size.width;float height = rect.size.height;if (width < height) {float temp = width;width = height;height = temp;}if (width > height * WHRatio && width > 50) {for (auto cit = domain.begin(); cit != domain.end(); ++cit) {tempImg.ptr(cit->y)[cit->x] = 250;}connectedDomains.push_back(domain);}}}}}binaryzation(srcImg);
}
/* 二值化图像。0->0,非0->255 */
// 通过一张 256 项查找表完成:索引 0 映射为 0,其余灰度全部映射为 255。
void binaryzation(Mat & srcImg) {
    Mat table(1, 256, CV_8U);
    uchar* entries = table.data;
    entries[0] = 0;
    for (int v = 1; v < 256; ++v)
        entries[v] = 255;
    LUT(srcImg, table, srcImg);
}
/* 提取连通域的骨架(Zhang-Suen 风格的两轮迭代细化) */
// 反复迭代:奇/偶两轮用不同的邻域条件标记可删除的边缘白点(255),
// 每轮结束统一清 0;直到某一轮没有任何点可删为止,
// 剩下的白点即为(近似)单像素宽的骨架。
void thinImage(Mat & srcImg) {
    vector<Point> deleteList;      //本轮标记待删除(置 0)的点
    int neighbourhood[9];          //[0]=中心,[1..8]=顺时针 8 邻域的白点标记
    int nl = srcImg.rows;
    int nc = srcImg.cols;
    bool inOddIterations = true;   //奇偶轮交替使用不同的删除条件
    while (true) {
        for (int j = 1; j < (nl - 1); j++) {
            uchar* data_last = srcImg.ptr(j - 1);
            uchar* data = srcImg.ptr(j);
            uchar* data_next = srcImg.ptr(j + 1);
            for (int i = 1; i < (nc - 1); i++) {
                if (data[i] == 255) {
                    int whitePointCount = 0;
                    neighbourhood[0] = 1;
                    if (data_last[i] == 255) neighbourhood[1] = 1;
                    else  neighbourhood[1] = 0;
                    if (data_last[i + 1] == 255) neighbourhood[2] = 1;
                    else  neighbourhood[2] = 0;
                    if (data[i + 1] == 255) neighbourhood[3] = 1;
                    else  neighbourhood[3] = 0;
                    if (data_next[i + 1] == 255) neighbourhood[4] = 1;
                    else  neighbourhood[4] = 0;
                    if (data_next[i] == 255) neighbourhood[5] = 1;
                    else  neighbourhood[5] = 0;
                    if (data_next[i - 1] == 255) neighbourhood[6] = 1;
                    else  neighbourhood[6] = 0;
                    if (data[i - 1] == 255) neighbourhood[7] = 1;
                    else  neighbourhood[7] = 0;
                    if (data_last[i - 1] == 255) neighbourhood[8] = 1;
                    else  neighbourhood[8] = 0;
                    //B(p):8 邻域中白点个数
                    for (int k = 1; k < 9; k++) {
                        whitePointCount = whitePointCount + neighbourhood[k];
                    }
                    if ((whitePointCount >= 2) && (whitePointCount <= 6)) {
                        //A(p):沿邻域一圈 0->1 跳变次数
                        int ap = 0;
                        if ((neighbourhood[1] == 0) && (neighbourhood[2] == 1)) ap++;
                        if ((neighbourhood[2] == 0) && (neighbourhood[3] == 1)) ap++;
                        if ((neighbourhood[3] == 0) && (neighbourhood[4] == 1)) ap++;
                        if ((neighbourhood[4] == 0) && (neighbourhood[5] == 1)) ap++;
                        if ((neighbourhood[5] == 0) && (neighbourhood[6] == 1)) ap++;
                        if ((neighbourhood[6] == 0) && (neighbourhood[7] == 1)) ap++;
                        if ((neighbourhood[7] == 0) && (neighbourhood[8] == 1)) ap++;
                        if ((neighbourhood[8] == 0) && (neighbourhood[1] == 1)) ap++;
                        if (ap == 1) {
                            if (inOddIterations && (neighbourhood[3] * neighbourhood[5] * neighbourhood[7] == 0)
                                && (neighbourhood[1] * neighbourhood[3] * neighbourhood[5] == 0)) {
                                deleteList.push_back(Point(i, j));
                            }
                            else if (!inOddIterations && (neighbourhood[1] * neighbourhood[5] * neighbourhood[7] == 0)
                                && (neighbourhood[1] * neighbourhood[3] * neighbourhood[7] == 0)) {
                                deleteList.push_back(Point(i, j));
                            }
                        }
                    }
                }
            }
        }
        if (deleteList.size() == 0)
            break;                      //本轮无点可删,细化完成
        for (size_t i = 0; i < deleteList.size(); i++) {
            Point tem = deleteList[i];
            uchar* data = srcImg.ptr(tem.y);
            data[tem.x] = 0;
        }
        deleteList.clear();
        inOddIterations = !inOddIterations;
    }
}
/* 获取图像中白点的数量 */
void getWhitePoints(Mat &srcImg, vector& domain) {domain.clear();Mat_ tempImg = (Mat_ &)srcImg;for (int i = 0; i < tempImg.rows; i++) {uchar * row = tempImg.ptr(i);for (int j = 0; j < tempImg.cols; ++j) {if (row[j] != 0)domain.push_back(Point(j, i));}}
}
/* 计算宽高信息的放置位置:取连通域质心,并收缩到距图像边缘 padding 像素以内 */
Point calInfoPosition(int imgRows, int imgCols, int padding, const std::vector<Point>& domain) {
    if (domain.empty())                //防止除零:空点集直接返回左上角安全位置
        return cv::Point(padding, padding);
    long xSum = 0;
    long ySum = 0;
    for (auto it = domain.cbegin(); it != domain.cend(); ++it) {
        xSum += it->x;
        ySum += it->y;
    }
    int x = (int)(xSum / (long)domain.size());
    int y = (int)(ySum / (long)domain.size());
    //夹紧到 [padding, 边长 - padding],避免文字画出图外
    if (x < padding) x = padding;
    if (x > imgCols - padding) x = imgCols - padding;
    if (y < padding) y = padding;
    if (y > imgRows - padding) y = imgRows - padding;
    return cv::Point(x, y);
}

处理结果:

重叠结果

待优化......

 


本文来自互联网用户投稿,文章观点仅代表作者本人,不代表本站立场,不承担相关法律责任。如若转载,请注明出处。 如若内容造成侵权/违法违规/事实不符,请点击【内容举报】进行投诉反馈!

相关文章

立即
投稿

微信公众账号

微信扫一扫加关注

返回
顶部