This chapter covers distance measurement with a binocular (stereo) camera. The procedure has three steps: 1. calibrate the cameras, 2. set the reference-object distance, 3. take the actual measurement. The development environment is OpenCV 3 + Qt5 on Ubuntu Linux.

Before a camera can be used for ranging it must be calibrated. Calibration removes the lens distortion and yields the intrinsic and extrinsic parameter matrices. The intrinsic matrix is tied to the focal length: it describes the transform from the image plane to pixel coordinates, and it stays the same as long as the focal length does not change, so once determined it can be reused. The extrinsic matrix describes the transform between the camera coordinate system and the world coordinate system.
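To make the "plane-to-pixel transform" idea concrete, here is a minimal sketch of the standard pinhole model that the intrinsic matrix encodes. The numbers are made-up placeholders, not values for this camera; the real fx, fy, cx, cy come out of the calibration described next.

#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    // Intrinsic matrix K: fx, fy are the focal length expressed in pixels,
    // (cx, cy) is the principal point. Placeholder values only.
    double fx = 800.0, fy = 800.0, cx = 320.0, cy = 240.0;
    cv::Matx33d K(fx, 0,  cx,
                  0,  fy, cy,
                  0,  0,  1);

    // A 3D point in the camera coordinate system (metres), in front of the lens.
    cv::Vec3d P(0.1, 0.05, 1.0);

    // Project: divide by depth Z to get image-plane coordinates, then apply K
    // to land in pixel coordinates.
    cv::Vec3d p = K * cv::Vec3d(P[0] / P[2], P[1] / P[2], 1.0);
    printf("pixel coordinates: u = %.1f, v = %.1f\n", p[0], p[1]);
    return 0;
}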
Mainstream ranging products handle this at the factory: the cameras are calibrated and fixed in place, and the device is kept perpendicular to the object under test, which keeps image distortion under control. You might object that the two cameras already look parallel; they are roughly in one plane, but nothing can be mounted perfectly perpendicular, and even high-precision cameras are no exception. The USB module used here is simply screwed onto the PCB, so its alignment cannot be guaranteed either, and this calibration step is normally left to the user. See the figure below.

The next question is how to calibrate. Since we are working with OpenCV, we use OpenCV for this step as well: it ships a calibration sample (a single .cpp file) which is written for one camera, but it can be run once per camera to calibrate a stereo pair. After calibration you end up with two coefficient files in .yml format, one for the left camera and one for the right. The calibration program is listed below.

#include "opencv2/core.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

#include <cctype>
#include <stdio.h>
#include <string.h>
#include <time.h>

using namespace cv;
using namespace std;

const char * usage =
" \nexample command line for calibration from a live feed.\n"
"   calibration  -w=4 -h=5 -s=0.025 -o=camera.yml -op -oe\n"
" \n"
" example command line for calibration from a list of stored images:\n"
"   imagelist_creator image_list.xml *.png\n"
"   calibration -w=4 -h=5 -s=0.025 -o=camera.yml -op -oe image_list.xml\n"
" where image_list.xml is the standard OpenCV XML/YAML\n"
" use imagelist_creator to create the xml or yaml list\n"
" file consisting of the list of strings, e.g.:\n"
" \n"
"<?xml version=\"1.0\"?>\n"
"<opencv_storage>\n"
"<images>\n"
"view000.png\n"
"view001.png\n"
"<!-- view002.png -->\n"
"view003.png\n"
"view010.png\n"
"one_extra_view.jpg\n"
"</images>\n"
"</opencv_storage>\n";

const char* liveCaptureHelp =
    "When the live video from camera is used as input, the following hot-keys may be used:\n"
        "  <ESC>, 'q' - quit the program\n"
        "  'g' - start capturing images\n"
        "  'u' - switch undistortion on/off\n";

static void help(char** argv)
{
    printf( "This is a camera calibration sample.\n"
        "Usage: %s\n"
        "     -w=<board_width>         # the number of inner corners per one of board dimension\n"
        "     -h=<board_height>        # the number of inner corners per another board dimension\n"
        "     [-pt=<pattern>]          # the type of pattern: chessboard or circles' grid\n"
        "     [-n=<number_of_frames>]  # the number of frames to use for calibration\n"
        "                              # (if not specified, it will be set to the number\n"
        "                              #  of board views actually available)\n"
        "     [-d=<delay>]             # a minimum delay in ms between subsequent attempts to capture a next board view\n"
        "                              # (used only for video capturing)\n"
        "     [-s=<squareSize>]        # square size in some user-defined units (1 by default)\n"
        "     [-o=<out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
        "     [-op]                    # write detected feature points\n"
        "     [-oe]                    # write extrinsic parameters\n"
        "     [-zt]                    # assume zero tangential distortion\n"
        "     [-a=<aspectRatio>]       # fix aspect ratio (fx/fy)\n"
        "     [-p]                     # fix the principal point at the center\n"
        "     [-v]                     # flip the captured images around the horizontal axis\n"
        "     [-V]                     # use a video file, and not an image list, uses\n"
        "                              # [input_data] string for the video file name\n"
        "     [-su]                    # show undistorted images after calibration\n"
        "     [input_data]             # input data, one of the following:\n"
        "                              #  - text file with a list of the images of the board\n"
        "                              #    the text file can be generated with imagelist_creator\n"
        "                              #  - name of video file with a video of the board\n"
        "                              # if input_data not specified, a live view from the camera is used\n"
        "\n", argv[0] );
    printf("\n%s", usage);
    printf( "\n%s", liveCaptureHelp );
}

enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
enum Pattern { CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };

static double computeReprojectionErrors(
        const vector<vector<Point3f> >& objectPoints,
        const vector<vector<Point2f> >& imagePoints,
        const vector<Mat>& rvecs, const vector<Mat>& tvecs,
        const Mat& cameraMatrix, const Mat& distCoeffs,
        vector<float>& perViewErrors )
{
    vector<Point2f> imagePoints2;
    int i, totalPoints = 0;
    double totalErr = 0, err;
    perViewErrors.resize(objectPoints.size());

    for( i = 0; i < (int)objectPoints.size(); i++ )
    {
        projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i],
                      cameraMatrix, distCoeffs, imagePoints2);
        err = norm(Mat(imagePoints[i]), Mat(imagePoints2), NORM_L2);
        int n = (int)objectPoints[i].size();
        perViewErrors[i] = (float)std::sqrt(err*err/n);
        totalErr += err*err;
        totalPoints += n;
    }

    return std::sqrt(totalErr/totalPoints);
}

static void calcChessboardCorners(Size boardSize, float squareSize,
                                  vector<Point3f>& corners, Pattern patternType = CHESSBOARD)
{
    corners.resize(0);

    switch(patternType)
    {
      case CHESSBOARD:
      case CIRCLES_GRID:
        for( int i = 0; i < boardSize.height; i++ )
            for( int j = 0; j < boardSize.width; j++ )
                corners.push_back(Point3f(float(j*squareSize),
                                          float(i*squareSize), 0));
        break;

      case ASYMMETRIC_CIRCLES_GRID:
        for( int i = 0; i < boardSize.height; i++ )
            for( int j = 0; j < boardSize.width; j++ )
                corners.push_back(Point3f(float((2*j + i % 2)*squareSize),
                                          float(i*squareSize), 0));
        break;

      default:
        CV_Error(Error::StsBadArg, "Unknown pattern type\n");
    }
}

static bool runCalibration( vector<vector<Point2f> > imagePoints,
                    Size imageSize, Size boardSize, Pattern patternType,
                    float squareSize, float aspectRatio,
                    int flags, Mat& cameraMatrix, Mat& distCoeffs,
                    vector<Mat>& rvecs, vector<Mat>& tvecs,
                    vector<float>& reprojErrs,
                    double& totalAvgErr)
{
    cameraMatrix = Mat::eye(3, 3, CV_64F);
    if( flags & CALIB_FIX_ASPECT_RATIO )
        cameraMatrix.at<double>(0,0) = aspectRatio;

    distCoeffs = Mat::zeros(8, 1, CV_64F);

    vector<vector<Point3f> > objectPoints(1);
    calcChessboardCorners(boardSize, squareSize, objectPoints[0], patternType);

    objectPoints.resize(imagePoints.size(), objectPoints[0]);

    double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
                    distCoeffs, rvecs, tvecs, flags|CALIB_FIX_K4|CALIB_FIX_K5);
                    ///*|CALIB_FIX_K3*/|CALIB_FIX_K4|CALIB_FIX_K5);
    printf("RMS error reported by calibrateCamera: %g\n", rms);

    bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);

    totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints,
                rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);

    return ok;
}

static void saveCameraParams( const string& filename,
                       Size imageSize, Size boardSize,
                       float squareSize, float aspectRatio, int flags,
                       const Mat& cameraMatrix, const Mat& distCoeffs,
                       const vector<Mat>& rvecs, const vector<Mat>& tvecs,
                       const vector<float>& reprojErrs,
                       const vector<vector<Point2f> >& imagePoints,
                       double totalAvgErr )
{
    FileStorage fs( filename, FileStorage::WRITE );

    time_t tt;
    time( &tt );
    struct tm *t2 = localtime( &tt );
    char buf[1024];
    strftime( buf, sizeof(buf)-1, "%c", t2 );

    fs << "calibration_time" << buf;

    if( !rvecs.empty() || !reprojErrs.empty() )
        fs << "nframes" << (int)std::max(rvecs.size(), reprojErrs.size());
    fs << "image_width" << imageSize.width;
    fs << "image_height" << imageSize.height;
    fs << "board_width" << boardSize.width;
    fs << "board_height" << boardSize.height;
    fs << "square_size" << squareSize;

    if( flags & CALIB_FIX_ASPECT_RATIO )
        fs << "aspectRatio" << aspectRatio;

    if( flags != 0 )
    {
        sprintf( buf, "flags: %s%s%s%s",
            flags & CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "",
            flags & CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "",
            flags & CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "",
            flags & CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "" );
        //cvWriteComment( *fs, buf, 0 );
    }

    fs << "flags" << flags;

    fs << "camera_matrix" << cameraMatrix;
    fs << "distortion_coefficients" << distCoeffs;

    fs << "avg_reprojection_error" << totalAvgErr;
    if( !reprojErrs.empty() )
        fs << "per_view_reprojection_errors" << Mat(reprojErrs);

    if( !rvecs.empty() && !tvecs.empty() )
    {
        CV_Assert(rvecs[0].type() == tvecs[0].type());
        Mat bigmat((int)rvecs.size(), 6, rvecs[0].type());
        for( int i = 0; i < (int)rvecs.size(); i++ )
        {
            Mat r = bigmat(Range(i, i+1), Range(0,3));
            Mat t = bigmat(Range(i, i+1), Range(3,6));

            CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1);
            CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1);
            //*.t() is MatExpr (not Mat) so we can use assignment operator
            r = rvecs[i].t();
            t = tvecs[i].t();
        }
        //cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
        fs << "extrinsic_parameters" << bigmat;
    }

    if( !imagePoints.empty() )
    {
        Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
        for( int i = 0; i < (int)imagePoints.size(); i++ )
        {
            Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
            Mat imgpti(imagePoints[i]);
            imgpti.copyTo(r);
        }
        fs << "image_points" << imagePtMat;
    }
}

static bool readStringList( const string& filename, vector<string>& l )
{
    l.resize(0);
    FileStorage fs(filename, FileStorage::READ);
    if( !fs.isOpened() )
        return false;
    size_t dir_pos = filename.rfind('/');
    if (dir_pos == string::npos)
        dir_pos = filename.rfind('\\');
    FileNode n = fs.getFirstTopLevelNode();
    if( n.type() != FileNode::SEQ )
        return false;
    FileNodeIterator it = n.begin(), it_end = n.end();
    for( ; it != it_end; ++it )
    {
        string fname = (string)*it;
        if (dir_pos != string::npos)
        {
            string fpath = samples::findFile(filename.substr(0, dir_pos + 1) + fname, false);
            if (fpath.empty())
            {
                fpath = samples::findFile(fname);
            }
            fname = fpath;
        }
        else
        {
            fname = samples::findFile(fname);
        }
        l.push_back(fname);
    }
    return true;
}

static bool runAndSave(const string& outputFilename,
                const vector<vector<Point2f> >& imagePoints,
                Size imageSize, Size boardSize, Pattern patternType, float squareSize,
                float aspectRatio, int flags, Mat& cameraMatrix,
                Mat& distCoeffs, bool writeExtrinsics, bool writePoints )
{
    vector<Mat> rvecs, tvecs;
    vector<float> reprojErrs;
    double totalAvgErr = 0;

    bool ok = runCalibration(imagePoints, imageSize, boardSize, patternType, squareSize,
                   aspectRatio, flags, cameraMatrix, distCoeffs,
                   rvecs, tvecs, reprojErrs, totalAvgErr);
    printf("%s. avg reprojection error = %.2f\n",
           ok ? "Calibration succeeded" : "Calibration failed",
           totalAvgErr);

    if( ok )
        saveCameraParams( outputFilename, imageSize,
                         boardSize, squareSize, aspectRatio,
                         flags, cameraMatrix, distCoeffs,
                         writeExtrinsics ? rvecs : vector<Mat>(),
                         writeExtrinsics ? tvecs : vector<Mat>(),
                         writeExtrinsics ? reprojErrs : vector<float>(),
                         writePoints ? imagePoints : vector<vector<Point2f> >(),
                         totalAvgErr );
    return ok;
}

int main( int argc, char** argv )
{
    Size boardSize, imageSize;
    float squareSize, aspectRatio;
    Mat cameraMatrix, distCoeffs;
    string outputFilename;
    string inputFilename = "";

    int i, nframes;
    bool writeExtrinsics, writePoints;
    bool undistortImage = false;
    int flags = 0;
    VideoCapture capture;
    bool flipVertical;
    bool showUndistorted;
    bool videofile;
    int delay;
    clock_t prevTimestamp = 0;
    int mode = DETECTION;
    int cameraId = 2;                       // camera index used here (the stock sample defaults to 0)
    vector<vector<Point2f> > imagePoints;
    vector<string> imageList;
    Pattern pattern = CHESSBOARD;

    cv::CommandLineParser parser(argc, argv,
        "{help||}{w||}{h||}{pt|chessboard|}{n|10|}{d|1000|}{s|1|}{o|out_camera_data.yml|}"
        "{op||}{oe||}{zt||}{a|1|}{p||}{v||}{V||}{su||}"
        "{@input_data|0|}");
    if (parser.has("help"))
    {
        help(argv);
        return 0;
    }
    boardSize.width = parser.get<int>( "w" );
    boardSize.height = parser.get<int>( "h" );
    if ( parser.has("pt") )
    {
        string val = parser.get<string>("pt");
        if( val == "circles" )
            pattern = CIRCLES_GRID;
        else if( val == "acircles" )
            pattern = ASYMMETRIC_CIRCLES_GRID;
        else if( val == "chessboard" )
            pattern = CHESSBOARD;
        else
            return fprintf( stderr, "Invalid pattern type: must be chessboard or circles\n" ), -1;
    }
    squareSize = parser.get<float>("s");
    nframes = parser.get<int>("n");
    aspectRatio = parser.get<float>("a");
    delay = parser.get<int>("d");
    writePoints = parser.has("op");
    writeExtrinsics = parser.has("oe");
    if (parser.has("a"))
        flags |= CALIB_FIX_ASPECT_RATIO;
    if ( parser.has("zt") )
        flags |= CALIB_ZERO_TANGENT_DIST;
    if ( parser.has("p") )
        flags |= CALIB_FIX_PRINCIPAL_POINT;
    flipVertical = parser.has("v");
    videofile = parser.has("V");
    if ( parser.has("o") )
        outputFilename = parser.get<string>("o");
    showUndistorted = parser.has("su");
    if (isdigit(parser.get<string>("@input_data")[0]))
        cameraId = 2;                       // numeric input: fall back to the fixed index above
    else
        inputFilename = parser.get<string>("@input_data");
    if (!parser.check())
    {
        help(argv);
        parser.printErrors();
        return -1;
    }
    if ( squareSize <= 0 )
        return fprintf( stderr, "Invalid board square width\n" ), -1;
    if ( nframes <= 3 )
        return printf("Invalid number of images\n" ), -1;
    if ( aspectRatio <= 0 )
        return printf( "Invalid aspect ratio\n" ), -1;
    if ( delay <= 0 )
        return printf( "Invalid delay\n" ), -1;
    if ( boardSize.width <= 0 )
        return fprintf( stderr, "Invalid board width\n" ), -1;
    if ( boardSize.height <= 0 )
        return fprintf( stderr, "Invalid board height\n" ), -1;

    if( !inputFilename.empty() )
    {
        if( !videofile && readStringList(samples::findFile(inputFilename), imageList) )
            mode = CAPTURING;
        else
            capture.open(samples::findFileOrKeep(inputFilename));
    }
    else
        capture.open(cameraId);

    if( !capture.isOpened() && imageList.empty() )
        return fprintf( stderr, "Could not initialize video (%d) capture\n", cameraId ), -2;

    if( !imageList.empty() )
        nframes = (int)imageList.size();

    if( capture.isOpened() )
        printf( "%s", liveCaptureHelp );

    namedWindow( "Image View", 1 );

    for(i = 0;;i++)
    {
        Mat view, viewGray;
        bool blink = false;

        if( capture.isOpened() )
        {
            Mat view0;
            capture >> view0;
            view0.copyTo(view);
        }
        else if( i < (int)imageList.size() )
            view = imread(imageList[i], 1);

        if(view.empty())
        {
            if( imagePoints.size() > 0 )
                runAndSave(outputFilename, imagePoints, imageSize,
                           boardSize, pattern, squareSize, aspectRatio,
                           flags, cameraMatrix, distCoeffs,
                           writeExtrinsics, writePoints);
            break;
        }

        imageSize = view.size();

        if( flipVertical )
            flip( view, view, 0 );

        vector<Point2f> pointbuf;
        cvtColor(view, viewGray, COLOR_BGR2GRAY);

        bool found;
        switch( pattern )
        {
            case CHESSBOARD:
                found = findChessboardCorners( view, boardSize, pointbuf,
                    CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
                break;
            case CIRCLES_GRID:
                found = findCirclesGrid( view, boardSize, pointbuf );
                break;
            case ASYMMETRIC_CIRCLES_GRID:
                found = findCirclesGrid( view, boardSize, pointbuf, CALIB_CB_ASYMMETRIC_GRID );
                break;
            default:
                return fprintf( stderr, "Unknown pattern type\n" ), -1;
        }

        // improve the found corners' coordinate accuracy
        if( pattern == CHESSBOARD && found )
            cornerSubPix( viewGray, pointbuf, Size(11,11), Size(-1,-1),
                TermCriteria( TermCriteria::EPS+TermCriteria::COUNT, 30, 0.1 ));

        if( mode == CAPTURING && found &&
           (!capture.isOpened() || clock() - prevTimestamp > delay*1e-3*CLOCKS_PER_SEC) )
        {
            imagePoints.push_back(pointbuf);
            prevTimestamp = clock();
            blink = capture.isOpened();
        }

        if(found)
            drawChessboardCorners( view, boardSize, Mat(pointbuf), found );

        string msg = mode == CAPTURING ? "100/100" :
            mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
        int baseLine = 0;
        Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
        Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);

        if( mode == CAPTURING )
        {
            if(undistortImage)
                msg = format( "%d/%d Undist", (int)imagePoints.size(), nframes );
            else
                msg = format( "%d/%d", (int)imagePoints.size(), nframes );
        }

        putText( view, msg, textOrigin, 1, 1,
                 mode != CALIBRATED ? Scalar(0,0,255) : Scalar(0,255,0));

        if( blink )
            bitwise_not(view, view);

        if( mode == CALIBRATED && undistortImage )
        {
            Mat temp = view.clone();
            undistort(temp, view, cameraMatrix, distCoeffs);
        }

        imshow("Image View", view);
        char key = (char)waitKey(capture.isOpened() ? 50 : 500);

        if( key == 27 )
            break;

        if( key == 'u' && mode == CALIBRATED )
            undistortImage = !undistortImage;

        if( capture.isOpened() && key == 'g' )
        {
            mode = CAPTURING;
            imagePoints.clear();
        }

        if( mode == CAPTURING && imagePoints.size() >= (unsigned)nframes )
        {
            if( runAndSave(outputFilename, imagePoints, imageSize,
                       boardSize, pattern, squareSize, aspectRatio,
                       flags, cameraMatrix, distCoeffs,
                       writeExtrinsics, writePoints))
                mode = CALIBRATED;
            else
                mode = DETECTION;
            if( !capture.isOpened() )
                break;
        }
    }

    if( !capture.isOpened() && showUndistorted )
    {
        Mat view, rview, map1, map2;
        initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
            getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
            imageSize, CV_16SC2, map1, map2);

        for( i = 0; i < (int)imageList.size(); i++ )
        {
            view = imread(imageList[i], 1);
            if(view.empty())
                continue;
            //undistort( view, rview, cameraMatrix, distCoeffs, cameraMatrix );
            remap(view, rview, map1, map2, INTER_LINEAR);
            imshow("Image View", rview);
            char c = (char)waitKey();
            if( c == 27 || c == 'q' || c == 'Q' )
                break;
        }
    }

    return 0;
}

Run the program: ./cal -w=6 -h=8 -s=2 -n=10 (the meaning of each option is explained in the help text inside the code). Running it once per camera gives us the calibration coefficients for both cameras. The calibration target is a black-and-white checkerboard.
"Calibrationsucceeded" : "Calibration failed", totalAvgErr); if( ok ) saveCameraParams( outputFilename,imageSize, boardSize, squareSize,aspectRatio, flags, cameraMatrix,distCoeffs, writeExtrinsics ?rvecs : vector writeExtrinsics ?tvecs : vector writeExtrinsics ?reprojErrs : vector writePoints ?imagePoints : vector totalAvgErr ); return ok; } int main(int argc, char** argv ) { Size boardSize, imageSize; float squareSize, aspectRatio; Mat cameraMatrix, distCoeffs; string outputFilename; string inputFilename = ""; int i, nframes; bool writeExtrinsics, writePoints; bool undistortImage = false; int flags = 0; VideoCapture capture; bool flipVertical; bool showUndistorted; bool videofile; int delay; clock_t prevTimestamp = 0; int mode = DETECTION; int cameraId = 2; vector vector Pattern pattern = CHESSBOARD; cv::CommandLineParser parser(argc, argv, "{help||}{w||}{h||}{pt|ches***oard|}{n|10|}{d|1000|}{s|1|}{o|out_camera_data.yml|}" "{op||}{oe||}{zt||}{a|1|}{p||}{v||}{V||}{su||}" "{@input_data|0|}"); if (parser.has("help")) { help(argv); return 0; } boardSize.width = parser.get boardSize.height = parser.get if ( parser.has("pt") ) { string val = parser.get if( val == "circles" ) pattern = CIRCLES_GRID; else if( val == "acircles" ) pattern = ASYMMETRIC_CIRCLES_GRID; else if( val == "ches***oard") pattern = CHESSBOARD; else return fprintf( stderr,"Invalid pattern type: must be ches***oard or circles " ), -1; } squareSize =parser.get nframes =parser.get aspectRatio =parser.get delay = parser.get writePoints = parser.has("op"); writeExtrinsics =parser.has("oe"); if (parser.has("a")) flags |= CALIB_FIX_ASPECT_RATIO; if ( parser.has("zt") ) flags |= CALIB_ZERO_TANGENT_DIST; if ( parser.has("p") ) flags |= CALIB_FIX_PRINCIPAL_POINT; flipVertical = parser.has("v"); videofile = parser.has("V"); if ( parser.has("o") ) outputFilename =parser.get showUndistorted =parser.has("su"); if (isdigit(parser.get cameraId = 2; else inputFilename =parser.get if (!parser.check()) { help(argv); parser.printErrors(); return -1; } if ( squareSize <= 0 ) return fprintf( stderr, "Invalidboard square width " ), -1; if ( nframes <= 3 ) return printf("Invalid number ofimages " ), -1; if ( aspectRatio <= 0 ) return printf( "Invalid aspectratio " ), -1; if ( delay <= 0 ) return printf( "Invaliddelay " ), -1; if ( boardSize.width <= 0 ) return fprintf( stderr, "Invalidboard width " ), -1; if ( boardSize.height <= 0 ) return fprintf( stderr, "Invalidboard height " ), -1; if( !inputFilename.empty() ) { if( !videofile &&readStringList(samples::findFile(inputFilename), imageList) ) mode = CAPTURING; else capture.open(samples::findFileOrKeep(inputFilename)); } else capture.open(cameraId); if( !capture.isOpened() &&imageList.empty() ) return fprintf( stderr, "Could notinitialize video (%d) capture ",cameraId ), -2; if( !imageList.empty() ) nframes = (int)imageList.size(); if( capture.isOpened() ) printf( "%s", liveCaptureHelp); namedWindow( "Image View", 1 ); for(i = 0;;i++) { Mat view, viewGray; bool blink = false; if( capture.isOpened() ) { Mat view0; capture >> view0; view0.copyTo(view); } else if( i < (int)imageList.size() ) view = imread(imageList, 1); if(view.empty()) { if( imagePoints.size() > 0 ) runAndSave(outputFilename,imagePoints, imageSize, boardSize, pattern,squareSize, aspectRatio, flags, cameraMatrix,distCoeffs, writeExtrinsics,writePoints); break; } imageSize = view.size(); if( flipVertical ) flip( view, view, 0 ); vector cvtColor(view, viewGray,COLOR_BGR2GRAY); bool found; switch( pattern ) { case CHESSBOARD: found = 
For a high-accuracy measurement, the first thing to face is calibrating the distance to the measurement target. Because our camera sits at a fixed height (it has to be fixed: if it moved up and down, calibration at several positions would be needed, which is not demonstrated here), we need a reference object of standard size, for example a one-yuan coin. In real product development the requirements on the reference object's dimensional accuracy and surface cleanliness are very strict, otherwise the measurement data needs additional correction. Here the one-yuan coin is used purely as a demonstration.

The detection method is simple: convert the image to a binary image and detect the circle by its edges (note: for higher accuracy the background should contrast strongly with the coin so the edge is easier to detect; a black background is used here).

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    const char* filename = "target.jpg";
    Mat img = imread(filename);
    if (img.empty())
    {
        cout << "can not open " << filename << endl;
        return -1;
    }

    Mat img2, img3;
    cvtColor(img, img2, COLOR_BGR2GRAY);            // convert the colour image to greyscale
    GaussianBlur(img2, img2, Size(9, 9), 2, 2);     // smooth before thresholding
    threshold(img2, img3, 90, 255, THRESH_BINARY);  // binarise; tune the threshold to your lighting
    namedWindow("binary image", WINDOW_NORMAL);
    imshow("binary image", img3);

    Canny(img3, img3, 50, 100);                     // edge detection
    namedWindow("detect circles", WINDOW_NORMAL);
    imshow("detect circles", img3);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(img3, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_NONE);  // find all outer (circle) boundaries
    if (contours.empty())
    {
        cout << "no contour found" << endl;
        return -1;
    }

    int index = 0;
    for (; index >= 0; index = hierarchy[index][0])
    {
        Scalar color(rand() & 255, rand() & 255, rand() & 255);
        drawContours(img, contours, index, color, FILLED, 8, hierarchy);
    }
    namedWindow("detected circles", WINDOW_NORMAL);
    imshow("detected circles", img);

    // A circle usually images as an ellipse, so fit an ellipse to find the centre.
    Mat pointsf;
    Mat(contours[0]).convertTo(pointsf, CV_32F);
    RotatedRect box = fitEllipse(pointsf);
    cout << box.center;

    waitKey();
    return 0;
}

This yields a size in pixels, i.e. the size of the coin as seen through the camera. The diameter of the RMB one-yuan coin is known (it can be looked up online), so we obtain the ratio between the real size and the size the camera measures; combined with the fixed distance D, this gives a distance scale.
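Here is a minimal sketch of turning the fitted ellipse into a millimetre-per-pixel scale. The 25 mm coin diameter is an assumption (the older one-yuan series; verify the value for the coin actually used), and box stands in for the RotatedRect returned by fitEllipse() in the listing above.

#include <opencv2/core.hpp>
#include <cstdio>

using namespace cv;

int main()
{
    // Placeholder: in the real program, `box` comes from fitEllipse() above.
    RotatedRect box(Point2f(320.f, 240.f), Size2f(200.f, 198.f), 0.f);

    const double coinDiameterMm = 25.0;   // assumed coin diameter, verify for your coin

    // Use the mean of the two ellipse axes as the coin's diameter in pixels.
    double diameterPx = (box.size.width + box.size.height) / 2.0;
    double mmPerPx = coinDiameterMm / diameterPx;

    // Any pixel length measured at the same fixed distance D converts directly.
    printf("scale: %.4f mm/px, 80 px -> %.1f mm\n", mmPerPx, 80.0 * mmPerPx);
    return 0;
}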
Note that the detection has to be carried out for each of the two cameras in turn, and the reference must stay in a fixed position. The reason is simple: angular offset and lens distortion both shift the calibration coefficient. In later measurements the measured object should also be kept close to this calibrated region. If you insist on treating every position in the field of view equally, you would need to sample and compare many positions and work out the coefficient at each one; the coefficient is roughly linear across the field, but because that is complex and time-consuming, no further sampling and computation is done here. It is enough to place the measured object near where the reference coin was calibrated.

At this point everything falls into place and we can carry out the final computation. The result is shown in the figure below.
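For reference, a minimal sketch of how the calibrated quantities above could be combined into a final distance: locate the coin centre in the undistorted left and right views with the ellipse-fitting routine, take the disparity of the two centres, and apply Z = f * T / d. The centre coordinates, focal length and baseline below are placeholders, not the values behind the result shown in the figure.

#include <cstdio>

int main()
{
    double xl = 352.4;   // coin centre x in the left image, pixels (placeholder)
    double xr = 328.1;   // coin centre x in the right image, pixels (placeholder)
    double f  = 800.0;   // focal length in pixels, from the calibration yml (placeholder)
    double T  = 60.0;    // baseline between the two lenses, mm (placeholder)

    double d = xl - xr;      // disparity between the two views
    double Z = f * T / d;    // distance, as in the stereo figure earlier
    printf("disparity %.1f px -> distance %.0f mm\n", d, Z);
    return 0;
}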