diff --git a/calibration_with_blender/CMakeLists.txt b/calibration_with_blender/CMakeLists.txt
new file mode 100644
index 0000000..2912c0c
--- /dev/null
+++ b/calibration_with_blender/CMakeLists.txt
@@ -0,0 +1,30 @@
+cmake_minimum_required(VERSION 3.16)
+
+project(calibration_with_blender VERSION 0.1.0)
+
+#if(NOT EXISTS "${CMAKE_BINARY_DIR}/conan.cmake")
+#    message(STATUS "Downloading conan.cmake from https://github.com/conan-io/cmake-conan")
+#    file(DOWNLOAD "https://raw.githubusercontent.com/conan-io/cmake-conan/0.18.1/conan.cmake"
+#         "${CMAKE_BINARY_DIR}/conan.cmake"
+#         TLS_VERIFY ON)
+#endif()
+#
+#include(${CMAKE_BINARY_DIR}/conan.cmake)
+#
+#conan_cmake_configure(REQUIRES opencv/4.5.5
+#                      GENERATORS cmake_find_package)
+#
+#conan_cmake_autodetect(settings)
+#
+#conan_cmake_install(PATH_OR_REFERENCE .
+#                    BUILD missing
+#                    REMOTE conancenter
+#                    SETTINGS ${settings})
+
+find_package(OpenCV REQUIRED)
+
+add_executable(image_distort image_distort.cpp)
+add_executable(calibration_benchmark calibration_benchmark.cpp)
+
+target_link_libraries(image_distort PRIVATE opencv_highgui opencv_calib3d opencv_imgproc)
+target_link_libraries(calibration_benchmark PRIVATE opencv_highgui opencv_calib3d opencv_imgproc)
diff --git a/calibration_with_blender/README.md b/calibration_with_blender/README.md
new file mode 100644
index 0000000..8ce1229
--- /dev/null
+++ b/calibration_with_blender/README.md
@@ -0,0 +1,9 @@
+# Calibration with Blender
+### Create synthetic images of a calibration pattern with Blender:
+* Open calibration.blend in Blender
+* Copy the contents of render.py into the Blender text editor
+* Run the script in Blender
+* Distort the rendered images with image_distort
+
+### Pattern generation command
+gen_pattern.py -c 14 -r 19 -T checkerboard -u px -s 220 -w 3508 -h 4961 && convert out.svg checkerboard.png
diff --git a/calibration_with_blender/calibration.blend b/calibration_with_blender/calibration.blend
new file mode 100644
index 0000000..c6cb1bd
Binary files /dev/null and b/calibration_with_blender/calibration.blend differ
diff --git a/calibration_with_blender/calibration_benchmark.cpp b/calibration_with_blender/calibration_benchmark.cpp
new file mode 100644
index 0000000..8fd3a7b
--- /dev/null
+++ b/calibration_with_blender/calibration_benchmark.cpp
@@ -0,0 +1,684 @@
+#include "opencv2/core.hpp"
+#include <opencv2/core/utility.hpp>
+#include "opencv2/imgproc.hpp"
+#include "opencv2/3d.hpp"
+#include "opencv2/calib.hpp"
+#include "opencv2/imgcodecs.hpp"
+#include "opencv2/videoio.hpp"
+#include "opencv2/highgui.hpp"
+
+#include <cctype>
+#include <cstdio>
+#include <cstring>
+#include <ctime>
+#include <fstream>
+#include <iostream>
+
+using namespace cv;
+using namespace std;
+
+enum
+{
+    DETECTION = 0,
+    CAPTURING = 1,
+    CALIBRATED = 2
+};
+enum Pattern
+{
+    CHESSBOARD,
+    CIRCLES_GRID,
+    ASYMMETRIC_CIRCLES_GRID,
+    RADON
+};
+
+static bool readStringList(const string& filename, vector<string>& l);
+
+static bool runAndSave(const string& outputFilename, const vector<vector<Point2f>>& imagePoints,
+                       Size imageSize, Size boardSize, Pattern patternType, float squareSize,
+                       float grid_width, bool release_object, float aspectRatio, int flags,
+                       Mat& cameraMatrix, Mat& distCoeffs, bool writeExtrinsics, bool writePoints,
+                       bool writeGrid);
+
+const char* usage = " \nexample command line for calibration from a live feed.\n"
+                    " calibration -w=4 -h=5 -s=0.025 -o=camera.yml -op -oe\n"
+                    " \n"
+                    " example command line for calibration from a list of stored images:\n"
+                    " imagelist_creator image_list.xml *.png\n"
+                    " calibration -w=4 -h=5 -s=0.025 -o=camera.yml -op -oe image_list.xml\n"
+                    " where image_list.xml is the standard OpenCV
XML/YAML\n" + " use imagelist_creator to create the xml or yaml list\n" + " file consisting of the list of strings, e.g.:\n" + " \n" + "\n" + "\n" + "\n" + "view000.png\n" + "view001.png\n" + "\n" + "view003.png\n" + "view010.png\n" + "one_extra_view.jpg\n" + "\n" + "\n"; + +const char* liveCaptureHelp = + "When the live video from camera is used as input, the following hot-keys may be used:\n" + " , 'q' - quit the program\n" + " 'g' - start capturing images\n" + " 'u' - switch undistortion on/off\n"; + +static void help(char** argv); + +static double computeReprojectionErrors(const vector>& objectPoints, + const vector>& imagePoints, + const vector& rvecs, const vector& tvecs, + const Mat& cameraMatrix, const Mat& distCoeffs, + vector& perViewErrors); + +static void calcChessboardCorners(Size boardSize, float squareSize, vector& corners, + Pattern patternType = CHESSBOARD); + +int main(int argc, char** argv) +{ + Size boardSize, imageSize; + float squareSize, aspectRatio = 1; + Mat cameraMatrix, distCoeffs; + string outputFilename; + string inputFilename; + + int i, nframes; + bool writeExtrinsics, writePoints; + bool undistortImage = false; + int flags = 0; + VideoCapture capture; + bool flipVertical; + bool showUndistorted; + bool videofile; + int delay; + clock_t prevTimestamp = 0; + int mode = DETECTION; + int cameraId = 0; + vector> imagePoints; + vector imageList; + Pattern pattern = CHESSBOARD; + + cv::CommandLineParser parser( + argc, argv, + "{help ||}{w||}{h||}{pt|chessboard|}{n|10|}{d|1000|}{s|1|}{o|out_camera_data.yml|}" + "{op||}{oe||}{zt||}{a||}{p||}{v||}{V||}{su||}" + "{oo||}{ws|11|}{dt||}" + "{@input_data|0|}"); + if (parser.has("help")) + { + help(argv); + return 0; + } + boardSize.width = parser.get("w"); + boardSize.height = parser.get("h"); + if (parser.has("pt")) + { + string val = parser.get("pt"); + if (val == "circles") + pattern = CIRCLES_GRID; + else if (val == "acircles") + pattern = ASYMMETRIC_CIRCLES_GRID; + else if (val == "chessboard") + pattern = CHESSBOARD; + else if (val == "radon") + pattern = RADON; + else + return fprintf(stderr, "Invalid pattern type: must be chessboard or circles\n"), -1; + } + squareSize = parser.get("s"); + nframes = parser.get("n"); + delay = parser.get("d"); + writePoints = parser.has("op"); + writeExtrinsics = parser.has("oe"); + bool writeGrid = parser.has("oo"); + if (parser.has("a")) + { + flags |= CALIB_FIX_ASPECT_RATIO; + aspectRatio = parser.get("a"); + } + if (parser.has("zt")) + flags |= CALIB_ZERO_TANGENT_DIST; + if (parser.has("p")) + flags |= CALIB_FIX_PRINCIPAL_POINT; + flipVertical = parser.has("v"); + videofile = parser.has("V"); + if (parser.has("o")) + outputFilename = parser.get("o"); + showUndistorted = parser.has("su"); + if (isdigit(parser.get("@input_data")[0])) + cameraId = parser.get("@input_data"); + else + inputFilename = parser.get("@input_data"); + int winSize = parser.get("ws"); + float grid_width = squareSize * (boardSize.width - 1); + bool release_object = false; + if (parser.has("dt")) + { + grid_width = parser.get("dt"); + release_object = true; + } + if (!parser.check()) + { + help(argv); + parser.printErrors(); + return -1; + } + + /* Check input parameters */ + if (squareSize <= 0) + return fprintf(stderr, "Invalid board square width\n"), -1; + if (nframes <= 3) + return printf("Invalid number of images\n"), -1; + if (aspectRatio <= 0) + return printf("Invalid aspect ratio\n"), -1; + if (delay <= 0) + return printf("Invalid delay\n"), -1; + if (boardSize.width <= 0) + return fprintf(stderr, 
"Invalid board width\n"), -1; + if (boardSize.height <= 0) + return fprintf(stderr, "Invalid board height\n"), -1; + + if (!inputFilename.empty()) + { + if (!videofile && readStringList(samples::findFile(inputFilename), imageList)) + mode = CAPTURING; + else + capture.open(samples::findFileOrKeep(inputFilename)); + } + else + capture.open(cameraId); + + if (!capture.isOpened() && imageList.empty()) + return fprintf(stderr, "Could not initialize video (%d) capture\n", cameraId), -2; + + if (!imageList.empty()) + nframes = (int)imageList.size(); + + if (capture.isOpened()) + printf("%s", liveCaptureHelp); + + /* Iterate image list */ + namedWindow("Image View", 1); + + for (i = 0;; i++) + { + Mat view, viewGray; + bool blink = false; + + if (capture.isOpened()) + { + Mat view0; + capture >> view0; + view0.copyTo(view); + } + else if (i < (int)imageList.size()) + view = imread(imageList[i], 1); + + if (view.empty()) + { + if (!imagePoints.empty()) + runAndSave(outputFilename, imagePoints, imageSize, boardSize, pattern, squareSize, + grid_width, release_object, aspectRatio, flags, cameraMatrix, distCoeffs, + writeExtrinsics, writePoints, writeGrid); + break; + } + + imageSize = view.size(); + + if (flipVertical) + flip(view, view, 0); + + vector pointbuf; + cvtColor(view, viewGray, COLOR_BGR2GRAY); + + bool found; + switch (pattern) + { + case CHESSBOARD: + found = findChessboardCorners( + view, boardSize, pointbuf, + CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK /* | CALIB_CB_NORMALIZE_IMAGE*/); + break; + case CIRCLES_GRID: + found = findCirclesGrid(view, boardSize, pointbuf); + break; + case ASYMMETRIC_CIRCLES_GRID: + found = findCirclesGrid(view, boardSize, pointbuf, CALIB_CB_ASYMMETRIC_GRID); + break; + case RADON: + found = findChessboardCornersSB(view, boardSize, pointbuf); + break; + default: + return fprintf(stderr, "Unknown pattern type\n"), -1; + } + + // improve the found corners' coordinate accuracy + if (pattern == CHESSBOARD && found) + cornerSubPix(viewGray, pointbuf, Size(winSize, winSize), Size(-1, -1), + TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.0001)); + + if (mode == CAPTURING && found && + (!capture.isOpened() || clock() - prevTimestamp > delay * 1e-3 * CLOCKS_PER_SEC)) + { + imagePoints.push_back(pointbuf); + prevTimestamp = clock(); + blink = capture.isOpened(); + } + + if (found) + drawChessboardCorners(view, boardSize, Mat(pointbuf), found); + + string msg = mode == CAPTURING ? "100/100" + : mode == CALIBRATED ? "Calibrated" + : "Press 'g' to start"; + int baseLine = 0; + Size textSize = getTextSize(msg, 1, 1, 1, &baseLine); + Point textOrigin(view.cols - 2 * textSize.width - 10, view.rows - 2 * baseLine - 10); + + if (mode == CAPTURING) + { + if (undistortImage) + msg = cv::format("%d/%d Undist", (int)imagePoints.size(), nframes); + else + msg = cv::format("%d/%d", (int)imagePoints.size(), nframes); + } + + putText(view, msg, textOrigin, 1, 1, + mode != CALIBRATED ? Scalar(0, 0, 255) : Scalar(0, 255, 0)); + + if (blink) + bitwise_not(view, view); + + if (mode == CALIBRATED && undistortImage) + { + Mat temp = view.clone(); + undistort(temp, view, cameraMatrix, distCoeffs); + } + + imshow("Image View", view); + char key = (char)waitKey(capture.isOpened() ? 
50 : 500); + + if (key == 27) + break; + + if (key == 'u' && mode == CALIBRATED) + undistortImage = !undistortImage; + + if (capture.isOpened() && key == 'g') + { + mode = CAPTURING; + imagePoints.clear(); + } + + if (mode == CAPTURING && imagePoints.size() >= (unsigned)nframes) + { + if (runAndSave(outputFilename, imagePoints, imageSize, boardSize, pattern, squareSize, + grid_width, release_object, aspectRatio, flags, cameraMatrix, distCoeffs, + writeExtrinsics, writePoints, writeGrid)) + mode = CALIBRATED; + else + mode = DETECTION; + if (!capture.isOpened()) + break; + } + } + + if (!capture.isOpened() && showUndistorted) + { + Mat view, rview, map1, map2; + initUndistortRectifyMap( + cameraMatrix, distCoeffs, Mat(), + getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0), + imageSize, CV_16SC2, map1, map2); + + for (i = 0; i < (int)imageList.size(); i++) + { + view = imread(imageList[i], 1); + if (view.empty()) + continue; + // undistort( view, rview, cameraMatrix, distCoeffs, cameraMatrix ); + remap(view, rview, map1, map2, INTER_LINEAR); + imshow("Image View", rview); + char c = (char)waitKey(); + if (c == 27 || c == 'q' || c == 'Q') + break; + } + } + + return 0; +} +static bool runCalibration(const vector>& imagePoints, Size imageSize, + Size boardSize, Pattern patternType, float squareSize, float aspectRatio, + float grid_width, bool release_object, int flags, Mat& cameraMatrix, + Mat& distCoeffs, vector& rvecs, vector& tvecs, + vector& reprojErrs, vector& newObjPoints, + double& totalAvgErr); + +static void saveCameraParams(const string& filename, Size imageSize, Size boardSize, + float squareSize, float aspectRatio, int flags, + const Mat& cameraMatrix, const Mat& distCoeffs, + const vector& rvecs, const vector& tvecs, + const vector& reprojErrs, + const vector>& imagePoints, + const vector& newObjPoints, double totalAvgErr); + +bool runAndSave(const string& outputFilename, const vector>& imagePoints, + Size imageSize, Size boardSize, Pattern patternType, float squareSize, + float grid_width, bool release_object, float aspectRatio, int flags, + Mat& cameraMatrix, Mat& distCoeffs, bool writeExtrinsics, bool writePoints, + bool writeGrid) +{ + vector rvecs, tvecs; + vector reprojErrs; + double totalAvgErr = 0; + vector newObjPoints; + + bool ok = runCalibration(imagePoints, imageSize, boardSize, patternType, squareSize, + aspectRatio, grid_width, release_object, flags, cameraMatrix, + distCoeffs, rvecs, tvecs, reprojErrs, newObjPoints, totalAvgErr); + printf("%s. avg reprojection error = %.7f\n", + ok ? "Calibration succeeded" : "Calibration failed", totalAvgErr); + + if (ok) + saveCameraParams(outputFilename, imageSize, boardSize, squareSize, aspectRatio, flags, + cameraMatrix, distCoeffs, writeExtrinsics ? rvecs : vector(), + writeExtrinsics ? tvecs : vector(), + writeExtrinsics ? reprojErrs : vector(), + writePoints ? imagePoints : vector>(), + writeGrid ? 
newObjPoints : vector(), totalAvgErr); + return ok; +} +void help(char** argv) +{ + printf( + "This is a camera calibration sample.\n" + "Usage: %s\n" + " -w= # the number of inner corners per one of board dimension\n" + " -h= # the number of inner corners per another board dimension\n" + " [-pt=] # the type of pattern: chessboard or circles' grid\n" + " [-n=] # the number of frames to use for calibration\n" + " # (if not specified, it will be set to the number\n" + " # of board views actually available)\n" + " [-d=] # a minimum delay in ms between subsequent attempts to " + "capture a next view\n" + " # (used only for video capturing)\n" + " [-s=] # square size in some user-defined units (1 by default)\n" + " [-o=] # the output filename for intrinsic [and extrinsic] " + "parameters\n" + " [-op] # write detected feature points\n" + " [-oe] # write extrinsic parameters\n" + " [-oo] # write refined 3D object points\n" + " [-zt] # assume zero tangential distortion\n" + " [-a=] # fix aspect ratio (fx/fy)\n" + " [-p] # fix the principal point at the center\n" + " [-v] # flip the captured images around the horizontal axis\n" + " [-V] # use a video file, and not an image list, uses\n" + " # [input_data] string for the video file name\n" + " [-su] # show undistorted images after calibration\n" + " [-ws=] # Half of search window for cornerSubPix (11 by default)\n" + " [-dt=] # actual distance between top-left and top-right corners " + "of\n" + " # the calibration grid. If this parameter is specified, a " + "more\n" + " # accurate calibration method will be used which may be " + "better\n" + " # with inaccurate, roughly planar target.\n" + " [input_data] # input data, one of the following:\n" + " # - text file with a list of the images of the board\n" + " # the text file can be generated with imagelist_creator\n" + " # - name of video file with a video of the board\n" + " # if input_data not specified, a live view from the camera " + "is used\n" + "\n", + argv[0]); + printf("\n%s", usage); + printf("\n%s", liveCaptureHelp); +} +double computeReprojectionErrors(const vector>& objectPoints, + const vector>& imagePoints, + const vector& rvecs, const vector& tvecs, + const Mat& cameraMatrix, const Mat& distCoeffs, + vector& perViewErrors) +{ + vector imagePoints2; + int i, totalPoints = 0; + double totalErr = 0, err; + perViewErrors.resize(objectPoints.size()); + + for (i = 0; i < (int)objectPoints.size(); i++) + { + projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix, distCoeffs, + imagePoints2); + err = norm(Mat(imagePoints[i]), Mat(imagePoints2), NORM_L2); + int n = (int)objectPoints[i].size(); + perViewErrors[i] = (float)std::sqrt(err * err / n); + totalErr += err * err; + totalPoints += n; + } + + return std::sqrt(totalErr / totalPoints); +} + +bool readStringList(const string& filename, vector& l) +{ + l.clear(); + + std::ifstream file(filename); + if (!file.is_open()) { + return false; + } + + std::string line; + while (std::getline(file, line)) { + l.push_back(line); + } + + if (l.empty()) { + return false; + } else { + return true; + } +} + +bool readStringListXml(const string& filename, vector& l) +{ + l.resize(0); + FileStorage fs(filename, FileStorage::READ); + if (!fs.isOpened()) + return false; + size_t dir_pos = filename.rfind('/'); + if (dir_pos == string::npos) + dir_pos = filename.rfind('\\'); + FileNode n = fs.getFirstTopLevelNode(); + if (n.type() != FileNode::SEQ) + return false; + FileNodeIterator it = n.begin(), it_end = n.end(); + for (; it != it_end; ++it) + { + 
string fname = (string)*it; + if (dir_pos != string::npos) + { + string fpath = samples::findFile(filename.substr(0, dir_pos + 1) + fname, false); + if (fpath.empty()) + { + fpath = samples::findFile(fname); + } + fname = fpath; + } + else + { + fname = samples::findFile(fname); + } + l.push_back(fname); + } + return true; +} +void calcChessboardCorners(Size boardSize, float squareSize, vector& corners, + Pattern patternType) +{ + corners.resize(0); + + switch (patternType) + { + case CHESSBOARD: + case CIRCLES_GRID: + case RADON: + for (int i = 0; i < boardSize.height; i++) + for (int j = 0; j < boardSize.width; j++) + corners.emplace_back(float(j * squareSize), float(i * squareSize), 0); + break; + + case ASYMMETRIC_CIRCLES_GRID: + for (int i = 0; i < boardSize.height; i++) + for (int j = 0; j < boardSize.width; j++) + corners.emplace_back(float((2 * j + i % 2) * squareSize), float(i * squareSize), + 0); + break; + + default: + CV_Error(Error::StsBadArg, "Unknown pattern type\n"); + } +} +bool runCalibration(const vector>& imagePoints, Size imageSize, Size boardSize, + Pattern patternType, float squareSize, float aspectRatio, float grid_width, + bool release_object, int flags, Mat& cameraMatrix, Mat& distCoeffs, + vector& rvecs, vector& tvecs, vector& reprojErrs, + vector& newObjPoints, double& totalAvgErr) +{ + cameraMatrix = Mat::eye(3, 3, CV_64F); + if (flags & CALIB_FIX_ASPECT_RATIO) + cameraMatrix.at(0, 0) = aspectRatio; + + distCoeffs = Mat::zeros(8, 1, CV_64F); + + vector> objectPoints(1); + calcChessboardCorners(boardSize, squareSize, objectPoints[0], patternType); + objectPoints[0][boardSize.width - 1].x = objectPoints[0][0].x + grid_width; + newObjPoints = objectPoints[0]; + + objectPoints.resize(imagePoints.size(), objectPoints[0]); + + double rms; + // int iFixedPoint = -1; + // if (release_object) + // iFixedPoint = boardSize.width - 1; + // rms = calibrateCameraRO(objectPoints, imagePoints, imageSize, iFixedPoint, + // cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints, + // flags | CALIB_FIX_K3 | CALIB_USE_LU); + cv::TermCriteria tc(TermCriteria::COUNT + TermCriteria::EPS, 300, DBL_EPSILON); + cv::Mat stdIntr, stdExtr, pve; + rms = calibrateCamera(objectPoints, imagePoints, imageSize, + cameraMatrix, distCoeffs, rvecs,tvecs, + stdIntr, stdExtr, pve, + //flags | CALIB_FIX_K1 | CALIB_FIX_K2 | CALIB_FIX_K3, tc); + flags | CALIB_FIX_K3, tc); + printf("RMS error reported by calibrateCamera: %g\n", rms); + std::cout << "stdIntr" << std::endl; + std::cout << stdIntr << std::endl; + //std::cout << stdExtr << std::endl; + + bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs); + + if (release_object) + { + cout << "New board corners: " << endl; + cout << newObjPoints[0] << endl; + cout << newObjPoints[boardSize.width - 1] << endl; + cout << newObjPoints[boardSize.width * (boardSize.height - 1)] << endl; + cout << newObjPoints.back() << endl; + } + + objectPoints.clear(); + objectPoints.resize(imagePoints.size(), newObjPoints); + totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints, rvecs, tvecs, cameraMatrix, + distCoeffs, reprojErrs); + + return ok; +} +void saveCameraParams(const string& filename, Size imageSize, Size boardSize, float squareSize, + float aspectRatio, int flags, const Mat& cameraMatrix, const Mat& distCoeffs, + const vector& rvecs, const vector& tvecs, + const vector& reprojErrs, const vector>& imagePoints, + const vector& newObjPoints, double totalAvgErr) +{ + FileStorage fs(filename, FileStorage::WRITE); + + time_t tt; + time(&tt); + 
struct tm* t2 = localtime(&tt); + char buf[1024]; + strftime(buf, sizeof(buf) - 1, "%c", t2); + + fs << "calibration_time" << buf; + + if (!rvecs.empty() || !reprojErrs.empty()) + fs << "nframes" << (int)std::max(rvecs.size(), reprojErrs.size()); + fs << "image_width" << imageSize.width; + fs << "image_height" << imageSize.height; + fs << "board_width" << boardSize.width; + fs << "board_height" << boardSize.height; + fs << "square_size" << squareSize; + + if (flags & CALIB_FIX_ASPECT_RATIO) + fs << "aspectRatio" << aspectRatio; + + if (flags != 0) + { + sprintf(buf, "flags: %s%s%s%s", + flags & CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "", + flags & CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "", + flags & CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point" : "", + flags & CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : ""); + // cvWriteComment( *fs, buf, 0 ); + } + + fs << "flags" << flags; + + fs << "camera_matrix" << cameraMatrix; + fs << "distortion_coefficients" << distCoeffs; + + fs << "avg_reprojection_error" << totalAvgErr; + if (!reprojErrs.empty()) + fs << "per_view_reprojection_errors" << Mat(reprojErrs); + + if (!rvecs.empty() && !tvecs.empty()) + { + CV_Assert(rvecs[0].type() == tvecs[0].type()); + Mat bigmat((int)rvecs.size(), 6, rvecs[0].type()); + for (int i = 0; i < (int)rvecs.size(); i++) + { + Mat r = bigmat(Range(i, i + 1), Range(0, 3)); + Mat t = bigmat(Range(i, i + 1), Range(3, 6)); + + CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1); + CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1); + //*.t() is MatExpr (not Mat) so we can use assignment operator + r = rvecs[i].t(); + t = tvecs[i].t(); + } + // cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each + // view", 0 ); + fs << "extrinsic_parameters" << bigmat; + } + + if (!imagePoints.empty()) + { + Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2); + for (int i = 0; i < (int)imagePoints.size(); i++) + { + Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols); + Mat imgpti(imagePoints[i]); + imgpti.copyTo(r); + } + fs << "image_points" << imagePtMat; + } + + if (!newObjPoints.empty()) + { + fs << "grid_points" << newObjPoints; + } +} diff --git a/calibration_with_blender/checkerboard.png b/calibration_with_blender/checkerboard.png new file mode 100644 index 0000000..b7a901f Binary files /dev/null and b/calibration_with_blender/checkerboard.png differ diff --git a/calibration_with_blender/circles.png b/calibration_with_blender/circles.png new file mode 100644 index 0000000..72c36ba Binary files /dev/null and b/calibration_with_blender/circles.png differ diff --git a/calibration_with_blender/image_distort.cpp b/calibration_with_blender/image_distort.cpp new file mode 100644 index 0000000..b91d3e6 --- /dev/null +++ b/calibration_with_blender/image_distort.cpp @@ -0,0 +1,73 @@ +#include +#include +#include +#include + +int main(int argc, char* argv[]) +{ + if (argc < 8) + { + std::cout << "usage; " << argv[0] << "image camera_model fx fy cx cy [d0 .. dn] output" + << std::endl; + return EXIT_FAILURE; + } + + /* Load original image */ + cv::Mat image = cv::imread(argv[1]); + if (image.empty()) + { + std::cout << "Could not open " << argv[1] << std::endl; + return EXIT_FAILURE; + } + + cv::imshow("original", image); + cv::waitKey(10); + + /* Construct K */ + cv::Mat_ camera_matrix = cv::Mat::eye(3, 3, CV_32F); + camera_matrix(0, 0) = std::stof(argv[3]); + camera_matrix(1, 1) = std::stof(argv[4]); + camera_matrix(0, 2) = std::stof(argv[5]) == 0 ? 
image.size().width / 2.f : std::stof(argv[5]);
+    camera_matrix(1, 2) = std::stof(argv[6]) == 0 ? image.size().height / 2.f : std::stof(argv[6]);
+
+    std::cout << camera_matrix << std::endl;
+
+    /* Load distortion coefficients */
+    cv::Mat distortion = cv::Mat::zeros(1, 5, CV_32F);
+    for (int i = 7; i < argc - 1; ++i)
+    {
+        distortion.at<float>(0, i - 7) = std::stof(argv[i]);
+    }
+
+    std::cout << distortion << std::endl;
+
+    /* Collect the original pixel locations */
+    std::vector<cv::Point2f> image_points;
+    for (int i = 0; i < image.rows; i++)
+    {
+        for (int j = 0; j < image.cols; j++)
+        {
+            image_points.emplace_back(j, i);
+        }
+    }
+
+    /* remap() performs an inverse mapping, so to distort the image we undistort the
+       original pixel positions: this yields, for every output pixel, its source location. */
+    cv::Mat_<cv::Point2f> undistorted_points(image.size());
+    cv::undistortPoints(image_points, undistorted_points, camera_matrix, distortion,
+                        cv::noArray(),
+                        camera_matrix);
+
+    /* 2-channel (x & y) map with the same shape as the result image */
+    undistorted_points = undistorted_points.reshape(2, image.rows);
+
+    std::cout << undistorted_points.size() << std::endl;
+
+    /* Fill the result image */
+    cv::Mat distorted;
+    cv::remap(image, distorted, undistorted_points, cv::noArray(), cv::INTER_LANCZOS4);
+
+    /* Save the result */
+    cv::imshow("distorted", distorted);
+    cv::waitKey(0);
+    cv::imwrite(argv[argc - 1], distorted);
+}
\ No newline at end of file
diff --git a/calibration_with_blender/radon.png b/calibration_with_blender/radon.png
new file mode 100644
index 0000000..cf9773a
Binary files /dev/null and b/calibration_with_blender/radon.png differ
diff --git a/calibration_with_blender/render.py b/calibration_with_blender/render.py
new file mode 100644
index 0000000..a111ec5
--- /dev/null
+++ b/calibration_with_blender/render.py
@@ -0,0 +1,143 @@
+import bpy
+from bpy_extras.object_utils import world_to_camera_view
+import os
+import random
+from math import pi
+import pathlib
+import json
+
+
+def check_projection(cam, obj):
+    scene = bpy.context.scene
+    render = scene.render
+
+    # print(obj.data.vertices[0].co, "- Vert 0 (original)")
+
+    # Convert the object to a mesh
+    me = obj.to_mesh()
+    # print(me.vertices[0].co, " - Vert 0 (deformed/modified)")
+
+    # Transform the mesh according to the object's translation and rotation
+    me.transform(obj.matrix_world)
+    # print(me.vertices[0].co, " - Vert 0 (deformed/modified, world space)")
+
+    # Collect the mesh coordinates
+    verts = [vert.co for vert in me.vertices]
+    print(list(verts))
+
+    # Convert to normalized device coordinates
+    coords_2d = [world_to_camera_view(scene, cam, coord) for coord in verts]
+
+    # x, y must be in [0, 1], z > 0
+    for x, y, z in coords_2d:
+        print(x, y, z)
+        if x < 0 or x > 1:
+            return False
+        if y < 0 or y > 1:
+            return False
+        if z <= 0:
+            return False
+
+    return True
+
+
+def set_position_origin(obj):
+    obj.location.x = 0
+    obj.location.y = 0
+    obj.location.z = 0
+
+    obj.rotation_euler[0] = 0
+    obj.rotation_euler[1] = 0
+    obj.rotation_euler[2] = 0
+
+
+def hide_objects(names):
+    for name in names:
+        bpy.data.objects[name].location.z = 10
+
+
+if __name__ == '__main__':
+    # Get the camera
+    c = bpy.data.objects['Camera']
+
+    # Set camera intrinsics
+    # c.lens_unit = 'MILLIMETERS'
+    # c.lens = 20  # focal length
+
+    # Set camera position
+    set_position_origin(c)
+    c.location.z = 0.5
+
+    # Settings
+    work_dir = '/home/xperience/development/datasets'
+    patterns = ['checkerboard', 'circles', 'radon']
+    N = 600  # Number of generated images
+
+    for pattern in patterns:
+        hide_objects(patterns)
+
+        p = bpy.data.objects[pattern]
+        set_position_origin(p)
+
+        # Fix random seed
random.seed(1) + + # Create pattern directory + pattern_dir = os.path.join(work_dir, pattern) + pathlib.Path(pattern_dir).mkdir(parents=True, exist_ok=True) + + dataset_info = {} + camera_info = {'focus_length': 50} + renders_info = {} + + n = 0 + i = 0 + + while n < N and i < 10000000: + # Set position + p.location.x = random.uniform(-0.3, 0.3) + p.location.y = random.uniform(-0.2, 0.2) + p.location.z = random.uniform(0, 0.15) + + # Set rotation + p.rotation_euler[0] = random.uniform(-0.5, 0.5) + p.rotation_euler[1] = random.uniform(-0.5, 0.5) + # p.rotation_euler[2] = random.uniform(0.7 * pi/2, 1.3 * pi/2) + p.rotation_euler[2] = random.uniform(-0.5, 0.5) + + # Update matrices + bpy.context.view_layer.update() + + # Debug + print('>', i, ' ', n, '<') + print(p.location) + print(p.rotation_euler) + + # Render and save image if it fully visible + if check_projection(c, p): + print('True') + + render_filename = 'render-{:04d}.jpg'.format(n) + render_path = os.path.join(pattern_dir, render_filename) + + bpy.context.scene.render.filepath = render_path + bpy.ops.render.render(write_still=True) + + renders_info[render_filename] = {'pattern': + {'location': [p.location.x, p.location.y, p.location.z], + 'rotation': [p.rotation_euler[0], p.rotation_euler[1], + p.rotation_euler[2]] + } + } + n += 1 + + else: + print('False') + + i += 1 + + dataset_info['camera'] = camera_info + dataset_info['renders'] = renders_info + + with open(os.path.join(pattern_dir, 'info.json'), 'w') as info_file: + json.dump(dataset_info, info_file, indent=4) diff --git a/calibration_with_blender/requirements.txt b/calibration_with_blender/requirements.txt new file mode 100644 index 0000000..06daef8 --- /dev/null +++ b/calibration_with_blender/requirements.txt @@ -0,0 +1,5 @@ +numpy == 1.22.4 +opencv-python == 4.6.0.66 +seaborn == 0.11.2 +PyYAML == 6.0 +pycairo == 1.21.0 diff --git a/calibration_with_blender/runner-2.py b/calibration_with_blender/runner-2.py new file mode 100644 index 0000000..319bc90 --- /dev/null +++ b/calibration_with_blender/runner-2.py @@ -0,0 +1,96 @@ +import json +import os +import pathlib +import shutil +import subprocess +import yaml +import random + + +def clear_dir(folder): + for filename in os.listdir(folder): + file_path = os.path.join(folder, filename) + try: + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + except Exception as e: + print('Failed to delete %s. 
Reason: %s' % (file_path, e)) + + +if __name__ == '__main__': + binary_path = '/home/xperience/development/opencv-fork/cmake-build-release/bin' + image_distort_path = os.path.join(binary_path, 'example_cpp_image_distort') + calibration_benchmark_path = os.path.join(binary_path, 'example_cpp_calibration_benchmark') + + datasets_path = '/home/xperience/development/datasets' + pattern = 'checkerboard' + dataset_path = os.path.join(datasets_path, pattern) + + work_dir = os.path.join('/home/xperience/development/opencv_benchmarks/calibration_with_blender', 'work', pattern) + distorted_dir = os.path.join(work_dir, 'distorted') + result_dir = os.path.join(work_dir, 'result') + + pathlib.Path(distorted_dir).mkdir(parents=True, exist_ok=True) + pathlib.Path(result_dir).mkdir(parents=True, exist_ok=True) + + # Set seed + random.seed(0) + + # Set distortion oefficients + k1 = random.uniform(-0.3, 0) + k2 = random.uniform(-0.1, 0) + p1 = random.uniform(-0.3, 0.3) + p2 = random.uniform(-0.1, 0.1) + k3 = random.uniform(-0.01, 0) + + result = {} + N = 5 + for i in range(N): + image_number = 20 + i + + + clear_dir(distorted_dir) + + # Distort images and update info + k = 0 + for entry in os.listdir(dataset_path): + entry_path = os.path.join(dataset_path, entry) + if os.path.isfile(entry_path): + if entry != 'info.json': + if k >= image_number: + break + subprocess.run( + [image_distort_path, entry_path, 'pinhole', '1067', '1067', '0', '0', str(k1), str(k2), str(p1), + str(p2), str(k3), + distorted_dir]) + k += 1 + else: + # with open(entry_path, 'r') as file: + # info = json.load(file) + # info['camera']['d'] = [d1, d2] + # + # with open(os.path.join(work_dir, entry), 'w') as file: + # json.dump(info, file, indent=4) + shutil.copy(entry_path, os.path.join(work_dir, entry)) + + # Create image list + image_list_path = os.path.join(work_dir, 'image_list') + with open(image_list_path, 'w') as image_list: + for entry in os.listdir(distorted_dir): + entry_path = os.path.join(distorted_dir, entry) + if os.path.isfile(entry_path): + image_list.write(entry_path + '\n') + + # Run calibration + result_filename = 'c-{:04d}.yaml'.format(i) + calibration_result_path = os.path.join(result_dir, result_filename) + subprocess.run( + [calibration_benchmark_path, '-w=13', '-h=18', '-s=1', '-op', '-o={}'.format(calibration_result_path), + image_list_path]) + + result[result_filename] = {'d': [k1, k2, p1, p2, k3], + 'n': image_number} + with open(os.path.join(result_dir, 'result.yml'), 'w') as result_file: + yaml.dump(result, result_file, default_flow_style=False) diff --git a/calibration_with_blender/runner.py b/calibration_with_blender/runner.py new file mode 100644 index 0000000..eeee78b --- /dev/null +++ b/calibration_with_blender/runner.py @@ -0,0 +1,88 @@ +import json +import os +import pathlib +import shutil +import subprocess +import yaml +import random + + +def clear_dir(folder): + for filename in os.listdir(folder): + file_path = os.path.join(folder, filename) + try: + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + except Exception as e: + print('Failed to delete %s. 
Reason: %s' % (file_path, e)) + + +if __name__ == '__main__': + binary_path = '/home/xperience/development/opencv-fork/cmake-build-release/bin' + image_distort_path = os.path.join(binary_path, 'example_cpp_image_distort') + calibration_benchmark_path = os.path.join(binary_path, 'example_cpp_calibration_benchmark') + + datasets_path = '/home/xperience/development/datasets' + pattern = 'checkerboard' + dataset_path = os.path.join(datasets_path, pattern) + + work_dir = os.path.join('/home/xperience/development/opencv_benchmarks/calibration_with_blender', 'work', pattern) + distorted_dir = os.path.join(work_dir, 'distorted') + result_dir = os.path.join(work_dir, 'result') + + pathlib.Path(distorted_dir).mkdir(parents=True, exist_ok=True) + pathlib.Path(result_dir).mkdir(parents=True, exist_ok=True) + + result = {} + N = 3 + for i in range(N): + image_number = 50 + k1 = random.uniform(-0.3, 0) + k2 = random.uniform(-0.1, 0) + p1 = random.uniform(-0.3, 0.3) + p2 = random.uniform(-0.1, 0.1) + k3 = random.uniform(-0.01, 0) + + clear_dir(distorted_dir) + # Distort images and update info + k = 0 + for entry in os.listdir(dataset_path): + entry_path = os.path.join(dataset_path, entry) + if os.path.isfile(entry_path): + if entry != 'info.json': + if k >= image_number: + break + subprocess.run( + [image_distort_path, entry_path, 'pinhole', '1067', '1067', '0', '0', str(k1), str(k2), str(p1), + str(p2), str(k3), + distorted_dir]) + k += 1 + else: + # with open(entry_path, 'r') as file: + # info = json.load(file) + # info['camera']['d'] = [d1, d2] + # + # with open(os.path.join(work_dir, entry), 'w') as file: + # json.dump(info, file, indent=4) + shutil.copy(entry_path, os.path.join(work_dir, entry)) + + # Create image list + image_list_path = os.path.join(work_dir, 'image_list') + with open(image_list_path, 'w') as image_list: + for entry in os.listdir(distorted_dir): + entry_path = os.path.join(distorted_dir, entry) + if os.path.isfile(entry_path): + image_list.write(entry_path + '\n') + + # Run calibration + result_filename = 'c-{:04d}.yaml'.format(i) + calibration_result_path = os.path.join(result_dir, result_filename) + subprocess.run( + [calibration_benchmark_path, '-w=13', '-h=18', '-s=1', '-op', '-o={}'.format(calibration_result_path), + image_list_path]) + + result[result_filename] = {'d': [k1, k2, p1, p2, k3]} + with open(os.path.join(result_dir, 'result.yml'), 'w') as result_file: + yaml.dump(result, result_file, default_flow_style=False) diff --git a/calibration_with_blender/show-result.py b/calibration_with_blender/show-result.py new file mode 100644 index 0000000..4980f20 --- /dev/null +++ b/calibration_with_blender/show-result.py @@ -0,0 +1,36 @@ +import yaml +import os +import cv2 as cv +import matplotlib.pyplot as plt +import seaborn as sns + +if __name__ == '__main__': + data_dir = '/home/xperience/development/opencv_benchmarks/calibration_with_blender/work/checkerboard' + + with open(os.path.join(data_dir, 'result', 'result.yml')) as file: + data = yaml.safe_load(file) + print(data) + + errors = {} + d1_errors = {} + result_dir = os.path.join(data_dir, 'result') + for entry in os.listdir(result_dir): + if entry != 'result.yml': + camera_file = cv.FileStorage(os.path.join(result_dir, entry), cv.FILE_STORAGE_READ) + error = camera_file.getNode('avg_reprojection_error').real() + #errors.append(error) + + n = data[entry]['n'] + errors[n] = error + + D = camera_file.getNode('distortion_coefficients').mat() + d1_errors[n] = abs(data[entry]['d'][0] - D[0]) + #sns.distplot(errors) + 
#sns.distplot(d1_errors)
+    print(errors.keys())
+    print(errors.values())
+    #plt.plot(errors.keys(), errors.values())
+    errors = d1_errors
+    x, y = zip(*sorted(errors.items()))
+    plt.plot(x, y)
+    plt.show()
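
For reference, the distortion trick used by image_distort.cpp (undistorting a regular pixel grid to build the lookup table that remap() samples from) can be reproduced in a few lines of Python. This is only a sketch, not part of the patch: the file name `view.png` and the intrinsics/distortion values below are illustrative placeholders (the runner scripts pass fx = fy = 1067 and randomly drawn coefficients).

```python
import numpy as np
import cv2 as cv

# Illustrative inputs: any rendered view, principal point at the image centre
# (cx = cy = 0 on the image_distort command line means "use the image centre").
img = cv.imread('view.png')
h, w = img.shape[:2]
K = np.array([[1067.0, 0.0, w / 2.0],
              [0.0, 1067.0, h / 2.0],
              [0.0, 0.0, 1.0]])
dist = np.array([-0.2, -0.05, 0.01, 0.01, -0.005])  # k1, k2, p1, p2, k3 (example values)

# remap() needs, for every pixel of the *output* (distorted) image, the
# coordinate to sample in the input image. Undistorting the regular output
# grid yields exactly that map, so the remapped result appears distorted.
xs, ys = np.meshgrid(np.arange(w, dtype=np.float32), np.arange(h, dtype=np.float32))
grid = np.stack([xs.ravel(), ys.ravel()], axis=-1).reshape(-1, 1, 2)
src_map = cv.undistortPoints(grid, K, dist, P=K).reshape(h, w, 2)  # CV_32FC2 map

distorted = cv.remap(img, src_map, None, cv.INTER_LANCZOS4)
cv.imwrite('view_distorted.png', distorted)
```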