diff --git a/.gitignore b/.gitignore
index 7227515c0a..d9a7473a5d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -71,7 +71,7 @@ lib/
cmake_modules/
cmake-build-debug/
-
+cmake-*
ExecMean.txt
SessionInfo.txt
Results_Euroc.txt
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 016e74354d..8250e6870f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -172,6 +172,14 @@ add_executable(stereo_euroc
Examples/Stereo/stereo_euroc.cc)
target_link_libraries(stereo_euroc ${PROJECT_NAME})
+add_executable(stereo_realsense_D455
+ Examples/Stereo/stereo_realsense_D455.cc)
+target_link_libraries(stereo_realsense_D455 ${PROJECT_NAME})
+
+add_executable(stereo_zed2i
+ Examples/Stereo/stereo_zed2i.cc)
+target_link_libraries(stereo_zed2i ${PROJECT_NAME})
+
add_executable(stereo_tum_vi
Examples/Stereo/stereo_tum_vi.cc)
target_link_libraries(stereo_tum_vi ${PROJECT_NAME})
@@ -316,6 +324,7 @@ if(realsense2_FOUND)
add_executable(stereo_realsense_D435i_old
Examples_old/Stereo/stereo_realsense_D435i.cc)
target_link_libraries(stereo_realsense_D435i_old ${PROJECT_NAME})
+
endif()
#Monocular examples
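
The two new stereo targets are plain dataset-driven executables linked against the project library; unlike the live D435i examples they are not guarded by realsense2_FOUND. They wrap the standard ORB_SLAM3::System stereo API. A minimal sketch of that call pattern follows; the image paths and the timestamp are placeholders, not part of this patch.

```cpp
// Minimal sketch of the ORB_SLAM3::System stereo API that stereo_realsense_D455
// and stereo_zed2i wrap. Image paths and the timestamp below are placeholders.
#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <System.h>

int main(int argc, char **argv)
{
    if(argc < 3)
        return 1;

    // argv[1] = ORB vocabulary, argv[2] = settings file (e.g. RealSense_D455.yaml or ZED2i.yaml)
    ORB_SLAM3::System SLAM(argv[1], argv[2], ORB_SLAM3::System::STEREO, true);

    cv::Mat imLeft  = cv::imread("left.png",  cv::IMREAD_UNCHANGED);   // placeholder image
    cv::Mat imRight = cv::imread("right.png", cv::IMREAD_UNCHANGED);   // placeholder image
    double tframe   = 0.0;                                             // timestamp in seconds

    // With the "PinHole" + Stereo.T_c1_c2 settings used here the images are
    // expected to be already rectified.
    SLAM.TrackStereo(imLeft, imRight, tframe);

    SLAM.Shutdown();
    return 0;
}
```
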
diff --git a/Examples/Monocular/D455.yaml b/Examples/Monocular/D455.yaml
new file mode 100644
index 0000000000..58c21dca49
--- /dev/null
+++ b/Examples/Monocular/D455.yaml
@@ -0,0 +1,78 @@
+%YAML:1.0
+
+#--------------------------------------------------------------------------------------------
+# System config
+#--------------------------------------------------------------------------------------------
+
+# When these variables are commented out, the system neither loads a previous session nor stores the current one
+
+# If the LoadFile doesn't exist, the system gives a message and creates a new Atlas from scratch
+#System.LoadAtlasFromFile: "Session_MH01_MH02_MH03_Mono"
+
+# The save file is created from the current session; if a file with the same name already exists, it is deleted
+#System.SaveAtlasToFile: "Session_MH01_MH02_MH03_Mono"
+
+#--------------------------------------------------------------------------------------------
+# Camera Parameters. Adjust them!
+#--------------------------------------------------------------------------------------------
+File.version: "1.0"
+
+Camera.type: "PinHole"
+
+# Camera calibration and distortion parameters (OpenCV)
+Camera1.fx: 423.8357849121094
+Camera1.fy: 423.8357849121094
+Camera1.cx: 430.0032653808594
+Camera1.cy: 237.4404296875
+
+Camera1.k1: 0.0
+Camera1.k2: 0.0
+Camera1.p1: 0.0
+Camera1.p2: 0.0
+
+Camera.width: 848
+Camera.height: 480
+
+Camera.newWidth: 848
+Camera.newHeight: 480
+
+# Camera frames per second
+Camera.fps: 20
+
+# Color order of the images (0: BGR, 1: RGB. It is ignored if images are grayscale)
+Camera.RGB: 1
+
+#--------------------------------------------------------------------------------------------
+# ORB Parameters
+#--------------------------------------------------------------------------------------------
+
+# ORB Extractor: Number of features per image
+ORBextractor.nFeatures: 2000
+
+# ORB Extractor: Scale factor between levels in the scale pyramid
+ORBextractor.scaleFactor: 1.2
+
+# ORB Extractor: Number of levels in the scale pyramid
+ORBextractor.nLevels: 8
+
+# ORB Extractor: Fast threshold
+# Image is divided in a grid. At each cell FAST are extracted imposing a minimum response.
+# Firstly we impose iniThFAST. If no corners are detected we impose a lower value minThFAST
+# You can lower these values if your images have low contrast
+ORBextractor.iniThFAST: 20
+ORBextractor.minThFAST: 7
+
+#--------------------------------------------------------------------------------------------
+# Viewer Parameters
+#---------------------------------------------------------------------------------------------
+Viewer.KeyFrameSize: 0.05
+Viewer.KeyFrameLineWidth: 1.0
+Viewer.GraphLineWidth: 0.9
+Viewer.PointSize: 2.0
+Viewer.CameraSize: 0.08
+Viewer.CameraLineWidth: 3.0
+Viewer.ViewpointX: 0.0
+Viewer.ViewpointY: -0.7
+Viewer.ViewpointZ: -1.8
+Viewer.ViewpointF: 500.0
+
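
These settings are read with OpenCV's cv::FileStorage; the dotted names are plain string keys, not nested YAML nodes. A quick way to sanity-check a freshly edited calibration file before launching the SLAM executable is to read a few entries back, as in the sketch below (the file path is assumed relative to the repository root).

```cpp
// Sketch: read back a few entries of D455.yaml with cv::FileStorage to verify
// the file parses as expected.
#include <iostream>
#include <opencv2/core/core.hpp>

int main()
{
    cv::FileStorage fs("Examples/Monocular/D455.yaml", cv::FileStorage::READ);
    if(!fs.isOpened())
    {
        std::cerr << "Could not open settings file" << std::endl;
        return 1;
    }

    const float fx = (float)fs["Camera1.fx"];
    const float fy = (float)fs["Camera1.fy"];
    const float cx = (float)fs["Camera1.cx"];
    const float cy = (float)fs["Camera1.cy"];
    const int w = (int)fs["Camera.width"];
    const int h = (int)fs["Camera.height"];

    std::cout << "fx=" << fx << " fy=" << fy
              << " cx=" << cx << " cy=" << cy
              << " size=" << w << "x" << h << std::endl;
    return 0;
}
```
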
diff --git a/Examples/Monocular/mono_euroc.cc b/Examples/Monocular/mono_euroc.cc
index 3a233129be..1ea8f95784 100644
--- a/Examples/Monocular/mono_euroc.cc
+++ b/Examples/Monocular/mono_euroc.cc
@@ -80,7 +80,7 @@ int main(int argc, char **argv)
int fps = 20;
float dT = 1.f/fps;
// Create SLAM system. It initializes all system threads and gets ready to process frames.
- ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::MONOCULAR, false);
+ ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::MONOCULAR, true);
float imageScale = SLAM.GetImageScale();
double t_resize = 0.f;
diff --git a/Examples/Monocular/mono_realsense_D455.cc b/Examples/Monocular/mono_realsense_D455.cc
new file mode 100644
index 0000000000..11cbc035d2
--- /dev/null
+++ b/Examples/Monocular/mono_realsense_D455.cc
@@ -0,0 +1,291 @@
+/**
+* This file is part of ORB-SLAM3
+*
+* Copyright (C) 2017-2021 Carlos Campos, Richard Elvira, Juan J. Gómez Rodríguez, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.
+* Copyright (C) 2014-2016 Raúl Mur-Artal, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.
+*
+* ORB-SLAM3 is free software: you can redistribute it and/or modify it under the terms of the GNU General Public
+* License as published by the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* ORB-SLAM3 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
+* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License along with ORB-SLAM3.
+* If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <signal.h>
+#include <stdlib.h>
+#include <iostream>
+#include <algorithm>
+#include <fstream>
+#include <chrono>
+#include <ctime>
+#include <sstream>
+
+#include <condition_variable>
+
+#include <opencv2/core/core.hpp>
+
+#include <librealsense2/rs.hpp>
+#include "librealsense2/rsutil.h"
+
+#include <System.h>
+
+using namespace std;
+
+bool b_continue_session;
+
+void exit_loop_handler(int s){
+ cout << "Finishing session" << endl;
+ b_continue_session = false;
+
+}
+
+rs2_vector interpolateMeasure(const double target_time,
+ const rs2_vector current_data, const double current_time,
+ const rs2_vector prev_data, const double prev_time);
+
+static rs2_option get_sensor_option(const rs2::sensor& sensor)
+{
+ // Sensors usually have several options to control their properties
+ // such as Exposure, Brightness etc.
+
+ std::cout << "Sensor supports the following options:\n" << std::endl;
+
+ // The following loop shows how to iterate over all available options
+ // Starting from 0 until RS2_OPTION_COUNT (exclusive)
+    for (int i = 0; i < static_cast<int>(RS2_OPTION_COUNT); i++)
+ {
+        rs2_option option_type = static_cast<rs2_option>(i);
+ //SDK enum types can be streamed to get a string that represents them
+ std::cout << " " << i << ": " << option_type;
+
+ // To control an option, use the following api:
+
+ // First, verify that the sensor actually supports this option
+ if (sensor.supports(option_type))
+ {
+ std::cout << std::endl;
+
+ // Get a human readable description of the option
+ const char* description = sensor.get_option_description(option_type);
+ std::cout << " Description : " << description << std::endl;
+
+ // Get the current value of the option
+ float current_value = sensor.get_option(option_type);
+ std::cout << " Current Value : " << current_value << std::endl;
+
+ //To change the value of an option, please follow the change_sensor_option() function
+ }
+ else
+ {
+ std::cout << " is not supported" << std::endl;
+ }
+ }
+
+ uint32_t selected_sensor_option = 0;
+    return static_cast<rs2_option>(selected_sensor_option);
+}
+
+int main(int argc, char **argv) {
+
+ if (argc < 3 || argc > 4) {
+ cerr << endl
+ << "Usage: ./mono_realsense_D435i path_to_vocabulary path_to_settings (trajectory_file_name)"
+ << endl;
+ return 1;
+ }
+
+ string file_name;
+
+ if (argc == 4) {
+ file_name = string(argv[argc - 1]);
+ }
+
+ struct sigaction sigIntHandler;
+
+ sigIntHandler.sa_handler = exit_loop_handler;
+ sigemptyset(&sigIntHandler.sa_mask);
+ sigIntHandler.sa_flags = 0;
+
+ sigaction(SIGINT, &sigIntHandler, NULL);
+ b_continue_session = true;
+
+ double offset = 0; // ms
+
+ rs2::context ctx;
+ rs2::device_list devices = ctx.query_devices();
+ rs2::device selected_device;
+ if (devices.size() == 0)
+ {
+ std::cerr << "No device connected, please connect a RealSense device" << std::endl;
+ return 0;
+ }
+ else
+ selected_device = devices[0];
+
+    std::vector<rs2::sensor> sensors = selected_device.query_sensors();
+ int index = 0;
+ // We can now iterate the sensors and print their names
+ for (rs2::sensor sensor : sensors)
+ if (sensor.supports(RS2_CAMERA_INFO_NAME)) {
+ ++index;
+ if (index == 1) {
+ sensor.set_option(RS2_OPTION_ENABLE_AUTO_EXPOSURE, 1);
+ sensor.set_option(RS2_OPTION_AUTO_EXPOSURE_LIMIT,5000);
+ sensor.set_option(RS2_OPTION_EMITTER_ENABLED, 0); // switch off emitter
+ }
+ // std::cout << " " << index << " : " << sensor.get_info(RS2_CAMERA_INFO_NAME) << std::endl;
+ get_sensor_option(sensor);
+ if (index == 2){
+ // RGB camera (not used here...)
+ sensor.set_option(RS2_OPTION_EXPOSURE,100.f);
+ }
+ }
+
+ // Declare RealSense pipeline, encapsulating the actual device and sensors
+ rs2::pipeline pipe;
+ // Create a configuration for configuring the pipeline with a non default profile
+ rs2::config cfg;
+    cfg.enable_stream(RS2_STREAM_INFRARED, 1, 848, 480, RS2_FORMAT_Y8, 30); // 848x480 to match the intrinsics in D455.yaml
+
+ // IMU callback
+ std::mutex imu_mutex;
+ std::condition_variable cond_image_rec;
+
+ cv::Mat imCV;
+ int width_img, height_img;
+ double timestamp_image = -1.0;
+ bool image_ready = false;
+ int count_im_buffer = 0; // count dropped frames
+
+ auto imu_callback = [&](const rs2::frame& frame)
+ {
+        std::unique_lock<std::mutex> lock(imu_mutex);
+
+        if(rs2::frameset fs = frame.as<rs2::frameset>())
+ {
+ count_im_buffer++;
+
+ double new_timestamp_image = fs.get_timestamp()*1e-3;
+ if(abs(timestamp_image-new_timestamp_image)<0.001){
+ // cout << "Two frames with the same timeStamp!!!\n";
+ count_im_buffer--;
+ return;
+ }
+
+ rs2::video_frame ir_frameL = fs.get_infrared_frame(1);
+ imCV = cv::Mat(cv::Size(width_img, height_img), CV_8U, (void*)(ir_frameL.get_data()), cv::Mat::AUTO_STEP);
+
+ timestamp_image = fs.get_timestamp()*1e-3;
+ image_ready = true;
+
+ lock.unlock();
+ cond_image_rec.notify_all();
+ }
+ };
+
+ rs2::pipeline_profile pipe_profile = pipe.start(cfg, imu_callback);
+
+ rs2::stream_profile cam_left = pipe_profile.get_stream(RS2_STREAM_INFRARED, 1);
+
+    rs2_intrinsics intrinsics_left = cam_left.as<rs2::video_stream_profile>().get_intrinsics();
+ width_img = intrinsics_left.width;
+ height_img = intrinsics_left.height;
+ cout << "Left camera: \n";
+ std::cout << " fx = " << intrinsics_left.fx << std::endl;
+ std::cout << " fy = " << intrinsics_left.fy << std::endl;
+ std::cout << " cx = " << intrinsics_left.ppx << std::endl;
+ std::cout << " cy = " << intrinsics_left.ppy << std::endl;
+ std::cout << " height = " << intrinsics_left.height << std::endl;
+ std::cout << " width = " << intrinsics_left.width << std::endl;
+ std::cout << " Coeff = " << intrinsics_left.coeffs[0] << ", " << intrinsics_left.coeffs[1] << ", " <<
+ intrinsics_left.coeffs[2] << ", " << intrinsics_left.coeffs[3] << ", " << intrinsics_left.coeffs[4] << ", " << std::endl;
+ std::cout << " Model = " << intrinsics_left.model << std::endl;
+
+ // Create SLAM system. It initializes all system threads and gets ready to process frames.
+ ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::MONOCULAR, true, 0, file_name);
+ float imageScale = SLAM.GetImageScale();
+
+ double timestamp;
+ cv::Mat im;
+
+ double t_resize = 0.f;
+ double t_track = 0.f;
+
+ while (!SLAM.isShutDown())
+ {
+        std::vector<rs2_vector> vGyro;
+        std::vector<double> vGyro_times;
+        std::vector<rs2_vector> vAccel;
+        std::vector<double> vAccel_times;
+
+ {
+            std::unique_lock<std::mutex> lk(imu_mutex);
+ if(!image_ready)
+ cond_image_rec.wait(lk);
+
+#ifdef COMPILEDWITHC11
+ std::chrono::steady_clock::time_point time_Start_Process = std::chrono::steady_clock::now();
+#else
+ std::chrono::monotonic_clock::time_point time_Start_Process = std::chrono::monotonic_clock::now();
+#endif
+
+ if(count_im_buffer>1)
+ cout << count_im_buffer -1 << " dropped frs\n";
+ count_im_buffer = 0;
+
+ timestamp = timestamp_image;
+ im = imCV.clone();
+
+ image_ready = false;
+ }
+
+ if(imageScale != 1.f)
+ {
+#ifdef REGISTER_TIMES
+ #ifdef COMPILEDWITHC11
+ std::chrono::steady_clock::time_point t_Start_Resize = std::chrono::steady_clock::now();
+ #else
+ std::chrono::monotonic_clock::time_point t_Start_Resize = std::chrono::monotonic_clock::now();
+ #endif
+#endif
+ int width = im.cols * imageScale;
+ int height = im.rows * imageScale;
+ cv::resize(im, im, cv::Size(width, height));
+
+#ifdef REGISTER_TIMES
+ #ifdef COMPILEDWITHC11
+ std::chrono::steady_clock::time_point t_End_Resize = std::chrono::steady_clock::now();
+ #else
+ std::chrono::monotonic_clock::time_point t_End_Resize = std::chrono::monotonic_clock::now();
+ #endif
+            t_resize = std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t_End_Resize - t_Start_Resize).count();
+ SLAM.InsertResizeTime(t_resize);
+#endif
+ }
+
+#ifdef REGISTER_TIMES
+ #ifdef COMPILEDWITHC11
+ std::chrono::steady_clock::time_point t_Start_Track = std::chrono::steady_clock::now();
+ #else
+ std::chrono::monotonic_clock::time_point t_Start_Track = std::chrono::monotonic_clock::now();
+ #endif
+#endif
+        // Pass the image to the SLAM system.
+ SLAM.TrackMonocular(im, timestamp);
+#ifdef REGISTER_TIMES
+ #ifdef COMPILEDWITHC11
+ std::chrono::steady_clock::time_point t_End_Track = std::chrono::steady_clock::now();
+ #else
+ std::chrono::monotonic_clock::time_point t_End_Track = std::chrono::monotonic_clock::now();
+ #endif
+        t_track = t_resize + std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t_End_Track - t_Start_Track).count();
+ SLAM.InsertTrackTime(t_track);
+#endif
+ }
+ cout << "System shutdown!\n";
+}
\ No newline at end of file
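
interpolateMeasure() is declared in this file but never called: it is only needed when IMU samples have to be aligned to image timestamps, as in the inertial examples. If a body were wanted, a simple linear interpolation in the style of the inertial D435i example would fit; the sketch below is illustrative only and is not part of the patch.

```cpp
// Illustrative only: linear interpolation of rs2_vector samples matching the
// interpolateMeasure() declaration above (this monocular example never calls it).
#include <librealsense2/rs.hpp>

rs2_vector interpolateMeasure(const double target_time,
                              const rs2_vector current_data, const double current_time,
                              const rs2_vector prev_data, const double prev_time)
{
    // Clamp to the nearest sample when the target lies outside [prev_time, current_time].
    if(target_time >= current_time)
        return current_data;
    if(target_time <= prev_time)
        return prev_data;

    // Linear interpolation factor in (0,1).
    const float a = static_cast<float>((target_time - prev_time) / (current_time - prev_time));

    rs2_vector out;
    out.x = prev_data.x + a * (current_data.x - prev_data.x);
    out.y = prev_data.y + a * (current_data.y - prev_data.y);
    out.z = prev_data.z + a * (current_data.z - prev_data.z);
    return out;
}
```
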
diff --git a/Examples/Stereo/RealSense_D455.yaml b/Examples/Stereo/RealSense_D455.yaml
new file mode 100755
index 0000000000..52f85bb3c7
--- /dev/null
+++ b/Examples/Stereo/RealSense_D455.yaml
@@ -0,0 +1,96 @@
+%YAML:1.0
+
+#--------------------------------------------------------------------------------------------
+# System config
+#--------------------------------------------------------------------------------------------
+
+# When these variables are commented out, the system neither loads a previous session nor stores the current one
+
+# If the LoadFile doesn't exist, the system gives a message and creates a new Atlas from scratch
+#System.LoadAtlasFromFile: "Session_MH01_MH02_MH03_Stereo60_Pseudo"
+
+# The save file is created from the current session; if a file with the same name already exists, it is deleted
+#System.SaveAtlasToFile: "Session_MH01_MH02_MH03_Stereo60_Pseudo"
+
+#--------------------------------------------------------------------------------------------
+# Camera Parameters. Adjust them!
+#--------------------------------------------------------------------------------------------
+File.version: "1.0"
+
+Camera.type: "PinHole"
+
+# Camera calibration and distortion parameters (OpenCV)
+Camera1.fx: 432.062
+Camera1.fy: 432.062
+Camera1.cx: 425.99
+Camera1.cy: 242.604
+
+Camera1.k1: 0.0
+Camera1.k2: 0.0
+Camera1.p1: 0.0
+Camera1.p2: 0.0
+
+Camera2.fx: 432.062
+Camera2.fy: 432.062
+Camera2.cx: 425.99
+Camera2.cy: 242.604
+
+Camera2.k1: 0.0
+Camera2.k2: 0.0
+Camera2.p1: 0.0
+Camera2.p2: 0.0
+
+Camera.width: 848
+Camera.height: 480
+
+# Camera frames per second
+Camera.fps: 20
+
+# Color order of the images (0: BGR, 1: RGB. It is ignored if images are grayscale)
+Camera.RGB: 1
+
+Stereo.ThDepth: 60.0
+Stereo.T_c1_c2: !!opencv-matrix
+ rows: 4
+ cols: 4
+ dt: f
+ data: [1.0, 0.0, 0.0, 0.0951799,
+ 0.0, 1.0, 0.0, 0.0,
+ 0.0, 0.0, 1.0, 0.0,
+ 0,0,0,1.0]
+
+#--------------------------------------------------------------------------------------------
+# ORB Parameters
+#--------------------------------------------------------------------------------------------
+
+# ORB Extractor: Number of features per image
+ORBextractor.nFeatures: 1200
+
+# ORB Extractor: Scale factor between levels in the scale pyramid
+ORBextractor.scaleFactor: 1.2
+
+# ORB Extractor: Number of levels in the scale pyramid
+ORBextractor.nLevels: 8
+
+# ORB Extractor: Fast threshold
+# Image is divided in a grid. At each cell FAST are extracted imposing a minimum response.
+# Firstly we impose iniThFAST. If no corners are detected we impose a lower value minThFAST
+# You can lower these values if your images have low contrast
+ORBextractor.iniThFAST: 20
+ORBextractor.minThFAST: 7
+
+#--------------------------------------------------------------------------------------------
+# Viewer Parameters
+#--------------------------------------------------------------------------------------------
+Viewer.KeyFrameSize: 0.05
+Viewer.KeyFrameLineWidth: 1.0
+Viewer.GraphLineWidth: 0.9
+Viewer.PointSize: 2.0
+Viewer.CameraSize: 0.08
+Viewer.CameraLineWidth: 3.0
+Viewer.ViewpointX: 0.0
+Viewer.ViewpointY: -0.7
+Viewer.ViewpointZ: -1.8
+Viewer.ViewpointF: 500.0
+Viewer.imageViewScale: 1.0
+
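
Stereo.T_c1_c2 encodes the left-to-right extrinsics, so its x translation (about 0.0952 m) is the D455 stereo baseline, and Stereo.ThDepth bounds how many baselines away a triangulated point is still treated as "close". Assuming the usual ORB-SLAM3 convention that the close/far cut-off is bf * ThDepth / fx (i.e. baseline * ThDepth), a rough worked check:

```cpp
// Sketch: sanity-check the stereo depth threshold implied by this settings file,
// assuming the cut-off is computed as (baseline * fx) * ThDepth / fx.
#include <iostream>

int main()
{
    const double fx       = 432.062;     // Camera1.fx
    const double baseline = 0.0951799;   // x translation of Stereo.T_c1_c2, in meters
    const double thDepth  = 60.0;        // Stereo.ThDepth

    const double bf     = baseline * fx;       // the "bf" value used internally
    const double cutoff = bf * thDepth / fx;   // = baseline * thDepth

    std::cout << "bf ~= " << bf << ", close/far point cut-off ~= "
              << cutoff << " m" << std::endl;  // roughly 5.7 m
    return 0;
}
```
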
diff --git a/Examples/Stereo/ZED2i.yaml b/Examples/Stereo/ZED2i.yaml
new file mode 100755
index 0000000000..18c4119856
--- /dev/null
+++ b/Examples/Stereo/ZED2i.yaml
@@ -0,0 +1,97 @@
+%YAML:1.0
+
+#--------------------------------------------------------------------------------------------
+# System config
+#--------------------------------------------------------------------------------------------
+
+# When these variables are commented out, the system neither loads a previous session nor stores the current one
+
+# If the LoadFile doesn't exist, the system gives a message and creates a new Atlas from scratch
+#System.LoadAtlasFromFile: "Session_MH01_MH02_MH03_Stereo60_Pseudo"
+
+# The save file is created from the current session; if a file with the same name already exists, it is deleted
+#System.SaveAtlasToFile: "Session_MH01_MH02_MH03_Stereo60_Pseudo"
+
+#--------------------------------------------------------------------------------------------
+# Camera Parameters. Adjust them!
+#--------------------------------------------------------------------------------------------
+File.version: "1.0"
+
+Camera.type: "PinHole"
+
+# Camera calibration and distortion parameters (OpenCV)
+Camera1.fx: 811.63284302
+Camera1.fy: 811.63284302
+Camera1.cx: 949.82876587
+Camera1.cy: 528.09759521
+
+Camera1.k1: 0.0
+Camera1.k2: 0.0
+Camera1.p1: 0.0
+Camera1.p2: 0.0
+
+Camera2.fx: 811.63284302
+Camera2.fy: 811.63284302
+Camera2.cx: 949.82876587
+Camera2.cy: 528.09759521
+
+Camera2.k1: 0.0
+Camera2.k2: 0.0
+Camera2.p1: 0.0
+Camera2.p2: 0.0
+
+Camera.width: 1920
+Camera.height: 1080
+
+# Camera frames per second
+Camera.fps: 30
+
+# Color order of the images (0: BGR, 1: RGB. It is ignored if images are grayscale)
+Camera.RGB: 1
+
+Stereo.ThDepth: 60.0
+
+Stereo.T_c1_c2: !!opencv-matrix
+ rows: 4
+ cols: 4
+ dt: f
+ data: [1.0, 0.0, 0.0, 0.120,
+ 0.0, 1.0, 0.0, 0.0,
+ 0.0, 0.0, 1.0, 0.0,
+ 0,0,0,1.0]
+
+#--------------------------------------------------------------------------------------------
+# ORB Parameters
+#--------------------------------------------------------------------------------------------
+
+# ORB Extractor: Number of features per image
+ORBextractor.nFeatures: 1200
+
+# ORB Extractor: Scale factor between levels in the scale pyramid
+ORBextractor.scaleFactor: 1.2
+
+# ORB Extractor: Number of levels in the scale pyramid
+ORBextractor.nLevels: 8
+
+# ORB Extractor: Fast threshold
+# Image is divided in a grid. At each cell FAST are extracted imposing a minimum response.
+# Firstly we impose iniThFAST. If no corners are detected we impose a lower value minThFAST
+# You can lower these values if your images have low contrast
+ORBextractor.iniThFAST: 20
+ORBextractor.minThFAST: 7
+
+#--------------------------------------------------------------------------------------------
+# Viewer Parameters
+#--------------------------------------------------------------------------------------------
+Viewer.KeyFrameSize: 0.05
+Viewer.KeyFrameLineWidth: 1.0
+Viewer.GraphLineWidth: 0.9
+Viewer.PointSize: 2.0
+Viewer.CameraSize: 0.08
+Viewer.CameraLineWidth: 3.0
+Viewer.ViewpointX: 0.0
+Viewer.ViewpointY: -0.7
+Viewer.ViewpointZ: -1.8
+Viewer.ViewpointF: 500.0
+Viewer.imageViewScale: 1.0
+
diff --git a/Examples/Stereo/stereo_realsense_D455.cc b/Examples/Stereo/stereo_realsense_D455.cc
new file mode 100644
index 0000000000..ad27024ee4
--- /dev/null
+++ b/Examples/Stereo/stereo_realsense_D455.cc
@@ -0,0 +1,212 @@
+/**
+* This file is part of ORB-SLAM3
+*
+* Copyright (C) 2017-2021 Carlos Campos, Richard Elvira, Juan J. Gómez Rodríguez, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.
+* Copyright (C) 2014-2016 Raúl Mur-Artal, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.
+*
+* ORB-SLAM3 is free software: you can redistribute it and/or modify it under the terms of the GNU General Public
+* License as published by the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* ORB-SLAM3 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
+* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License along with ORB-SLAM3.
+* If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <iostream>
+#include <algorithm>
+#include <fstream>
+#include <iomanip>
+#include <chrono>
+
+#include <opencv2/core/core.hpp>
+
+#include <System.h>
+
+using namespace std;
+
+void LoadImages(const string &strPathLeft, const string &strPathRight, const string &strPathTimes,
+                vector<string> &vstrImageLeft, vector<string> &vstrImageRight, vector<double> &vTimeStamps);
+
+int main(int argc, char **argv)
+{
+ if(argc < 5)
+ {
+ cerr << endl << "Usage: ./stereo_euroc path_to_vocabulary path_to_settings path_to_sequence_folder_1 path_to_times_file_1 (path_to_image_folder_2 path_to_times_file_2 ... path_to_image_folder_N path_to_times_file_N) (trajectory_file_name)" << endl;
+
+ return 1;
+ }
+
+ const int num_seq = (argc-3)/2;
+ cout << "num_seq = " << num_seq << endl;
+ bool bFileName= (((argc-3) % 2) == 1);
+ string file_name;
+ if (bFileName)
+ {
+ file_name = string(argv[argc-1]);
+ cout << "file name: " << file_name << endl;
+ }
+
+ // Load all sequences:
+ int seq;
+    vector< vector<string> > vstrImageLeft;
+    vector< vector<string> > vstrImageRight;
+    vector< vector<double> > vTimestampsCam;
+    vector<int> nImages;
+
+ vstrImageLeft.resize(num_seq);
+ vstrImageRight.resize(num_seq);
+ vTimestampsCam.resize(num_seq);
+ nImages.resize(num_seq);
+
+ int tot_images = 0;
+    for (seq = 0; seq<num_seq; seq++)
+    {
+        cout << "Loading images for sequence " << seq << "...";
+
+        string pathSeq(argv[(2*seq) + 3]);
+        string pathTimeStamps(argv[(2*seq) + 4]);
+
+        string pathCam0 = pathSeq + "/mav0/cam0/data";
+        string pathCam1 = pathSeq + "/mav0/cam1/data";
+
+        LoadImages(pathCam0, pathCam1, pathTimeStamps, vstrImageLeft[seq], vstrImageRight[seq], vTimestampsCam[seq]);
+        cout << "LOADED!" << endl;
+
+        nImages[seq] = vstrImageLeft[seq].size();
+        tot_images += nImages[seq];
+    }
+
+    // Vector for tracking time statistics
+    vector<float> vTimesTrack;
+ vTimesTrack.resize(tot_images);
+
+ cout << endl << "-------" << endl;
+ cout.precision(17);
+
+ // Create SLAM system. It initializes all system threads and gets ready to process frames.
+ ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::STEREO, true);
+
+ cv::Mat imLeft, imRight;
+    for (seq = 0; seq<num_seq; seq++)
+    {
+        // Seq loop
+        double t_track = 0.f;
+        double t_resize = 0.f;
+        double t_rect = 0.f;
+        int proccIm = 0;
+        for(int ni=0; ni<nImages[seq]; ni++, proccIm++)
+        {
+            // Read left and right images from file
+            imLeft = cv::imread(vstrImageLeft[seq][ni],cv::IMREAD_UNCHANGED);
+            imRight = cv::imread(vstrImageRight[seq][ni],cv::IMREAD_UNCHANGED);
+
+            if(imLeft.empty())
+            {
+                cerr << endl << "Failed to load image at: "
+                     << string(vstrImageLeft[seq][ni]) << endl;
+                return 1;
+            }
+
+            if(imRight.empty())
+            {
+                cerr << endl << "Failed to load image at: "
+                     << string(vstrImageRight[seq][ni]) << endl;
+                return 1;
+            }
+
+            double tframe = vTimestampsCam[seq][ni];
+
+#ifdef COMPILEDWITHC11
+            std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now();
+#else
+            std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now();
+#endif
+
+            // Pass the images to the SLAM system
+            SLAM.TrackStereo(imLeft,imRight,tframe, vector<ORB_SLAM3::IMU::Point>(), vstrImageLeft[seq][ni]);
+
+#ifdef COMPILEDWITHC11
+ std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
+#else
+ std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now();
+#endif
+
+#ifdef REGISTER_TIMES
+            t_track = t_resize + t_rect + std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t2 - t1).count();
+ SLAM.InsertTrackTime(t_track);
+#endif
+
+            double ttrack= std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
+
+ vTimesTrack[ni]=ttrack;
+
+ // Wait to load the next frame
+ double T=0;
+            if(ni<nImages[seq]-1)
+                T = vTimestampsCam[seq][ni+1]-tframe;
+            else if(ni>0)
+ T = tframe-vTimestampsCam[seq][ni-1];
+
+            if(ttrack<T)
+                usleep((T-ttrack)*1e6); // 1e6
+        }
+
+        if(seq < num_seq - 1)
+        {
+            cout << "Changing the dataset" << endl;
+
+            SLAM.ChangeDataset();
+        }
+    }
+
+    // Stop all threads
+    SLAM.Shutdown();
+
+    // Save camera trajectory
+    if (bFileName)
+    {
+        const string kf_file = "kf_" + string(argv[argc-1]) + ".txt";
+        const string f_file = "f_" + string(argv[argc-1]) + ".txt";
+        SLAM.SaveTrajectoryEuroC(f_file);
+        SLAM.SaveKeyFrameTrajectoryEuroC(kf_file);
+    }
+    else
+    {
+        SLAM.SaveTrajectoryEuroC("CameraTrajectory.txt");
+        SLAM.SaveKeyFrameTrajectoryEuroC("KeyFrameTrajectory.txt");
+    }
+
+    return 0;
+}
+
+void LoadImages(const string &strPathLeft, const string &strPathRight, const string &strPathTimes,
+                vector<string> &vstrImageLeft, vector<string> &vstrImageRight, vector<double> &vTimeStamps)
+{
+ ifstream fTimes;
+ fTimes.open(strPathTimes.c_str());
+ vTimeStamps.reserve(5000);
+ vstrImageLeft.reserve(5000);
+ vstrImageRight.reserve(5000);
+ while(!fTimes.eof())
+ {
+ string s;
+ getline(fTimes,s);
+ if(!s.empty())
+ {
+ stringstream ss;
+ ss << s;
+ vstrImageLeft.push_back(strPathLeft + "/" + ss.str() + ".png");
+ vstrImageRight.push_back(strPathRight + "/" + ss.str() + ".png");
+ double t;
+ ss >> t;
+ vTimeStamps.push_back(t/1e9);
+
+ }
+ }
+}
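
LoadImages() expects EuRoC-style input: a times file with one timestamp per line and, per camera, a folder containing one <timestamp>.png per frame. The sketch below shows one way to record D455 infrared pairs into that layout; the output folders (cam0/, cam1/), the 100-frame count and the stream settings are assumptions and the folders must exist beforehand. For stereo use the IR emitter would normally also be disabled, as done in the monocular example above.

```cpp
// Sketch: record D455 infrared pairs into the folder / times-file layout that
// LoadImages() expects (one <timestamp>.png per camera, timestamps in nanoseconds).
#include <fstream>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <librealsense2/rs.hpp>

int main()
{
    rs2::pipeline pipe;
    rs2::config cfg;
    cfg.enable_stream(RS2_STREAM_INFRARED, 1, 848, 480, RS2_FORMAT_Y8, 30); // left IR
    cfg.enable_stream(RS2_STREAM_INFRARED, 2, 848, 480, RS2_FORMAT_Y8, 30); // right IR
    pipe.start(cfg);

    std::ofstream times("times.txt");   // one timestamp per line, as read by LoadImages()
    for(int i = 0; i < 100; i++)        // record 100 frames as a demo
    {
        rs2::frameset fs = pipe.wait_for_frames();
        rs2::video_frame left  = fs.get_infrared_frame(1);
        rs2::video_frame right = fs.get_infrared_frame(2);

        // RealSense timestamps are in milliseconds; LoadImages() divides by 1e9,
        // so store them as nanoseconds.
        const long long t_ns = static_cast<long long>(fs.get_timestamp() * 1e6);
        const std::string stamp = std::to_string(t_ns);

        cv::Mat imL(cv::Size(848, 480), CV_8U, (void*)left.get_data(),  cv::Mat::AUTO_STEP);
        cv::Mat imR(cv::Size(848, 480), CV_8U, (void*)right.get_data(), cv::Mat::AUTO_STEP);
        cv::imwrite("cam0/" + stamp + ".png", imL);
        cv::imwrite("cam1/" + stamp + ".png", imR);
        times << stamp << "\n";
    }
    return 0;
}
```
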
diff --git a/Examples/Stereo/stereo_zed2i.cc b/Examples/Stereo/stereo_zed2i.cc
new file mode 100644
index 0000000000..ad27024ee4
--- /dev/null
+++ b/Examples/Stereo/stereo_zed2i.cc
@@ -0,0 +1,212 @@
+/**
+* This file is part of ORB-SLAM3
+*
+* Copyright (C) 2017-2021 Carlos Campos, Richard Elvira, Juan J. Gómez Rodríguez, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.
+* Copyright (C) 2014-2016 Raúl Mur-Artal, José M.M. Montiel and Juan D. Tardós, University of Zaragoza.
+*
+* ORB-SLAM3 is free software: you can redistribute it and/or modify it under the terms of the GNU General Public
+* License as published by the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* ORB-SLAM3 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
+* the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License along with ORB-SLAM3.
+* If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <iostream>
+#include <algorithm>
+#include <fstream>
+#include <iomanip>
+#include <chrono>
+
+#include <opencv2/core/core.hpp>
+
+#include <System.h>
+
+using namespace std;
+
+void LoadImages(const string &strPathLeft, const string &strPathRight, const string &strPathTimes,
+                vector<string> &vstrImageLeft, vector<string> &vstrImageRight, vector<double> &vTimeStamps);
+
+int main(int argc, char **argv)
+{
+ if(argc < 5)
+ {
+ cerr << endl << "Usage: ./stereo_euroc path_to_vocabulary path_to_settings path_to_sequence_folder_1 path_to_times_file_1 (path_to_image_folder_2 path_to_times_file_2 ... path_to_image_folder_N path_to_times_file_N) (trajectory_file_name)" << endl;
+
+ return 1;
+ }
+
+ const int num_seq = (argc-3)/2;
+ cout << "num_seq = " << num_seq << endl;
+ bool bFileName= (((argc-3) % 2) == 1);
+ string file_name;
+ if (bFileName)
+ {
+ file_name = string(argv[argc-1]);
+ cout << "file name: " << file_name << endl;
+ }
+
+ // Load all sequences:
+ int seq;
+    vector< vector<string> > vstrImageLeft;
+    vector< vector<string> > vstrImageRight;
+    vector< vector<double> > vTimestampsCam;
+    vector<int> nImages;
+
+ vstrImageLeft.resize(num_seq);
+ vstrImageRight.resize(num_seq);
+ vTimestampsCam.resize(num_seq);
+ nImages.resize(num_seq);
+
+ int tot_images = 0;
+    for (seq = 0; seq<num_seq; seq++)
+    {
+        cout << "Loading images for sequence " << seq << "...";
+
+        string pathSeq(argv[(2*seq) + 3]);
+        string pathTimeStamps(argv[(2*seq) + 4]);
+
+        string pathCam0 = pathSeq + "/mav0/cam0/data";
+        string pathCam1 = pathSeq + "/mav0/cam1/data";
+
+        LoadImages(pathCam0, pathCam1, pathTimeStamps, vstrImageLeft[seq], vstrImageRight[seq], vTimestampsCam[seq]);
+        cout << "LOADED!" << endl;
+
+        nImages[seq] = vstrImageLeft[seq].size();
+        tot_images += nImages[seq];
+    }
+
+    // Vector for tracking time statistics
+    vector<float> vTimesTrack;
+ vTimesTrack.resize(tot_images);
+
+ cout << endl << "-------" << endl;
+ cout.precision(17);
+
+ // Create SLAM system. It initializes all system threads and gets ready to process frames.
+ ORB_SLAM3::System SLAM(argv[1],argv[2],ORB_SLAM3::System::STEREO, true);
+
+ cv::Mat imLeft, imRight;
+    for (seq = 0; seq<num_seq; seq++)
+    {
+        // Seq loop
+        double t_track = 0.f;
+        double t_resize = 0.f;
+        double t_rect = 0.f;
+        int proccIm = 0;
+        for(int ni=0; ni<nImages[seq]; ni++, proccIm++)
+        {
+            // Read left and right images from file
+            imLeft = cv::imread(vstrImageLeft[seq][ni],cv::IMREAD_UNCHANGED);
+            imRight = cv::imread(vstrImageRight[seq][ni],cv::IMREAD_UNCHANGED);
+
+            if(imLeft.empty())
+            {
+                cerr << endl << "Failed to load image at: "
+                     << string(vstrImageLeft[seq][ni]) << endl;
+                return 1;
+            }
+
+            if(imRight.empty())
+            {
+                cerr << endl << "Failed to load image at: "
+                     << string(vstrImageRight[seq][ni]) << endl;
+                return 1;
+            }
+
+            double tframe = vTimestampsCam[seq][ni];
+
+#ifdef COMPILEDWITHC11
+            std::chrono::steady_clock::time_point t1 = std::chrono::steady_clock::now();
+#else
+            std::chrono::monotonic_clock::time_point t1 = std::chrono::monotonic_clock::now();
+#endif
+
+            // Pass the images to the SLAM system
+            SLAM.TrackStereo(imLeft,imRight,tframe, vector<ORB_SLAM3::IMU::Point>(), vstrImageLeft[seq][ni]);
+
+#ifdef COMPILEDWITHC11
+ std::chrono::steady_clock::time_point t2 = std::chrono::steady_clock::now();
+#else
+ std::chrono::monotonic_clock::time_point t2 = std::chrono::monotonic_clock::now();
+#endif
+
+#ifdef REGISTER_TIMES
+            t_track = t_resize + t_rect + std::chrono::duration_cast<std::chrono::duration<double,std::milli> >(t2 - t1).count();
+ SLAM.InsertTrackTime(t_track);
+#endif
+
+            double ttrack= std::chrono::duration_cast<std::chrono::duration<double> >(t2 - t1).count();
+
+ vTimesTrack[ni]=ttrack;
+
+ // Wait to load the next frame
+ double T=0;
+            if(ni<nImages[seq]-1)
+                T = vTimestampsCam[seq][ni+1]-tframe;
+            else if(ni>0)
+ T = tframe-vTimestampsCam[seq][ni-1];
+
+            if(ttrack<T)
+                usleep((T-ttrack)*1e6); // 1e6
+        }
+
+        if(seq < num_seq - 1)
+        {
+            cout << "Changing the dataset" << endl;
+
+            SLAM.ChangeDataset();
+        }
+    }
+
+    // Stop all threads
+    SLAM.Shutdown();
+
+    // Save camera trajectory
+    if (bFileName)
+    {
+        const string kf_file = "kf_" + string(argv[argc-1]) + ".txt";
+        const string f_file = "f_" + string(argv[argc-1]) + ".txt";
+        SLAM.SaveTrajectoryEuroC(f_file);
+        SLAM.SaveKeyFrameTrajectoryEuroC(kf_file);
+    }
+    else
+    {
+        SLAM.SaveTrajectoryEuroC("CameraTrajectory.txt");
+        SLAM.SaveKeyFrameTrajectoryEuroC("KeyFrameTrajectory.txt");
+    }
+
+    return 0;
+}
+
+void LoadImages(const string &strPathLeft, const string &strPathRight, const string &strPathTimes,
+                vector<string> &vstrImageLeft, vector<string> &vstrImageRight, vector<double> &vTimeStamps)
+{
+ ifstream fTimes;
+ fTimes.open(strPathTimes.c_str());
+ vTimeStamps.reserve(5000);
+ vstrImageLeft.reserve(5000);
+ vstrImageRight.reserve(5000);
+ while(!fTimes.eof())
+ {
+ string s;
+ getline(fTimes,s);
+ if(!s.empty())
+ {
+ stringstream ss;
+ ss << s;
+ vstrImageLeft.push_back(strPathLeft + "/" + ss.str() + ".png");
+ vstrImageRight.push_back(strPathRight + "/" + ss.str() + ".png");
+ double t;
+ ss >> t;
+ vTimeStamps.push_back(t/1e9);
+
+ }
+ }
+}
diff --git a/src/LocalMapping.cc b/src/LocalMapping.cc
index 53df3324fa..d51a14ceee 100644
--- a/src/LocalMapping.cc
+++ b/src/LocalMapping.cc
@@ -247,7 +247,7 @@ void LocalMapping::Run()
vdKFCullingSync_ms.push_back(timeKFCulling_ms);
#endif
- mpLoopCloser->InsertKeyFrame(mpCurrentKeyFrame);
+ //mpLoopCloser->InsertKeyFrame(mpCurrentKeyFrame);
#ifdef REGISTER_TIMES
std::chrono::steady_clock::time_point time_EndLocalMap = std::chrono::steady_clock::now();
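
Commenting out the InsertKeyFrame() call disables loop closing entirely (the loop-closing thread simply idles). If the intent is to toggle this per run rather than per build, a run-time guard is an alternative; mbEnableLoopClosing below is a hypothetical member, not something this patch adds.

```cpp
// Hypothetical alternative: gate loop closing behind a flag instead of
// commenting the call out (mbEnableLoopClosing is an invented member).
if (mbEnableLoopClosing)
    mpLoopCloser->InsertKeyFrame(mpCurrentKeyFrame);
```
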
diff --git a/src/Tracking.cc b/src/Tracking.cc
index 5191451a81..87f4784829 100644
--- a/src/Tracking.cc
+++ b/src/Tracking.cc
@@ -2724,12 +2724,12 @@ bool Tracking::TrackReferenceKeyFrame()
// We perform first an ORB matching with the reference keyframe
// If enough matches are found we setup a PnP solver
- ORBmatcher matcher(0.7,true);
+ ORBmatcher matcher(0.7, true);
     vector<MapPoint*> vpMapPointMatches;
- int nmatches = matcher.SearchByBoW(mpReferenceKF,mCurrentFrame,vpMapPointMatches);
+ int nmatches = matcher.SearchByBoW(mpReferenceKF, mCurrentFrame, vpMapPointMatches);
- if(nmatches<15)
+ if(nmatches < 15)
{
cout << "TRACK_REF_KF: Less than 15 matches!!\n";
return false;
@@ -2810,7 +2810,7 @@ void Tracking::UpdateLastFrame()
     // We insert all close points (depth<mThDepth)
diff --git a/src/TwoViewReconstruction.cc b/src/TwoViewReconstruction.cc
--- a/src/TwoViewReconstruction.cc
+++ b/src/TwoViewReconstruction.cc
@@ ... @@ void TwoViewReconstruction::FindFundamental(vector<bool> &vbMatchesInliers, float &score, Eigen::Matrix3f &F21)
 {
     // Number of putative matches
-    const int N = vbMatchesInliers.size();
+    const int N = mvMatches12.size();
     // Normalize coordinates
     vector<cv::Point2f> vPn1, vPn2;