diff --git a/bin/preprocess_data.py b/bin/preprocess_data.py
new file mode 100644
index 0000000..4aaec48
--- /dev/null
+++ b/bin/preprocess_data.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
+# All rights reserved.
+# This file is released under the "BSD-3-Clause License".
+# Please see the LICENSE file that has been included as part of this package.
+import os
+
+import numpy as np
+import torch
+import yaml
+
+import preprocessing.preprocesser
+
+
+def yes_or_no(question):
+    # Ask again until the answer starts with 'y' or 'n'
+    while True:
+        reply = input(question + ' (y/n): ').lower().strip()
+        if reply.startswith('y'):
+            return True
+        elif reply.startswith('n'):
+            return False
+
+
+def config():
+    # Load parameters
+    with open('config/config_datasets.yaml') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    with open('config/deployment_options.yaml') as f:
+        deployment_options = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(deployment_options)
+
+    # Device to be used
+    config["device"] = torch.device(config["device"])
+
+    for dataset in config["datasets"]:
+        config[dataset]["horizontal_cells"] = config[dataset]["horizontal_cells_preprocessing"]
+        config[dataset]["data_identifiers"] = config[dataset]["training_identifiers"] + config[dataset]["testing_identifiers"]
+
+    # Convert angles to radians
+    for dataset in config["datasets"]:
+        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
+        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
+    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
+    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
+
+    # Check whether the rosbag exists
+    for dataset in config["datasets"]:
+        print("Checking whether path to " + config[dataset]["data_path"] + " exists.")
+        if not os.path.exists(config[dataset]["data_path"]):
+            raise Exception("Path " + config[dataset]["data_path"] + " does not exist. Exiting.")
+
+    # User check for correctness of paths -------------
+    print("----------------------------------")
+    print("Run for the datasets: " + str(config["datasets"]))
+    print("which are located at")
+    for dataset in config["datasets"]:
+        print(config[dataset]["data_path"])
+    print("and will be stored at")
+    for dataset in config["datasets"]:
+        print(config[dataset]["preprocessed_path"])
+    print("----------")
+    if not yes_or_no("Continue?"):
+        print("Okay, the program will be stopped.")
+        exit()
+
+    # -------------------------------------------------
+
+    print("----------------------------------")
+    print("Configuration for this run: ")
+    print(config)
+    print("----------------------------------")
+
+    return config
+
+
+if __name__ == "__main__":
+    config = config()
+    preprocesser = preprocessing.preprocesser.Preprocesser(config=config)
+    preprocesser.preprocess_data()
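All of the entry points in this patch share one configuration pattern: several YAML files are loaded and merged into a single flat dict via dict.update, so keys from later files override earlier ones. A minimal, self-contained sketch of that pattern (the helper name is ours, not part of the patch):

    import yaml

    def load_merged_config(paths):
        """Merge several YAML files; later files win on duplicate keys."""
        config = {}
        for path in paths:
            with open(path) as f:
                config.update(yaml.load(f, Loader=yaml.FullLoader) or {})
        return config

    # e.g. load_merged_config(['config/config_datasets.yaml', 'config/deployment_options.yaml'])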
diff --git a/bin/run_rosnode.py b/bin/run_rosnode.py
new file mode 100644
index 0000000..7a561ee
--- /dev/null
+++ b/bin/run_rosnode.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
+# All rights reserved.
+# This file is released under the "BSD-3-Clause License".
+# Please see the LICENSE file that has been included as part of this package.
+import click
+import numpy as np
+import torch
+import yaml
+
+import ros_utils.odometry_publisher
+
+
+@click.command()
+@click.option('--checkpoint', prompt='Path to the saved model you want to test')
+@click.option('--dataset',
+              prompt='On which dataset configuration do you want to get predictions? [kitti, darpa, ....]. Does not '
+                     'need to be one of those, but the sensor parameters are looked up in the config_datasets.yaml.')
+@click.option('--lidar_topic', prompt='Topic of the published LiDAR PointCloud2 messages.')
+@click.option('--lidar_frame', prompt='LiDAR frame in TF tree.')
+@click.option('--integrate_odometry', help='Whether the published odometry should be integrated in the TF tree.',
+              default=True)
+def config(checkpoint, dataset, lidar_topic, lidar_frame, integrate_odometry):
+    with open('config/config_datasets.yaml') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    with open('config/deployment_options.yaml') as f:
+        deployment_options = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(deployment_options)
+    with open('config/hyperparameters.yaml') as f:
+        network_hyperparameters = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(network_hyperparameters)
+
+    # Mode
+    config["mode"] = "training"
+
+    # No dropout during testing
+    if config["use_dropout"]:
+        config["use_dropout"] = False
+        print("Deactivating dropout for this mode.")
+
+    # CLI Input
+    ## Checkpoint
+    config["checkpoint"] = str(checkpoint)
+    ## Dataset
+    config["datasets"] = [str(dataset)]
+    ## LiDAR Topic
+    config["lidar_topic"] = str(lidar_topic)
+    ## LiDAR Frame
+    config["lidar_frame"] = str(lidar_frame)
+    ## Integrate odometry
+    config["integrate_odometry"] = integrate_odometry
+
+    # Device to be used
+    config["device"] = torch.device(config["device"])
+
+    # Convert angles to radians
+    for dataset in config["datasets"]:
+        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
+        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
+    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
+    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
+
+    print("----------------------------------")
+    print("Configuration for this run: ")
+    print(config)
+    print("----------------------------------")
+
+    return config
+
+
+if __name__ == "__main__":
+    config = config(standalone_mode=False)
+    publisher = ros_utils.odometry_publisher.OdometryPublisher(config=config)
+    publisher.publish_odometry()
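The `config = config(standalone_mode=False)` calls at the bottom of these scripts rely on a click detail: with standalone_mode=False, the command returns the callback's return value instead of terminating the process, which is what lets a click command double as a plain config factory. A toy example:

    import click

    @click.command()
    @click.option('--name', default='world')
    def build_config(name):
        # The return value only reaches the caller when standalone_mode=False
        return {'name': name}

    if __name__ == '__main__':
        cfg = build_config(standalone_mode=False)
        print(cfg)  # {'name': 'world'}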
diff --git a/bin/run_testing.py b/bin/run_testing.py
new file mode 100644
index 0000000..0799a68
--- /dev/null
+++ b/bin/run_testing.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
+# All rights reserved.
+# This file is released under the "BSD-3-Clause License".
+# Please see the LICENSE file that has been included as part of this package.
+import click
+import numpy as np
+import torch
+import yaml
+
+import deploy.tester
+
+
+@click.command()
+@click.option('--testing_run_name', prompt='MLFlow name of the run',
+              help='The name under which the run can be found afterwards.')
+@click.option('--experiment_name', help='High-level testing sequence name for clustering in MLFlow.',
+              default="testing")
+@click.option('--checkpoint', prompt='Path to the saved checkpoint of the model you want to test')
+def config(testing_run_name, experiment_name, checkpoint):
+    with open('config/config_datasets.yaml') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    with open('config/deployment_options.yaml') as f:
+        deployment_options = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(deployment_options)
+    with open('config/hyperparameters.yaml') as f:
+        network_hyperparameters = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(network_hyperparameters)
+
+    # Parameters from previous run?
+    checkpoint_data = torch.load(checkpoint)
+    if 'parameters' in checkpoint_data:
+        print("\033[92m"
+              + "Found parameters in checkpoint! Setting part of the parameters to the stored ones."
+              + "\033[0;0m")
+        parameters_exist = True
+    else:
+        print("Checkpoint does not contain any parameters. Using the ones specified in the YAML files.")
+        parameters_exist = False
+
+    # Parameters that are set depending on whether they are provided in the checkpoint
+    if parameters_exist:
+        loaded_config = checkpoint_data['parameters']
+        ## Device to be used
+        loaded_config["device"] = torch.device(config["device"])
+        ## Dataset selection
+        loaded_config["datasets"] = config["datasets"]
+        for dataset in loaded_config["datasets"]:
+            loaded_config[dataset]["testing_identifiers"] = config[dataset]["testing_identifiers"]
+            loaded_config[dataset]["data_identifiers"] = loaded_config[dataset]["testing_identifiers"]
+        ## Inference only
+        loaded_config["inference_only"] = config["inference_only"]
+        loaded_config["store_dataset_in_RAM"] = config["store_dataset_in_RAM"]
+        config = loaded_config
+    # Some parameters are only initialized when not taken from the checkpoint
+    else:
+        ## Device to be used
+        config["device"] = torch.device(config["device"])
+        for dataset in config["datasets"]:
+            config[dataset]["data_identifiers"] = config[dataset]["testing_identifiers"]
+        ## Convert angles to radians
+        for dataset in config["datasets"]:
+            config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
+            config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
+        config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
+        config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
+
+    # Parameters that are always set
+    ## No dropout during testing
+    if config["use_dropout"]:
+        config["use_dropout"] = False
+        print("Deactivating dropout for this mode.")
+
+    ## CLI Input
+    ### Testing run name
+    config["run_name"] = str(testing_run_name)
+    ### Checkpoint
+    config["checkpoint"] = str(checkpoint)
+    ### Experiment name, default specified in deployment_options.yaml
+    if experiment_name:
+        config["experiment"] = experiment_name
+    ## Mode
+    config["mode"] = "testing"
+    ## Unsupervised
+    config["unsupervised_at_start"] = True
+
+    print("----------------------------------")
+    print("Configuration for this run: ")
+    print(config)
+    print("----------------------------------")
+
+    return config
+
+
+if __name__ == "__main__":
+    config = config(standalone_mode=False)
+    tester = deploy.tester.Tester(config=config)
+    tester.test()
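run_testing.py (and run_training.py below) expect checkpoints saved as a dict that may carry a 'parameters' entry next to the weights. The exact layout is not shown in this patch; the sketch below illustrates the assumed convention, with hypothetical keys:

    import torch

    # Hypothetical checkpoint layout; only the 'parameters' key is relied upon above
    checkpoint = {
        'model_state_dict': {},           # would be model.state_dict() in practice
        'parameters': {'device': 'cpu'},  # config snapshot from training time
    }
    torch.save(checkpoint, '/tmp/example_checkpoint.pth')

    loaded = torch.load('/tmp/example_checkpoint.pth')
    if 'parameters' in loaded:
        print('restoring config from checkpoint:', loaded['parameters'])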
diff --git a/bin/run_training.py b/bin/run_training.py
new file mode 100644
index 0000000..380c211
--- /dev/null
+++ b/bin/run_training.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
+# All rights reserved.
+# This file is released under the "BSD-3-Clause License".
+# Please see the LICENSE file that has been included as part of this package.
+import click
+import numpy as np
+import torch
+import yaml
+
+import deploy.trainer
+
+
+@click.command()
+@click.option('--training_run_name', prompt='MLFlow name of the run',
+              help='The name under which the run can be found afterwards.')
+@click.option('--experiment_name', help='High-level training sequence name for clustering in MLFlow.',
+              default="")
+@click.option('--checkpoint', help='Path to the saved checkpoint. Leave empty if none.',
+              default="")
+def config(training_run_name, experiment_name, checkpoint):
+    with open('config/config_datasets.yaml') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    with open('config/deployment_options.yaml') as f:
+        deployment_options = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(deployment_options)
+    with open('config/hyperparameters.yaml') as f:
+        network_hyperparameters = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(network_hyperparameters)
+
+    # Default: load parameters from the YAML files
+    parameters_exist = False
+    loaded_config = None
+
+    # CLI Input
+    ## Checkpoint for continuing training
+    if checkpoint:
+        ### Parameters from previous run?
+        checkpoint_data = torch.load(checkpoint)
+        if 'parameters' in checkpoint_data:
+            print("\033[92m"
+                  + "Found parameters in checkpoint of previous run! Setting part of the parameters to the stored ones."
+                  + "\033[0;0m")
+            parameters_exist = True
+            loaded_config = checkpoint_data['parameters']
+        else:
+            print("Checkpoint does not contain any parameters. Using the ones specified in the YAML files.")
+
+    # Parameters that are set depending on whether they are provided in the checkpoint
+    if parameters_exist:
+        ## Device to be used
+        loaded_config["device"] = torch.device(config["device"])
+        loaded_config["datasets"] = config["datasets"]
+        for dataset in loaded_config["datasets"]:
+            loaded_config[dataset]["training_identifiers"] = config[dataset]["training_identifiers"]
+            loaded_config[dataset]["data_identifiers"] = loaded_config[dataset]["training_identifiers"]
+        config = loaded_config
+    # Some parameters are only initialized when not taken from the checkpoint
+    else:
+        ## Device to be used
+        config["device"] = torch.device(config["device"])
+        for dataset in config["datasets"]:
+            config[dataset]["data_identifiers"] = config[dataset]["training_identifiers"]
+        ## Convert angles to radians
+        for dataset in config["datasets"]:
+            config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
+            config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
+        config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
+        config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
+
+    # Parameters that are always set
+    config["checkpoint"] = str(checkpoint) if checkpoint else None
+    ## Training run name --> mandatory
+    config["training_run_name"] = str(training_run_name)
+    config["run_name"] = config["training_run_name"]
+    ## Experiment name, default specified in deployment_options.yaml
+    if experiment_name:
+        config["experiment"] = experiment_name
+    ## Mode
+    config["mode"] = "training"
+
+    print("----------------------------------")
+    print("Configuration for this run: ")
+    print(config)
+    print("----------------------------------")
+
+    return config
+
+
+if __name__ == "__main__":
+    config = config(standalone_mode=False)
+    trainer = deploy.trainer.Trainer(config=config)
+    trainer.train()
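Every script converts the field-of-view limits from degrees to radians with repeated in-place multiplications. That logic could be factored into a small helper like the following (hypothetical, not part of the patch):

    import numpy as np

    def fov_to_radians(fov_degrees):
        # [min_angle, max_angle] in degrees -> radians
        return [angle * np.pi / 180.0 for angle in fov_degrees]

    assert np.isclose(fov_to_radians([-180.0, 180.0])[1], np.pi)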
diff --git a/bin/visualize_pointcloud_normals.py b/bin/visualize_pointcloud_normals.py
new file mode 100644
index 0000000..f08e443
--- /dev/null
+++ b/bin/visualize_pointcloud_normals.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
+# All rights reserved.
+# This file is released under the "BSD-3-Clause License".
+# Please see the LICENSE file that has been included as part of this package.
+import numpy as np
+import torch
+import yaml
+
+import ros_utils.publish_point_cloud_and_normals
+
+
+def config():
+    with open('config/config_datasets.yaml') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    with open('config/deployment_options.yaml') as f:
+        deployment_options = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(deployment_options)
+
+    # Device to be used
+    config["device"] = torch.device(config["device"])
+
+    # Data identifiers
+    for dataset in config["datasets"]:
+        if config["mode"] == "training":
+            config[dataset]["data_identifiers"] = config[dataset]["training_identifiers"]
+        elif config["mode"] == "testing":
+            config[dataset]["data_identifiers"] = config[dataset]["testing_identifiers"]
+        else:
+            raise Exception('Only modes "training" and "testing" are valid.')
+
+    # Convert angles to radians
+    for dataset in config["datasets"]:
+        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
+        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
+    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
+    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
+
+    print("----------------------------------")
+    print("Configuration for this run: ")
+    print(config)
+    print("----------------------------------")
+
+    return config
+
+
+if __name__ == "__main__":
+    config = config()
+    publisher = ros_utils.publish_point_cloud_and_normals.ROSPublisher(config=config)
+    publisher.publish_dataset()
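Some of the scripts branch on the device string while others pass it straight to torch.device; both are equivalent here, since torch.device accepts the 'cuda' and 'cpu' strings directly. A common one-liner that additionally falls back to the CPU when no GPU is present:

    import torch

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using device:', device)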
diff --git a/scripts/convert_kitti_to_rosbag.py b/scripts/convert_kitti_to_rosbag.py
new file mode 100644
index 0000000..290b92f
--- /dev/null
+++ b/scripts/convert_kitti_to_rosbag.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
+# All rights reserved.
+# This file is released under the "BSD-3-Clause License".
+# Please see the LICENSE file that has been included as part of this package.
+import numpy as np
+import torch
+import yaml
+
+import ros_utils.convert_to_rosbag
+
+
+def config():
+    with open('config/deployment_options.yaml') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    with open('config/config_datasets.yaml') as f:
+        dataset_config = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(dataset_config)
+
+    # Device to be used
+    config["device"] = torch.device(config["device"])
+
+    for dataset in config["datasets"]:
+        config[dataset]["data_identifiers"] = config[dataset]["training_identifiers"] + config[dataset]["testing_identifiers"]
+
+    # Convert angles to radians
+    for dataset in config["datasets"]:
+        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
+        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
+    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
+    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
+
+    print("----------------------------------")
+    print("Configuration for this run: ")
+    print(config)
+    print("----------------------------------")
+
+    return config
+
+
+if __name__ == "__main__":
+    config = config()
+    converter = ros_utils.convert_to_rosbag.RosbagConverter(config=config)
+    converter.convert()
diff --git a/scripts/convert_pytorch_models.py b/scripts/convert_pytorch_models.py
new file mode 100644
index 0000000..27feb3f
--- /dev/null
+++ b/scripts/convert_pytorch_models.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
+# All rights reserved.
+# This file is released under the "BSD-3-Clause License".
+# Please see the LICENSE file that has been included as part of this package.
+import click
+import torch
+
+
+@click.command()
+@click.option('--checkpoint',
+              prompt='Path to the saved model (without .pth) you want to convert to older PyTorch compatibility.')
+def convert_pytorch_model(checkpoint):
+    # Load on the CPU so the conversion also works on machines without a GPU
+    state_dict = torch.load(checkpoint + ".pth", map_location=torch.device("cpu"))
+    print(state_dict)
+    # The legacy serialization format can be read by older PyTorch versions (< 1.6)
+    torch.save(state_dict, checkpoint + "_py27.pth", _use_new_zipfile_serialization=False)
+
+
+if __name__ == "__main__":
+    convert_pytorch_model()
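The conversion script above relies on torch.save's `_use_new_zipfile_serialization=False` flag, which writes the legacy (pre-PyTorch-1.6) pickle-based format that older PyTorch installations can still read. A standalone sketch with a dummy tensor:

    import torch

    dummy = {'weights': torch.zeros(3)}
    # Legacy format: readable by PyTorch versions that predate the zipfile serializer
    torch.save(dummy, '/tmp/legacy_format.pth', _use_new_zipfile_serialization=False)
    print(torch.load('/tmp/legacy_format.pth'))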
diff --git a/scripts/time_network.py b/scripts/time_network.py
new file mode 100644
index 0000000..9d4c867
--- /dev/null
+++ b/scripts/time_network.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
+# All rights reserved.
+# This file is released under the "BSD-3-Clause License".
+# Please see the LICENSE file that has been included as part of this package.
+import time
+
+import click
+import numpy as np
+import torch
+import yaml
+
+import models.model
+
+
+@click.command()
+@click.option('--checkpoint', help='Path to the saved model you want to continue training from.',
+              default="")
+def config(checkpoint):
+    with open('config/deployment_options.yaml') as f:
+        config = yaml.load(f, Loader=yaml.FullLoader)
+    with open('config/config_datasets.yaml') as f:
+        dataset_config = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(dataset_config)
+    with open('config/hyperparameters.yaml') as f:
+        hyperparameters_config = yaml.load(f, Loader=yaml.FullLoader)
+    config.update(hyperparameters_config)
+
+    # CLI Input
+    config["checkpoint"] = str(checkpoint) if checkpoint else None
+
+    # Device to be used
+    config["device"] = torch.device(config["device"])
+
+    # Convert angles to radians
+    for dataset in config["datasets"]:
+        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
+        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
+    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
+    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)
+
+    print("Configuration for this run: ")
+    print(config)
+
+    return config
+
+
+def cuda_sync():
+    # Synchronize only when CUDA is available (no-op on CPU-only machines)
+    if torch.cuda.is_available():
+        torch.cuda.synchronize()
+
+
+if __name__ == "__main__":
+    config = config(standalone_mode=False)
+    iterations = 1000
+    torch.set_num_threads(4)
+
+    # CUDA synchronisation
+    cuda_sync()
+
+    # Velodyne VLP-16
+    print("Velodyne VLP-16 --------------")
+    sample_input = torch.rand(1, 4, 16, 720).to(config["device"])
+    print("Used device is: " + str(config["device"]))
+
+    ## Standard Model
+    model = models.model.OdometryModel(config=config).to(config["device"]).eval()
+    # Warm-up pass before timing
+    _, _ = model(sample_input, sample_input)
+    cuda_sync()
+    t_accum = 0.0
+    for iteration in range(iterations):
+        t = time.time()
+        _, _ = model(sample_input, sample_input)
+        cuda_sync()
+        t_delta = time.time() - t
+        t_accum += t_delta
+        print(str(t_delta * 1000) + "ms")
+    print("Average execution time of model is: " + str(t_accum / iterations * 1000) + " milliseconds.")
+
+    ## JIT-traced Model
+    del model
+    model_jit = torch.jit.trace(
+        models.model.OdometryModel(config=config).to(config["device"]),
+        example_inputs=(sample_input, sample_input)).eval()
+    t_accum = 0.0
+    for iteration in range(iterations + 1):
+        cuda_sync()
+        t = time.time()
+        _, _ = model_jit(sample_input, sample_input)
+        cuda_sync()
+        t_delta = time.time() - t
+        # Skip the first iteration, which includes the JIT warm-up
+        if iteration != 0:
+            t_accum += t_delta
+        print(t_delta)
+    print("Average execution time of jit model is: " + str(t_accum / iterations * 1000) + " milliseconds.")
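time_network.py measures latency with time.time() bracketed by CUDA synchronization. An alternative, shown here as a hedged sketch assuming a CUDA device is available, is to time with CUDA events, which avoids host-side clock jitter:

    import torch

    if torch.cuda.is_available():
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        x = torch.rand(1, 4, 16, 720, device='cuda')
        start.record()
        y = x * 2.0  # stand-in for a model forward pass
        end.record()
        torch.cuda.synchronize()
        print(start.elapsed_time(end), 'ms')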