Showing 8 changed files with 560 additions and 0 deletions.
@@ -0,0 +1,80 @@
#!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import os

import numpy as np
import torch
import yaml

import preprocessing.preprocesser


def yes_or_no(question):
    # Ask for confirmation on stdin; any answer not starting with 'y' counts as "no".
    while "the answer is invalid":
        reply = str(input(question + ' (y/n): ')).lower().strip()
        if reply[0] == 'y':
            return True
        else:
            return False


def config():
    # Load parameters
    f = open('config/config_datasets.yaml')
    config = yaml.load(f, Loader=yaml.FullLoader)
    f = open('config/deployment_options.yaml')
    deployment_options = yaml.load(f, Loader=yaml.FullLoader)
    config.update(deployment_options)

    # Device to be used
    config["device"] = torch.device(config["device"])

    for dataset in config["datasets"]:
        config[dataset]["horizontal_cells"] = config[dataset]["horizontal_cells_preprocessing"]
        config[dataset]["data_identifiers"] = config[dataset]["training_identifiers"] + config[dataset][
            "testing_identifiers"]

    # Convert angles to radians
    for dataset in config["datasets"]:
        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)

    # Check whether rosbag exists
    for dataset in config["datasets"]:
        print("Checking whether path to " + config[dataset]["data_path"] + " exists.")
        if not os.path.exists(config[dataset]["data_path"]):
            raise Exception("Path " + config[dataset]["data_path"] + " does not exist. Exiting.")

    # User check for correctness of paths -------------
    print("----------------------------------")
    print("Run for the datasets: " + str(config["datasets"]))
    print("which are located at")
    for dataset in config["datasets"]:
        print(config[dataset]["data_path"])
    print("and will be stored at")
    for dataset in config["datasets"]:
        print(config[dataset]["preprocessed_path"])
    print("----------")
    if not yes_or_no("Continue?"):
        print("Okay, the program will be stopped.")
        exit()

    # -------------------------------------------------

    print("----------------------------------")
    print("Configuration for this run: ")
    print(config)
    print("----------------------------------")

    return config


if __name__ == "__main__":
    config = config()
    preprocesser = preprocessing.preprocesser.Preprocesser(config=config)
    preprocesser.preprocess_data()
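The script above only reads a handful of keys from the two YAML files. A minimal sketch of the merged config dictionary it expects is shown below; the key names are taken from the accesses in the code, while the dataset name, the concrete values, and any further keys present in config_datasets.yaml or deployment_options.yaml are assumptions.

# Illustrative merged configuration as consumed by config() above; all values are made up.
example_config = {
    "device": "cuda",                                 # wrapped into torch.device(...) by config()
    "datasets": ["kitti"],                            # each entry needs a matching top-level key below
    "horizontal_field_of_view": [-180.0, 180.0],      # degrees in the YAML, converted to radians
    "kitti": {
        "data_path": "/data/kitti/raw",               # must exist, otherwise config() raises
        "preprocessed_path": "/data/kitti/preprocessed",
        "horizontal_cells_preprocessing": 2048,       # copied into "horizontal_cells"
        "training_identifiers": [0, 1, 2],
        "testing_identifiers": [9, 10],               # training + testing form "data_identifiers"
        "vertical_field_of_view": [-24.8, 2.0],       # degrees in the YAML, converted to radians
    },
}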
@@ -0,0 +1,77 @@
#!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import click
import numpy as np
import torch
import yaml

import ros_utils.odometry_publisher


@click.command()
@click.option('--checkpoint', prompt='Path to the saved model you want to test')
@click.option('--dataset',
              prompt='On which dataset configuration do you want to get predictions? [kitti, darpa, ....]. Does not '
                     'need to be one of those, but the sensor parameters are looked up in the config_datasets.yaml.')
@click.option('--lidar_topic', prompt='Topic of the published LiDAR pointcloud2 messages.')
@click.option('--lidar_frame', prompt='LiDAR frame in TF tree.')
@click.option('--integrate_odometry', help='Whether the published odometry should be integrated in the TF tree.',
              default=True)
def config(checkpoint, dataset, lidar_topic, lidar_frame, integrate_odometry):
    f = open('config/config_datasets.yaml')
    config = yaml.load(f, Loader=yaml.FullLoader)
    f = open('config/deployment_options.yaml')
    deployment_options = yaml.load(f, Loader=yaml.FullLoader)
    config.update(deployment_options)
    f = open('config/hyperparameters.yaml')
    network_hyperparameters = yaml.load(f, Loader=yaml.FullLoader)
    config.update(network_hyperparameters)

    # Mode
    config["mode"] = "training"

    # No dropout during testing
    if config["use_dropout"]:
        config["use_dropout"] = False
        print("Deactivating dropout for this mode.")

    # CLI Input
    ## Checkpoint
    config["checkpoint"] = str(checkpoint)
    ## Dataset
    config["datasets"] = [str(dataset)]
    ## LiDAR Topic
    config["lidar_topic"] = str(lidar_topic)
    ## LiDAR Frame
    config["lidar_frame"] = str(lidar_frame)
    ## Integrate odometry
    config["integrate_odometry"] = integrate_odometry

    # Device to be used
    if config["device"] == "cuda":
        config["device"] = torch.device("cuda")
    else:
        config["device"] = torch.device("cpu")

    # Convert angles to radians
    for dataset in config["datasets"]:
        config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
        config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
    config["horizontal_field_of_view"][1] *= (np.pi / 180.0)

    print("----------------------------------")
    print("Configuration for this run: ")
    print(config)
    print("----------------------------------")

    return config


if __name__ == "__main__":
    config = config(standalone_mode=False)
    publisher = ros_utils.odometry_publisher.OdometryPublisher(config=config)
    publisher.publish_odometry()
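Because config is a click command, the __main__ block calls it with standalone_mode=False so that the decorated function's return value (the config dictionary) is handed back instead of click terminating the process after parsing. The same mechanism allows supplying the options programmatically instead of answering the prompts; a small sketch, with placeholder argument values:

# Hypothetical programmatic invocation; all argument values are placeholders.
cfg = config(
    ["--checkpoint", "checkpoints/model.pth",
     "--dataset", "kitti",
     "--lidar_topic", "/velodyne_points",
     "--lidar_frame", "velodyne"],
    standalone_mode=False,
)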
@@ -0,0 +1,97 @@
#!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import click
import numpy as np
import torch
import yaml

import deploy.tester


@click.command()
@click.option('--testing_run_name', prompt='MLFlow name of the run',
              help='The name under which the run can be found afterwards.')
@click.option('--experiment_name', help='High-level testing sequence name for clustering in MLFlow.',
              default="testing")
@click.option('--checkpoint', prompt='Path to the saved checkpoint of the model you want to test')
def config(testing_run_name, experiment_name, checkpoint):
    f = open('config/config_datasets.yaml')
    config = yaml.load(f, Loader=yaml.FullLoader)
    f = open('config/deployment_options.yaml')
    deployment_options = yaml.load(f, Loader=yaml.FullLoader)
    config.update(deployment_options)
    f = open('config/hyperparameters.yaml')
    network_hyperparameters = yaml.load(f, Loader=yaml.FullLoader)
    config.update(network_hyperparameters)

    # Parameters from previous run?
    if 'parameters' in torch.load(checkpoint):
        print("\033[92m" +
              "Found parameters in checkpoint! Setting part of the parameters to the stored values."
              + "\033[0;0m")
        parameters_exist = True
    else:
        print("Checkpoint does not contain any parameters. Using the ones specified in the YAML files.")
        parameters_exist = False

    # Parameters that are set depending on whether they are provided in the checkpoint
    if parameters_exist:
        loaded_config = torch.load(checkpoint)['parameters']
        ## Device to be used
        loaded_config["device"] = torch.device(config["device"])
        ## Dataset selection
        loaded_config["datasets"] = config["datasets"]
        for dataset in loaded_config["datasets"]:
            loaded_config[dataset]["testing_identifiers"] = config[dataset]["testing_identifiers"]
            loaded_config[dataset]["data_identifiers"] = loaded_config[dataset]["testing_identifiers"]
        ## Inference only
        loaded_config["inference_only"] = config["inference_only"]
        loaded_config["store_dataset_in_RAM"] = config["store_dataset_in_RAM"]
        config = loaded_config
    # Some parameters are only initialized when not taken from the checkpoint
    else:
        ## Device to be used
        config["device"] = torch.device(config["device"])
        for dataset in config["datasets"]:
            config[dataset]["data_identifiers"] = config[dataset]["testing_identifiers"]
        ## Convert angles to radians (a config restored from a checkpoint already stores radians)
        for dataset in config["datasets"]:
            config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
            config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
        config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
        config["horizontal_field_of_view"][1] *= (np.pi / 180.0)

    # Parameters that are always set
    ## No dropout during testing
    if config["use_dropout"]:
        config["use_dropout"] = False
        print("Deactivating dropout for this mode.")

    ## CLI Input
    ### Testing run name
    config["run_name"] = str(testing_run_name)
    ### Checkpoint
    config["checkpoint"] = str(checkpoint)
    ### Experiment name, default specified in deployment_options.yaml
    if experiment_name:
        config["experiment"] = experiment_name
    ## Mode
    config["mode"] = "testing"
    ## Unsupervised
    config["unsupervised_at_start"] = True

    print("----------------------------------")
    print("Configuration for this run: ")
    print(config)
    print("----------------------------------")

    return config


if __name__ == "__main__":
    config = config(standalone_mode=False)
    tester = deploy.tester.Tester(config=config)
    tester.test()
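The restore path above only assumes that whoever wrote the checkpoint stored the (already processed) config dictionary under a 'parameters' key. A minimal sketch of a compatible writer follows; apart from 'parameters', the entries and the helper name are assumptions, not taken from this commit.

import torch

def save_checkpoint(model, config, path="checkpoints/model.pth"):
    # Only the 'parameters' key is implied by the loading code above;
    # the remaining entries are illustrative.
    torch.save({"model_state_dict": model.state_dict(),
                "parameters": config},
               path)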
@@ -0,0 +1,94 @@
#!/usr/bin/env python3
# Copyright 2021 by Julian Nubert, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import click
import numpy as np
import torch
import yaml

import deploy.trainer


@click.command()
@click.option('--training_run_name', prompt='MLFlow name of the run',
              help='The name under which the run can be found afterwards.')
@click.option('--experiment_name', help='High-level training sequence name for clustering in MLFlow.',
              default="")
@click.option('--checkpoint', help='Path to the saved checkpoint. Leave empty if none.',
              default="")
def config(training_run_name, experiment_name, checkpoint):
    f = open('config/config_datasets.yaml')
    config = yaml.load(f, Loader=yaml.FullLoader)
    f = open('config/deployment_options.yaml')
    deployment_options = yaml.load(f, Loader=yaml.FullLoader)
    config.update(deployment_options)
    f = open('config/hyperparameters.yaml')
    network_hyperparameters = yaml.load(f, Loader=yaml.FullLoader)
    config.update(network_hyperparameters)

    # Default: load parameters from the YAML files
    parameters_exist = False

    # CLI Input
    ## Checkpoint for continuing training
    if checkpoint:
        ### Parameters from previous run?
        if 'parameters' in torch.load(checkpoint):
            print("\033[92m" +
                  "Found parameters in checkpoint of previous run! Setting part of the parameters to the stored values."
                  + "\033[0;0m")
            parameters_exist = True
        else:
            print("Checkpoint does not contain any parameters. Using the ones specified in the YAML files.")

    # Parameters that are set depending on whether they are provided in the checkpoint
    if parameters_exist:
        loaded_config = torch.load(checkpoint)['parameters']
        ## Device to be used
        loaded_config["device"] = torch.device(config["device"])
        loaded_config["datasets"] = config["datasets"]
        for dataset in loaded_config["datasets"]:
            loaded_config[dataset]["training_identifiers"] = config[dataset]["training_identifiers"]
            loaded_config[dataset]["data_identifiers"] = loaded_config[dataset]["training_identifiers"]
        config = loaded_config
    # Some parameters are only initialized when not taken from the checkpoint
    else:
        ## Device to be used
        config["device"] = torch.device(config["device"])
        for dataset in config["datasets"]:
            config[dataset]["data_identifiers"] = config[dataset]["training_identifiers"]
        ## Convert angles to radians (a config restored from a checkpoint already stores radians)
        for dataset in config["datasets"]:
            config[dataset]["vertical_field_of_view"][0] *= (np.pi / 180.0)
            config[dataset]["vertical_field_of_view"][1] *= (np.pi / 180.0)
        config["horizontal_field_of_view"][0] *= (np.pi / 180.0)
        config["horizontal_field_of_view"][1] *= (np.pi / 180.0)

    # Parameters that are always set
    if checkpoint:
        config["checkpoint"] = str(checkpoint)
    else:
        config["checkpoint"] = None
    ## Training run name --> mandatory
    config["training_run_name"] = str(training_run_name)
    config["run_name"] = config["training_run_name"]
    ## Experiment name, default specified in deployment_options.yaml
    if experiment_name:
        config["experiment"] = experiment_name
    ## Mode
    config["mode"] = "training"

    print("----------------------------------")
    print("Configuration for this run: ")
    print(config)
    print("----------------------------------")

    return config


if __name__ == "__main__":
    config = config(standalone_mode=False)
    trainer = deploy.trainer.Trainer(config=config)
    trainer.train()
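All of the scripts above build their configuration by merging the YAML files with plain dict.update(), so on a key collision the file loaded later wins (deployment_options.yaml over config_datasets.yaml, hyperparameters.yaml over both). A minimal illustration of that precedence, with made-up keys and values:

# dict.update() lets later config files silently override earlier ones on key collisions.
base = {"device": "cpu", "datasets": ["kitti"]}   # e.g. from config_datasets.yaml
base.update({"device": "cuda"})                   # e.g. from deployment_options.yaml
assert base["device"] == "cuda" and base["datasets"] == ["kitti"]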