Benchmark Scripts #94

Closed
wants to merge 34 commits
Commits
34 commits
87ca6e1
Dataset Init from valid_files.json
aaravpandya Mar 26, 2024
97cd004
Add bindings
aaravpandya Mar 26, 2024
0bc3ec5
Merge branch 'main' into ap_datasetInit
aaravpandya Mar 27, 2024
fb07b8c
Correct initialization for padding
aaravpandya Mar 27, 2024
f2b4f89
Merge branch 'main' into ap_datasetInit
aaravpandya Apr 15, 2024
13b9070
Fix main merge
aaravpandya Apr 15, 2024
e3195da
Merge branch 'ap_datasetInit' into ap_bench
aaravpandya Apr 19, 2024
2f2190c
Add benchmarking scripts
aaravpandya Apr 20, 2024
477eadd
Half cherry picked
aaravpandya Apr 20, 2024
0e3391a
Merge branch 'main' into ap_bench
aaravpandya Apr 26, 2024
a455db0
Add profiling code
aaravpandya Apr 27, 2024
6fda12e
Seperating reset
aaravpandya Apr 27, 2024
ab212dc
add stepsize
aaravpandya Apr 27, 2024
2c2199e
Add profiling
aaravpandya Apr 29, 2024
7936d13
Merge branch 'main' into ap_bench
aaravpandya May 7, 2024
0da8f7c
Merge branch 'main' into ap_bench
aaravpandya May 7, 2024
81711b7
Merge branch 'main' into ap_bench
aaravpandya May 14, 2024
e83ab3e
Add pynvml support for memory benchmarking
aaravpandya May 14, 2024
b1092ea
Merge branch 'main' into ap_bench
aaravpandya May 14, 2024
d59eb72
Merge branch 'main' into ap_bench
aaravpandya May 26, 2024
ca80971
refactor
aaravpandya May 26, 2024
b2b7333
Merge branch 'main' into ap_bench
aaravpandya May 29, 2024
b947df4
Merge branch 'main' into ap_bench
aaravpandya May 30, 2024
7a7d6b3
Add more params
aaravpandya May 30, 2024
bf24761
Merge branch 'main' into ap_bench
aaravpandya May 30, 2024
d44ade9
remove auto reset
aaravpandya May 30, 2024
7781567
Add exponential run option
aaravpandya May 30, 2024
cc099e3
Merge branch 'main' into ap_bench
aaravpandya May 31, 2024
0b4cb8e
Fix bindings
aaravpandya May 31, 2024
83cd63d
add return
aaravpandya May 31, 2024
46101e6
Add config
aaravpandya May 31, 2024
5545e1d
Merge branch 'main' into ap_bench
aaravpandya Jun 2, 2024
702c802
Make bench more similar
aaravpandya Jun 2, 2024
8392709
Merge branch 'main' into ap_bench
aaravpandya Jun 2, 2024
23 changes: 23 additions & 0 deletions config.yml
@@ -0,0 +1,23 @@
parameters:
  collisionBehaviour: AgentRemoved
  datasetInitOptions: FirstN
  maxNumControlledVehicles: 10000
  observationRadius: 1000.0
  polylineReductionThreshold: 0.5
  rewardParams: reward_params
  IgnoreNonVehicles: true
  roadObservationAlgorithm: KNearestEntitiesWithRadiusFiltering
  initOnlyValidAgentsAtFirstStep: true

reward_params:
  distanceToExpertThreshold: 1.0
  distanceToGoalThreshold: 1.0
  rewardType: DistanceBased

sim_manager:
  exec_mode: CUDA
  gpu_id: 0
  json_path: /home/aarav/nocturne_data/formatted_json_v2_no_tl_valid/
  num_worlds: 100

experiment: 2
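
For context, the benchmark script added below consumes this file by loading it with yaml.safe_load and overriding num_worlds from the command line before constructing the simulator. A minimal sketch, assuming the config.yml above sits in the working directory:

```python
import yaml

# Load the benchmark configuration shown above (the path is an assumption)
with open("config.yml", "r") as f:
    config = yaml.safe_load(f)

config["sim_manager"]["num_worlds"] = 16  # overridden by --numEnvs in bench.py
print(config["parameters"]["datasetInitOptions"], config["sim_manager"]["exec_mode"])
```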
Empty file added scripts/__init__.py
Empty file.
Empty file added scripts/bench_utils/__init__.py
Empty file.
173 changes: 173 additions & 0 deletions scripts/bench_utils/bench.py
@@ -0,0 +1,173 @@
from gpudrive import SimManager
from ..sim_utils.creator import SimCreator

from time import perf_counter
import argparse
import csv
import yaml
import torch
import os
from pynvml import (
    nvmlInit,
    nvmlSystemGetDriverVersion,
    nvmlDeviceGetHandleByIndex,
    nvmlDeviceGetMemoryInfo,
)


def get_gpu_memory_usage():
    handle = nvmlDeviceGetHandleByIndex(0)
    info = nvmlDeviceGetMemoryInfo(handle)
    return info.free, info.used, info.total

def get_shapes(sim: SimManager):
    shape = sim.shape_tensor().to_torch()  # shape is a tensor of shape (num_envs, 2)
    useful_num_agents, useful_num_roads = (
        torch.sum(shape[:, 0]).item(),
        torch.sum(shape[:, 1]).item(),
    )
    num_envs = shape.shape[0]

    # Override the agent count with the number of actually controlled agents
    useful_num_agents = sim.controlled_state_tensor().to_torch().sum().item()

    actual_num_agents = sim.self_observation_tensor().to_torch().shape[1] * num_envs
    actual_num_roads = sim.map_observation_tensor().to_torch().shape[1] * num_envs
    return actual_num_agents, actual_num_roads, useful_num_agents, useful_num_roads, num_envs

def timeit(func):
    def wrapper(*args, **kwargs):
        start = perf_counter()
        func(*args, **kwargs)  # Execute the function but ignore its result
        end = perf_counter()
        return end - start  # Return only the elapsed time
    return wrapper

@timeit
def reset(sim: SimManager, num_envs: int):
    for i in range(num_envs):
        sim.reset(i)
    sim.step()  # step once so the queued resets take effect

@timeit
def step(sim: SimManager, num_steps: int, actions: torch.Tensor):
    for i in range(num_steps):
        sim.action_tensor().to_torch().copy_(actions)
        sim.step()

def save_results():
    pass

def run_stress_test(sim: SimManager, config: dict, num_steps: int = 91):
    actual_num_agents, actual_num_roads, useful_num_agents, useful_num_roads, num_envs = get_shapes(sim)
    episode_length = 91
    step_times = []
    reset_times = []
    used, free, available = [], [], []
    # Random actions, mirroring run_benchmark below
    actions = torch.randn_like(sim.action_tensor().to_torch())
    for i in range(0, num_steps, episode_length):
        time_to_reset = reset(sim, num_envs)
        time_to_step = step(sim, num_steps, actions)
        reset_times.append(time_to_reset)
        step_times.append(time_to_step)
        f, u, t = get_gpu_memory_usage()
        used.append(u)
        free.append(f)
        available.append(t)

def run_benchmark(
    sim: SimManager, config: dict, profile_memory: bool = False, num_steps: int = 91
):
    actual_num_agents, actual_num_roads, useful_num_agents, useful_num_roads, num_envs = get_shapes(sim)

    time_to_reset = reset(sim, num_envs)
    actions_tensor = torch.randn_like(sim.action_tensor().to_torch())
    time_to_step = step(sim, num_steps, actions_tensor)

    fps = num_steps / time_to_step
    afps = fps * actual_num_agents
    useful_afps = fps * useful_num_agents

    print(
        f"{useful_num_agents=}, {useful_num_roads=}, {num_envs=}, {time_to_reset=}, {num_steps=}, {time_to_step=}, {fps=}, {afps=}, {useful_afps=}"
    )
    # Write the CSV header the first time benchmark_results.csv is created
    file_path = "benchmark_results.csv"
    if not os.path.exists(file_path):
        with open(file_path, mode="w", newline="") as file:
            writer = csv.writer(file)
            writer.writerow(
                [
                    "actual_num_agents",
                    "actual_num_roads",
                    "useful_num_agents",
                    "useful_num_roads",
                    "num_envs",
                    "time_to_reset",
                    "time_to_step",
                    "num_steps",
                    "fps",
                    "afps",
                    "useful_afps",
                    "exec_mode",
                    "datasetInitOptions",
                    "experiment",
                ]
            )

    with open(file_path, mode="a", newline="") as file:
        writer = csv.writer(file)
        writer.writerow(
            [
                actual_num_agents,
                actual_num_roads,
                useful_num_agents,
                useful_num_roads,
                num_envs,
                time_to_reset,
                time_to_step,
                num_steps,
                fps,
                afps,
                useful_afps,
                config["sim_manager"]["exec_mode"],
                config["parameters"]["datasetInitOptions"],
                config["experiment"],
            ]
        )


if __name__ == "__main__":
    # Initialize NVML so GPU memory usage can be queried during the run
    nvmlInit()
    print(f"Driver Version: {nvmlSystemGetDriverVersion()}")
    parser = argparse.ArgumentParser(description="GPUDrive Benchmarking Tool")
    parser.add_argument(
        "--datasetPath",
        type=str,
        help="Path to the YAML config file",
        default="/home/aarav/gpudrive/config.yml",
        required=False,
    )
    parser.add_argument(
        "--numEnvs", type=int, help="Number of environments", default=2, required=False
    )
    parser.add_argument(
        "--profileMemory",
        action="store_true",
        help="Profile memory usage",
        default=False,
        required=False,
    )
    parser.add_argument(
        "--numSteps",
        type=int,
        help="Number of steps to run the simulation for",
        default=91,
        required=False,
    )
    args = parser.parse_args()
    with open(args.datasetPath, "r") as file:
        config = yaml.safe_load(file)
    config["sim_manager"]["num_worlds"] = args.numEnvs
    sim = SimCreator(config)
    if args.profileMemory:
        run_stress_test(sim, config, args.numSteps)
    else:
        run_benchmark(sim, config, num_steps=args.numSteps)
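
For orientation, the throughput columns written to benchmark_results.csv follow directly from the timed loops above. A minimal sketch of the arithmetic with made-up sample numbers (the real values come from @timeit and get_shapes):

```python
num_steps = 91               # steps executed by step()
time_to_step = 0.05          # seconds, as returned by the @timeit wrapper (illustrative)
actual_num_agents = 12_800   # padded agent slots across all worlds (illustrative)
useful_num_agents = 3_400    # controlled agents only (illustrative)

fps = num_steps / time_to_step          # simulator steps per second   -> 1820.0
afps = fps * actual_num_agents          # agent-steps per second       -> 23,296,000
useful_afps = fps * useful_num_agents   # controlled-agent steps per second -> 6,188,000
```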
105 changes: 105 additions & 0 deletions scripts/bench_utils/dataset_binning.py
@@ -0,0 +1,105 @@
import json
import os
import shutil
import yaml
import csv
from tqdm import tqdm
import pandas as pd
import argparse

VALID_FILES_PATH = "/home/aarav/nocturne_data/formatted_json_v2_no_tl_valid"
FINAL_BINNED_JSON_PATHS = "/home/aarav/nocturne_data/binned_jsons"
CSV_PATH = "/home/aarav/nocturne_data/datacsv.csv"

def modify_valid_files_json(valid_files_path: str, file_path: str):
    if not os.path.exists(valid_files_path + "/valid_files.json"):
        with open(valid_files_path + "/valid_files.json", 'w') as file:
            json.dump({}, file)
    with open(valid_files_path + "/valid_files.json", 'r') as file:
        valid_files = json.load(file)
    valid_files.clear()
    valid_files[file_path] = []
    with open(valid_files_path + "/valid_files.json", 'w') as file:
        json.dump(valid_files, file)

def delete_file_from_dest(file_path: str):
    os.remove(file_path)

def copy_file_to_dest(file_path: str, dest_path: str):
    shutil.copy(file_path, dest_path)
    return os.path.join(dest_path, os.path.basename(file_path))

def return_list_of_files(valid_files_path: str):
    with open(valid_files_path + "/valid_files.json", 'r') as file:
        valid_files = json.load(file)
    file_list = []
    for file in valid_files:
        file_list.append(os.path.join(valid_files_path, file))
    return file_list

def return_agent_numbers(file_path: str):
    with open(file_path, 'r') as file:
        data = json.load(file)
    num_agents = len(data['objects'])
    num_roads = len(data['roads'])
    num_road_segments = 0
    for road in data['roads']:
        # Polyline road types contribute one segment per consecutive point pair
        if road['type'] == "road_edge" or road['type'] == "road_line" or road['type'] == "lane":
            num_road_segments += len(road['geometry']) - 1
        else:
            num_road_segments += 1
    return num_agents, num_road_segments

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Nocturne Dataset Binning Tool')
    parser.add_argument('--files_path', type=str, help='Path to the valid files', default=VALID_FILES_PATH, required=False)
    parser.add_argument('--final_binned_json_paths', type=str, help='Path to the final binned json paths', default=FINAL_BINNED_JSON_PATHS, required=False)
    parser.add_argument('--csv_path', type=str, help='Path to the csv file', default=CSV_PATH, required=False)
    parser.add_argument('--bin_size', type=int, help='Num envs in a bin', default=100, required=False)
    args = parser.parse_args()

    VALID_FILES_PATH = args.files_path
    FINAL_BINNED_JSON_PATHS = args.final_binned_json_paths
    CSV_PATH = args.csv_path
    bin_size = args.bin_size

    file_list = return_list_of_files(VALID_FILES_PATH)
    file_meta_data = []
    file_meta_data.append(["File Path", "Number of Agents", "Number of Roads"])
    for file in tqdm(file_list):
        num_entities = return_agent_numbers(file)
        file_meta_data.append([file, num_entities[0], num_entities[1]])

    # Save the per-file counts for future use
    with open(CSV_PATH, 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerows(file_meta_data)

    data = pd.read_csv(CSV_PATH)
    sorted_data = data.sort_values('Number of Agents')

    bins = []
    number_of_bins = len(sorted_data) // bin_size + (1 if len(sorted_data) % bin_size > 0 else 0)

    for i in range(number_of_bins):
        bin_start = i * bin_size
        bin_end = min((i + 1) * bin_size, len(sorted_data))
        bins.append(sorted_data.iloc[bin_start:bin_end])

    if not os.path.exists(FINAL_BINNED_JSON_PATHS):
        os.makedirs(FINAL_BINNED_JSON_PATHS)

    for i, bin_df in enumerate(bins):
        bin_folder = FINAL_BINNED_JSON_PATHS + f"/bin_{i}"
        if not os.path.exists(bin_folder):
            os.makedirs(bin_folder)
        print(bin_folder)
        d = {}
        for index, row in bin_df.iterrows():
            file_path = row['File Path']
            d[file_path] = [row['Number of Agents'], row['Number of Roads']]
        filepath = os.path.join(bin_folder, "valid_files.json")
        print(filepath)
        with open(filepath, 'w') as file:
            json.dump(d, file)
    print("Binning complete")
23 changes: 23 additions & 0 deletions scripts/bench_utils/plot_bench_results.py
@@ -0,0 +1,23 @@
from matplotlib import pyplot as plt
import pandas as pd
import argparse

def plot_afps_vs_num_envs():
    return

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Plot Benchmark Results')
    parser.add_argument('--file_path', type=str, help='Path to the benchmark results file', default='benchmark_results.csv', required=False)
    parser.add_argument('--mode', type=int, help='(1) Normal , (2) Randomized, (3) Binned', default=1, required=False)
    parser.add_argument('--x_axis', type=str, help='Column to plot on the x-axis', default='num_envs', required=False)
    parser.add_argument('--y_axis', type=str, help='Column to plot on the y-axis', default='afps', required=False)
    args = parser.parse_args()
    df = pd.read_csv(args.file_path)
    df = df.groupby(['actual_num_agents', 'actual_num_roads', 'useful_num_agents', 'useful_num_roads', 'num_envs']).mean().reset_index()
    df = df.sort_values(by='num_envs')
    plt.plot(df['num_envs'], df['time_to_reset'], label='Time to Reset')
    plt.plot(df['num_envs'], df['time_to_step'], label='Time to Step')
    plt.xlabel('Number of Environments')
    plt.ylabel('Time (s)')
    plt.legend()
    plt.show()
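
plot_afps_vs_num_envs is still a stub and the --x_axis/--y_axis flags are parsed but never used; a minimal sketch of how they could drive the plot, assuming the benchmark_results.csv columns written by bench.py:

```python
from matplotlib import pyplot as plt
import pandas as pd

def plot_afps_vs_num_envs(df: pd.DataFrame, x_axis: str = "num_envs", y_axis: str = "afps"):
    # Average repeated runs of the same world count before plotting
    grouped = df.groupby(x_axis, as_index=False)[y_axis].mean()
    plt.plot(grouped[x_axis], grouped[y_axis], marker="o")
    plt.xlabel(x_axis)
    plt.ylabel(y_axis)
    plt.show()
```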