Improved visualizer and gym environment (cloning all tensors) (#320)
daphne-cornelisse authored Jan 10, 2025
1 parent 0700d38 commit 2544847
Showing 10 changed files with 735 additions and 209 deletions.
33 changes: 33 additions & 0 deletions pygpudrive/datatypes/info.py
@@ -0,0 +1,33 @@
import torch
import gpudrive


class Info:
"""A class to represent the information about the state of the environment.
Initialized from info_tensor (src/bindings) of shape (num_worlds, max_agents_in_scene, 5).
For details, see `Info` in src/types.hpp.
"""

def __init__(self, info_tensor: torch.Tensor):
"""Initializes the ego state with an observation tensor."""
self.off_road = info_tensor[:, :, 0]
self.collided = info_tensor[:, :, 1:3].sum(axis=2)
self.goal_achieved = info_tensor[:, :, 3]

@classmethod
def from_tensor(
cls,
info_tensor: gpudrive.madrona.Tensor,
backend="torch",
device="cuda",
):
"""Creates an LocalEgoState from the agent_observation_tensor."""
if backend == "torch":
return cls(info_tensor.to_torch().clone().to(device))
elif backend == "jax":
raise NotImplementedError("JAX backend not implemented yet.")

@property
def shape(self):
"""Returns the shape of the info tensor (num_worlds, max_agents_in_scene)."""
return self.off_road.shape
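
A minimal usage sketch for the new `Info` wrapper. Only `Info.from_tensor` is confirmed by the code above; the `sim.info_tensor()` call and the per-world aggregation below are assumptions about how the exported tensor would typically be consumed each step.

```python
from pygpudrive.datatypes.info import Info

def per_world_outcomes(sim, device="cuda"):
    """Summarize agent outcomes per world from the (cloned) info tensor."""
    # `sim.info_tensor()` is assumed to be the Madrona export backing this class.
    info = Info.from_tensor(sim.info_tensor(), backend="torch", device=device)
    return {
        "off_road": info.off_road.sum(dim=1),        # shape: (num_worlds,)
        "collided": info.collided.sum(dim=1),         # columns 1:3 are summed per agent first
        "goal_achieved": info.goal_achieved.sum(dim=1),
    }
```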
4 changes: 2 additions & 2 deletions pygpudrive/datatypes/roadgraph.py
@@ -76,7 +76,7 @@ def from_tensor(
):
"""Creates a GlobalRoadGraphPoints instance from a tensor."""
if backend == "torch":
-            return cls(roadgraph_tensor.to_torch().to(device))
+            return cls(roadgraph_tensor.to_torch().clone().to(device))
elif backend == "jax":
raise NotImplementedError("JAX backend not implemented yet.")

@@ -148,7 +148,7 @@ def from_tensor(
):
"""Creates a GlobalRoadGraphPoints instance from a tensor."""
if backend == "torch":
-            return cls(local_roadgraph_tensor.to_torch().to(device))
+            return cls(local_roadgraph_tensor.to_torch().clone().to(device))
elif backend == "jax":
raise NotImplementedError("JAX backend not implemented yet.")

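
The `.clone()` calls added throughout this commit are easiest to motivate with a small, standalone PyTorch example: `to_torch()` exposes a view of simulator-owned memory, and a plain `.to(device)` can return an alias rather than a copy, so later in-place updates by the simulator would silently mutate observations the caller thought were frozen. The buffer below is a toy stand-in, not the real roadgraph tensor.

```python
import torch

# Toy stand-in for a simulator-owned buffer that is overwritten in place every step.
sim_buffer = torch.zeros(2, 3)

aliased = sim_buffer.to("cpu")           # same device/dtype: returns the same storage
snapshot = sim_buffer.clone().to("cpu")  # clone first: an independent copy

sim_buffer.add_(1.0)  # the simulator advances and rewrites its buffer in place

print(aliased[0, 0].item())   # 1.0 -- the "saved" view changed under our feet
print(snapshot[0, 0].item())  # 0.0 -- the cloned copy stays stable
```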
31 changes: 21 additions & 10 deletions pygpudrive/datatypes/trajectory.py
@@ -4,6 +4,7 @@

TRAJ_LEN = 91 # Length of the logged trajectory


@dataclass
class LogTrajectory:
"""A class to represent the logged human trajectories. Initialized from `expert_trajectory_tensor` (src/bindings.cpp).
@@ -16,25 +17,35 @@ class LogTrajectory:
actions: Expert actions performed by the agent(s) across the trajectory.
"""

-    def __init__(self, raw_logs: torch.Tensor, num_worlds: int, max_agents: int):
-        """Initializes the expert trajectory with an observation tensor."""
+    def __init__(
+        self, raw_logs: torch.Tensor, num_worlds: int, max_agents: int
+    ):
+        """Initializes the expert trajectory with an observation tensor."""
        self.pos_xy = raw_logs[:, :, : 2 * TRAJ_LEN].view(
            num_worlds, max_agents, TRAJ_LEN, -1
        )
-        self.vel_xy = raw_logs[:, :, 2 * TRAJ_LEN: 4 * TRAJ_LEN].view(
+        self.vel_xy = raw_logs[:, :, 2 * TRAJ_LEN : 4 * TRAJ_LEN].view(
            num_worlds, max_agents, TRAJ_LEN, -1
        )
-        self.yaw = raw_logs[:, :, 4 * TRAJ_LEN: 5 * TRAJ_LEN].view(
+        self.yaw = raw_logs[:, :, 4 * TRAJ_LEN : 5 * TRAJ_LEN].view(
            num_worlds, max_agents, TRAJ_LEN, -1
        )
-        self.inferred_actions = raw_logs[:, :, 6 * TRAJ_LEN: 16 * TRAJ_LEN].view(
-            num_worlds, max_agents, TRAJ_LEN, -1
-        )
+        self.inferred_actions = raw_logs[
+            :, :, 6 * TRAJ_LEN : 16 * TRAJ_LEN
+        ].view(num_worlds, max_agents, TRAJ_LEN, -1)

    @classmethod
-    def from_tensor(cls, expert_traj_tensor: gpudrive.madrona.Tensor, num_worlds: int, max_agents: int, backend="torch"):
+    def from_tensor(
+        cls,
+        expert_traj_tensor: gpudrive.madrona.Tensor,
+        num_worlds: int,
+        max_agents: int,
+        backend="torch",
+    ):
        """Creates an LogTrajectory from a tensor."""
        if backend == "torch":
-            return cls(expert_traj_tensor.to_torch(), num_worlds, max_agents) # Pass the entire tensor
+            return cls(
+                expert_traj_tensor.to_torch().clone(), num_worlds, max_agents
+            ) # Pass the entire tensor
        elif backend == "jax":
            raise NotImplementedError("JAX backend not implemented yet.")
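
To make the slicing above concrete, here is a toy reconstruction of how the flat per-agent log vector is carved into trajectory fields. The total feature width of `16 * TRAJ_LEN` and the skipped block at `5 * TRAJ_LEN : 6 * TRAJ_LEN` are inferred from the indices in `LogTrajectory`; the random tensor is only a placeholder for the real `expert_trajectory_tensor`.

```python
import torch

TRAJ_LEN = 91  # length of the logged trajectory, as above
num_worlds, max_agents = 2, 4

# Placeholder for the flat expert-trajectory tensor (num_worlds, max_agents, 16 * TRAJ_LEN).
raw_logs = torch.randn(num_worlds, max_agents, 16 * TRAJ_LEN)

pos_xy = raw_logs[:, :, : 2 * TRAJ_LEN].view(num_worlds, max_agents, TRAJ_LEN, -1)
vel_xy = raw_logs[:, :, 2 * TRAJ_LEN : 4 * TRAJ_LEN].view(num_worlds, max_agents, TRAJ_LEN, -1)
yaw = raw_logs[:, :, 4 * TRAJ_LEN : 5 * TRAJ_LEN].view(num_worlds, max_agents, TRAJ_LEN, -1)
# Columns 5 * TRAJ_LEN : 6 * TRAJ_LEN are not used by this class.
inferred_actions = raw_logs[:, :, 6 * TRAJ_LEN : 16 * TRAJ_LEN].view(num_worlds, max_agents, TRAJ_LEN, -1)

print(pos_xy.shape, vel_xy.shape, yaw.shape, inferred_actions.shape)
# torch.Size([2, 4, 91, 2]) torch.Size([2, 4, 91, 2]) torch.Size([2, 4, 91, 1]) torch.Size([2, 4, 91, 10])
```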
24 changes: 2 additions & 22 deletions pygpudrive/env/base_env.py
@@ -152,7 +152,7 @@ def _setup_environment_parameters(self):

return params

-    def _initialize_simulator(self, params, scene_config):
+    def _initialize_simulator(self, params, data_batch):
"""Initializes the simulation with the specified parameters.
Args:
@@ -167,11 +167,10 @@ def _initialize_simulator(self, params, scene_config):
else gpudrive.madrona.ExecMode.CUDA
)

-        self.dataset = select_scenes(scene_config)
sim = gpudrive.SimManager(
exec_mode=exec_mode,
gpu_id=0,
-            scenes=self.dataset,
+            scenes=data_batch,
params=params,
enable_batch_renderer=self.render_config
and self.render_config.render_mode
@@ -235,25 +234,6 @@ def _set_collision_behavior(self, params):
)
return params

-    def reinit_scenarios(self, dataset: List[str]):
-        """Resample the scenes.
-        Args:
-            dataset (List[str]): List of scene names to resample.
-        Returns:
-            None
-        """
-
-        # Resample the scenes
-        self.sim.set_maps(dataset)
-
-        # Re-initialize the controlled agents mask
-        self.cont_agent_mask = self.get_controlled_agents_mask()
-        self.max_agent_count = self.cont_agent_mask.shape[1]
-        self.num_valid_controlled_agents_across_worlds = (
-            self.cont_agent_mask.sum().item()
-        )

def close(self):
"""Destroy the simulator and visualizer."""
del self.sim
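
`_initialize_simulator` now receives a concrete batch of scene paths (`data_batch`) instead of a `SceneConfig`, which pairs naturally with the new `SceneDataLoader` further down. A rough sketch of that pairing follows; the `SimManager` keyword arguments are taken from the diff above, but the renderer-related arguments are omitted and `params` is assumed to come from `_setup_environment_parameters()`.

```python
import gpudrive

def make_sim(params, data_batch, use_gpu=False):
    """Build a SimManager from a list of scene files (mirrors _initialize_simulator)."""
    exec_mode = (
        gpudrive.madrona.ExecMode.CUDA if use_gpu else gpudrive.madrona.ExecMode.CPU
    )
    return gpudrive.SimManager(
        exec_mode=exec_mode,
        gpu_id=0,
        scenes=data_batch,  # batch of scenario file paths, e.g. from SceneDataLoader
        params=params,      # gpudrive params object from _setup_environment_parameters()
    )

# Hypothetical driver loop: resample scenes by rebuilding the sim per batch.
# from pygpudrive.env.dataset import SceneDataLoader
# loader = SceneDataLoader(root="data/processed/training", batch_size=4, dataset_size=100)
# for data_batch in loader:
#     sim = make_sim(params, data_batch)
```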
49 changes: 17 additions & 32 deletions pygpudrive/env/config.py
@@ -43,9 +43,9 @@ class EnvConfig:

# Road observation algorithm settings
road_obs_algorithm: str = "linear" # Algorithm for road observations
-    obs_radius: float = 100.0 # Radius for road observations
+    obs_radius: float = 50.0 # Radius for road observations
    polyline_reduction_threshold: float = (
-        1.0 # Threshold for polyline reduction
+        0.1 # Threshold for polyline reduction
)

# Dynamics model
@@ -56,7 +56,7 @@ class EnvConfig:
# Action space settings (if discretized)
# Classic or Invertible Bicycle dynamics model
steer_actions: torch.Tensor = torch.round(
-        torch.linspace(-torch.pi, torch.pi, 36), decimals=3
+        torch.linspace(-torch.pi, torch.pi, 42), decimals=3
)
accel_actions: torch.Tensor = torch.round(
torch.linspace(-4.0, 4.0, 16), decimals=3
@@ -93,7 +93,7 @@ class EnvConfig:
reward_type: str = "sparse_on_goal_achieved" # Alternatively, "weighted_combination", "distance_to_logs"

dist_to_goal_threshold: float = (
-        3.0 # Radius around goal considered as "goal achieved"
+        2.0 # Radius around goal considered as "goal achieved"
)

# C++ and Python shared settings (modifiable via C++ codebase)
@@ -111,10 +111,10 @@ class EnvConfig:
) # Length of an episode in the simulator
num_lidar_samples: int = gpudrive.numLidarSamples


#Param to init all objects:
-    #Param to init all objects:
+    # Param to init all objects:


class SelectionDiscipline(Enum):
"""Enum for selecting scenes discipline in dataset configuration."""

@@ -138,8 +138,10 @@ class SceneConfig:
seed (Optional[int]): Seed for random scene selection.
"""

-    path: str
-    num_scenes: int
+    batch_size: int # Number of scenes per batch (should be equal to number of worlds in the env).
+    dataset_size: int # Maximum number of files to include in the dataset.
+    path: str = None
+    num_scenes: int = None
discipline: SelectionDiscipline = SelectionDiscipline.PAD_N
k_unique_scenes: Optional[int] = None
seed: Optional[int] = None
@@ -148,8 +150,7 @@
class RenderMode(Enum):
"""Enum for specifying rendering mode."""

-    PYGAME_ABSOLUTE = "pygame_absolute"
-    PYGAME_EGOCENTRIC = "pygame_egocentric"
+    MATPLOTLIB = "matplotlib"
PYGAME_LIDAR = "pygame_lidar"
MADRONA_RGB = "madrona_rgb"
MADRONA_DEPTH = "madrona_depth"
@@ -171,31 +172,15 @@ class MadronaOption(Enum):

@dataclass
class RenderConfig:
"""Configuration settings for rendering the environment.
"""
Configuration settings for rendering the environment.
Attributes:
render_mode (RenderMode): The mode used for rendering the environment.
view_option (Enum): Rendering view option (e.g., RGB, human view).
resolution (Tuple[int, int]): Resolution of the rendered image.
line_thickness (int): Thickness of the road lines in the rendering.
draw_obj_idx (bool): Whether to draw object indices on objects.
obj_idx_font_size (int): Font size for object indices.
color_scheme (str): Color mode for the rendering ("light" or "dark").
render_mode (RenderMode): The mode used for rendering the environment. Default is MATPLOTLIB.
view_option (MadronaOption): Rendering view option used for the Madrona viewer (e.g., agent or top-down view).
"""

-    render_mode: RenderMode = RenderMode.PYGAME_ABSOLUTE
-    view_option: Enum = PygameOption.RGB
+    render_mode: RenderMode = RenderMode.MATPLOTLIB
+    view_option: Enum = None
    resolution: Tuple[int, int] = (1024, 1024)
-    line_thickness: int = 0.7
-    draw_obj_idx: bool = False
-    obj_idx_font_size: int = 9
-    color_scheme: str = "light"

-    def __str__(self) -> str:
-        """Returns a string representation of the rendering configuration."""
-        return (
-            f"RenderMode: {self.render_mode.value}, ViewOption: {self.view_option.value}, "
-            f"Resolution: {self.resolution}, LineThickness: {self.line_thickness}, "
-            f"DrawObjectIdx: {self.draw_obj_idx}, ObjectIdxFontSize: {self.obj_idx_font_size}, "
-            f"ColorScheme: {self.color_scheme}"
-        )
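
The defaults above tighten the observation radius (100.0 to 50.0), the polyline-reduction threshold (1.0 to 0.1), and the goal radius (3.0 to 2.0), and widen the steering discretization from 36 to 42 bins. Below is a small sketch of the joint action grid those tensors imply; enumerating it with `torch.cartesian_prod` is only one illustrative way to build the discrete action space, not necessarily how the environment constructs it.

```python
import torch

# New defaults from EnvConfig above.
steer_actions = torch.round(torch.linspace(-torch.pi, torch.pi, 42), decimals=3)
accel_actions = torch.round(torch.linspace(-4.0, 4.0, 16), decimals=3)

# One way to enumerate the joint (steer, accel) grid: 42 * 16 = 672 discrete actions.
joint_actions = torch.cartesian_prod(steer_actions, accel_actions)
print(joint_actions.shape)  # torch.Size([672, 2])
```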
135 changes: 135 additions & 0 deletions pygpudrive/env/dataset.py
@@ -0,0 +1,135 @@
from dataclasses import dataclass
from typing import Iterator, List
import os
import random


@dataclass
class SceneDataLoader:
root: str
batch_size: int
dataset_size: int
sample_with_replacement: bool = False
file_prefix: str = "tfrecord"
seed: int = 42
shuffle: bool = False

"""
A data loader for sampling batches of traffic scenarios from a directory of files.
Attributes:
root (str): Path to the directory containing scene files.
batch_size (int): Number of scenes per batch (usually equal to number of worlds in the env).
dataset_size (int): Maximum number of files to include in the dataset.
sample_with_replacement (bool): Whether to sample files with replacement.
file_prefix (str): Prefix for scene files to include in the dataset.
seed (int): Seed for random number generator to ensure reproducibility.
shuffle (bool): Whether to shuffle the dataset before batching.
"""

def __post_init__(self):
# Validate the path
if not os.path.exists(self.root):
raise FileNotFoundError(
f"The specified path does not exist: {self.root}"
)

# Set the random seed for reproducibility
self.random_gen = random.Random(self.seed)

# Create the dataset from valid files in the directory
self.dataset = [
os.path.join(self.root, scene)
for scene in sorted(os.listdir(self.root))
if scene.startswith(self.file_prefix)
]

# Adjust dataset size based on the provided dataset_size
self.dataset = self.dataset[
: min(self.dataset_size, len(self.dataset))
]

# If dataset_size < batch_size, repeat the dataset until it matches the batch size
if self.dataset_size < self.batch_size:
repeat_count = (self.batch_size // self.dataset_size) + 1
self.dataset *= repeat_count
self.dataset = self.dataset[: self.batch_size]

# Shuffle the dataset if required
if self.shuffle:
self.random_gen.shuffle(self.dataset)

# Initialize state for iteration
self._reset_indices()

def _reset_indices(self):
"""Reset indices for sampling."""
if self.sample_with_replacement:
self.indices = [
self.random_gen.randint(0, len(self.dataset) - 1)
for _ in range(len(self.dataset))
]
else:
self.indices = list(range(len(self.dataset)))
self.current_index = 0

def __iter__(self) -> Iterator[List[str]]:
self._reset_indices()
return self

def __len__(self):
"""Get the number of batches in the dataloader."""
return len(self.dataset) // self.batch_size

def __next__(self) -> List[str]:
if self.sample_with_replacement:
# Get the next batch of "deterministic" random indices
batch_indices = self.indices[
self.current_index : self.current_index + self.batch_size
]
self.current_index += self.batch_size

if self.current_index > len(self.indices):
raise StopIteration

# Retrieve the corresponding scenes
batch = [self.dataset[i] for i in batch_indices]
else:
if self.current_index >= len(self.indices):
raise StopIteration

# Get the next batch of indices
end_index = min(
self.current_index + self.batch_size, len(self.indices)
)
batch_indices = self.indices[self.current_index : end_index]
self.current_index = end_index

# Retrieve the corresponding scenes
batch = [self.dataset[i] for i in batch_indices]

return batch


# Example usage
if __name__ == "__main__":
from pprint import pprint

data_loader = SceneDataLoader(
root="data/processed/training",
batch_size=2,
dataset_size=2,
sample_with_replacement=True, # Sampling with replacement
shuffle=False, # Shuffle the dataset before batching
)

print("\nDataset")
pprint(data_loader.dataset[:5])

print("\nBatch 1")
batch = next(iter(data_loader))
pprint(batch)

print("\nBatch 2")
batch = next(iter(data_loader))
pprint(batch)
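
The `__main__` block above needs a real `data/processed/training` directory. The variation below fabricates a tiny dataset in a temporary directory so the batching behavior can be checked anywhere, and it exercises the padding path where `dataset_size` is smaller than `batch_size`; the file names are placeholders that merely match the default `tfrecord` prefix.

```python
import os
import tempfile

from pygpudrive.env.dataset import SceneDataLoader

with tempfile.TemporaryDirectory() as root:
    # Fabricate empty files that match the default "tfrecord" prefix.
    for i in range(3):
        open(os.path.join(root, f"tfrecord_{i:03d}.json"), "w").close()

    loader = SceneDataLoader(
        root=root,
        batch_size=4,    # more worlds than unique scenes ...
        dataset_size=3,  # ... so the dataset is repeated to fill one batch
        shuffle=False,
    )

    print(len(loader.dataset))  # 4: three files padded by repetition
    print(next(iter(loader)))   # one batch of four scene paths
```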