
Commit

Merge dev into branch
daphne-cornelisse committed Jan 10, 2025
2 parents bb640f5 + 2544847 commit dddcdf5
Showing 7 changed files with 312 additions and 93 deletions.
31 changes: 21 additions & 10 deletions pygpudrive/datatypes/trajectory.py
@@ -4,6 +4,7 @@

TRAJ_LEN = 91 # Length of the logged trajectory


@dataclass
class LogTrajectory:
"""A class to represent the logged human trajectories. Initialized from `expert_trajectory_tensor` (src/bindings.cpp).
@@ -16,25 +17,35 @@ class LogTrajectory:
actions: Expert actions performed by the agent(s) across the trajectory.
"""

def __init__(self, raw_logs: torch.Tensor, num_worlds: int, max_agents: int):
"""Initializes the expert trajectory with an observation tensor."""
def __init__(
self, raw_logs: torch.Tensor, num_worlds: int, max_agents: int
):
"""Initializes the expert trajectory with an observation tensor."""
self.pos_xy = raw_logs[:, :, : 2 * TRAJ_LEN].view(
num_worlds, max_agents, TRAJ_LEN, -1
)
self.vel_xy = raw_logs[:, :, 2 * TRAJ_LEN: 4 * TRAJ_LEN].view(
num_worlds, max_agents, TRAJ_LEN, -1
)
self.yaw = raw_logs[:, :, 4 * TRAJ_LEN: 5 * TRAJ_LEN].view(
self.vel_xy = raw_logs[:, :, 2 * TRAJ_LEN : 4 * TRAJ_LEN].view(
num_worlds, max_agents, TRAJ_LEN, -1
)
self.inferred_actions = raw_logs[:, :, 6 * TRAJ_LEN: 16 * TRAJ_LEN].view(
self.yaw = raw_logs[:, :, 4 * TRAJ_LEN : 5 * TRAJ_LEN].view(
num_worlds, max_agents, TRAJ_LEN, -1
)
self.inferred_actions = raw_logs[
:, :, 6 * TRAJ_LEN : 16 * TRAJ_LEN
].view(num_worlds, max_agents, TRAJ_LEN, -1)

@classmethod
def from_tensor(cls, expert_traj_tensor: gpudrive.madrona.Tensor, num_worlds: int, max_agents: int, backend="torch"):
def from_tensor(
cls,
expert_traj_tensor: gpudrive.madrona.Tensor,
num_worlds: int,
max_agents: int,
backend="torch",
):
"""Creates an LogTrajectory from a tensor."""
if backend == "torch":
return cls(expert_traj_tensor.to_torch().clone(), num_worlds, max_agents) # Pass the entire tensor
return cls(
expert_traj_tensor.to_torch().clone(), num_worlds, max_agents
) # Pass the entire tensor
elif backend == "jax":
raise NotImplementedError("JAX backend not implemented yet.")
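
As a quick orientation for the reshapes above, here is a minimal sketch that feeds a synthetic tensor through LogTrajectory; the 16 * TRAJ_LEN feature width and the zero-filled stand-in for the simulator's expert_trajectory_tensor binding are assumptions for illustration, not part of this commit.

import torch

from pygpudrive.datatypes.trajectory import TRAJ_LEN, LogTrajectory

num_worlds, max_agents = 2, 4
# Synthetic stand-in for the raw expert trajectory tensor produced by the
# simulator binding mentioned in the class docstring.
raw_logs = torch.zeros(num_worlds, max_agents, 16 * TRAJ_LEN)

traj = LogTrajectory(raw_logs, num_worlds, max_agents)
print(traj.pos_xy.shape)            # torch.Size([2, 4, 91, 2])
print(traj.vel_xy.shape)            # torch.Size([2, 4, 91, 2])
print(traj.yaw.shape)               # torch.Size([2, 4, 91, 1])
print(traj.inferred_actions.shape)  # torch.Size([2, 4, 91, 10])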
35 changes: 9 additions & 26 deletions pygpudrive/env/config.py
@@ -29,9 +29,9 @@ class EnvConfig:
road_map_obs: bool = True # Include road graph in observations
partner_obs: bool = True # Include partner vehicle info in observations
norm_obs: bool = True # Normalize observations

# Maximum number of controlled agents in the scene
max_controlled_agents: int = gpudrive.kMaxAgentCount
num_worlds: int = 1 # Number of worlds in the environment

# NOTE: If disable_classic_obs is True, ego_state, road_map_obs,
@@ -154,8 +154,7 @@ class SceneConfig:
class RenderMode(Enum):
"""Enum for specifying rendering mode."""

PYGAME_ABSOLUTE = "pygame_absolute"
PYGAME_EGOCENTRIC = "pygame_egocentric"
MATPLOTLIB = "matplotlib"
PYGAME_LIDAR = "pygame_lidar"
MADRONA_RGB = "madrona_rgb"
MADRONA_DEPTH = "madrona_depth"
@@ -177,31 +176,15 @@ class MadronaOption(Enum):

@dataclass
class RenderConfig:
"""Configuration settings for rendering the environment.
"""
Configuration settings for rendering the environment.
Attributes:
render_mode (RenderMode): The mode used for rendering the environment.
view_option (Enum): Rendering view option (e.g., RGB, human view).
resolution (Tuple[int, int]): Resolution of the rendered image.
line_thickness (int): Thickness of the road lines in the rendering.
draw_obj_idx (bool): Whether to draw object indices on objects.
obj_idx_font_size (int): Font size for object indices.
color_scheme (str): Color mode for the rendering ("light" or "dark").
render_mode (RenderMode): The mode used for rendering the environment. Default is MATPLOTLIB.
view_option (MadronaOption): Rendering view option used for the Madrona viewer (e.g., agent or top-down view).
"""

render_mode: RenderMode = RenderMode.PYGAME_ABSOLUTE
view_option: Enum = PygameOption.RGB
render_mode: RenderMode = RenderMode.MATPLOTLIB
view_option: Enum = None
resolution: Tuple[int, int] = (1024, 1024)
line_thickness: int = 0.7
draw_obj_idx: bool = False
obj_idx_font_size: int = 9
color_scheme: str = "light"

def __str__(self) -> str:
"""Returns a string representation of the rendering configuration."""
return (
f"RenderMode: {self.render_mode.value}, ViewOption: {self.view_option.value}, "
f"Resolution: {self.resolution}, LineThickness: {self.line_thickness}, "
f"DrawObjectIdx: {self.draw_obj_idx}, ObjectIdxFontSize: {self.obj_idx_font_size}, "
f"ColorScheme: {self.color_scheme}"
)
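
With matplotlib now the default renderer, a minimal sketch of building the updated config looks like this; the surviving field set is inferred from the additions above, so treat it as an assumption rather than the definitive interface.

from pygpudrive.env.config import RenderConfig, RenderMode

render_config = RenderConfig()      # render_mode defaults to RenderMode.MATPLOTLIB
print(render_config.render_mode)    # RenderMode.MATPLOTLIB
print(render_config.view_option)    # None unless a Madrona view option is supplied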
31 changes: 17 additions & 14 deletions pygpudrive/env/dataset.py
@@ -65,7 +65,10 @@ def __post_init__(self):
def _reset_indices(self):
"""Reset indices for sampling."""
if self.sample_with_replacement:
self.indices = [self.random_gen.randint(0, len(self.dataset) - 1) for _ in range(len(self.dataset))]
self.indices = [
self.random_gen.randint(0, len(self.dataset) - 1)
for _ in range(len(self.dataset))
]
else:
self.indices = list(range(len(self.dataset)))
self.current_index = 0
@@ -81,7 +84,9 @@ def __next__(self) -> List[str]:
def __next__(self) -> List[str]:
if self.sample_with_replacement:
# Get the next batch of "deterministic" random indices
batch_indices = self.indices[self.current_index:self.current_index + self.batch_size]
batch_indices = self.indices[
self.current_index : self.current_index + self.batch_size
]
self.current_index += self.batch_size

if self.current_index > len(self.indices):
@@ -94,8 +99,10 @@ def __next__(self) -> List[str]:
raise StopIteration

# Get the next batch of indices
end_index = min(self.current_index + self.batch_size, len(self.indices))
batch_indices = self.indices[self.current_index:end_index]
end_index = min(
self.current_index + self.batch_size, len(self.indices)
)
batch_indices = self.indices[self.current_index : end_index]
self.current_index = end_index

# Retrieve the corresponding scenes
@@ -107,26 +114,22 @@
# Example usage
if __name__ == "__main__":
from pprint import pprint

data_loader = SceneDataLoader(
root="data/processed/training",
batch_size=2,
dataset_size=2,
sample_with_replacement=True, # Sampling with replacement
shuffle=False, # Shuffle the dataset before batching
)
print('\nDataset')

print("\nDataset")
pprint(data_loader.dataset[:5])


print('\nBatch 1')

print("\nBatch 1")
batch = next(iter(data_loader))
pprint(batch)


print('\nBatch 2')
print("\nBatch 2")
batch = next(iter(data_loader))
pprint(batch)

print('done')
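
The reformatted sampling logic pre-draws every index when sample_with_replacement is set, so a fixed seed yields the same batches on every run. Below is a standalone sketch of that pattern, assuming random_gen is a seeded random.Random and using hypothetical scene names; the actual seeding lives in SceneDataLoader and is not shown in this diff.

import random

dataset = ["scene_%04d.json" % i for i in range(10)]  # hypothetical scene files
batch_size = 2
random_gen = random.Random(42)  # assumed seed; SceneDataLoader owns the real generator

# Sampling with replacement: draw all indices up front so iteration is deterministic.
indices = [random_gen.randint(0, len(dataset) - 1) for _ in range(len(dataset))]

current_index = 0
batch_indices = indices[current_index : current_index + batch_size]
print([dataset[i] for i in batch_indices])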
4 changes: 2 additions & 2 deletions pygpudrive/env/env_torch.py
@@ -73,13 +73,13 @@ def __init__(
self.single_observation_space = gymnasium.spaces.Box(
low=0, high=255, shape=(self.observation_space.shape[-1],), dtype=np.float32
)

self._setup_action_space(action_type)
self.num_agents = self.cont_agent_mask.sum().item()
self.single_action_space = self.action_space
self.action_space = pufferlib.spaces.joint_space(self.single_action_space, self.num_agents)
self.observation_space = pufferlib.spaces.joint_space(self.single_observation_space, self.num_agents)

self.info_dim = 5 # Number of info features
self.episode_len = self.config.episode_len

2 changes: 1 addition & 1 deletion pygpudrive/env/viz.py
@@ -4,7 +4,7 @@
import math
import gpudrive

from pygpudrive.env.config import MadronaOption, PygameOption, RenderMode
from pygpudrive.env.config import MadronaOption, RenderMode

# AGENT COLORS
PINK = (255, 105, 180)