@daphne-cornelisse
Last active August 6, 2024 23:49
GPUDrive: run an episode
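A minimal example that spins up a batch of GPUDrive simulation worlds, steps every controlled agent with random actions for one episode, and reads back observations, rewards, and done flags.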
import numpy as np
import torch
from pygpudrive.env.config import EnvConfig, RenderConfig, SceneConfig
from pygpudrive.env.env_torch import GPUDriveTorchEnv
# CONFIGURE
TOTAL_STEPS = 90
MAX_NUM_OBJECTS = 128
NUM_WORLDS = 50
env_config = EnvConfig()
render_config = RenderConfig()
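# SceneConfig takes a path to a scenario directory and the number of scenes
# to load; it is assumed here that "data" contains at least NUM_WORLDS scenes.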
scene_config = SceneConfig("data", NUM_WORLDS)
# MAKE ENV
env = GPUDriveTorchEnv(
    config=env_config,
    scene_config=scene_config,
    max_cont_agents=MAX_NUM_OBJECTS,  # Max number of agents to control per world
    device="cpu",
    render_config=render_config,
)
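# Note: GPUDrive is a GPU-accelerated simulator; device="cuda" should also be
# a valid choice here (an assumption, not verified against this gist).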
# RUN
obs = env.reset()
for _ in range(TOTAL_STEPS):
    # Sample a random action for every agent in every world
    rand_action = torch.Tensor(
        [
            [
                env.action_space.sample()
                for _ in range(MAX_NUM_OBJECTS * NUM_WORLDS)
            ]
        ]
    ).reshape(NUM_WORLDS, MAX_NUM_OBJECTS)

    # Step the environment and read back the per-agent results
    env.step_dynamics(rand_action)
    obs = env.get_obs()
    reward = env.get_rewards()
    done = env.get_dones()
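# Quick sanity check on the tensors from the final step. The leading batch
# dimensions follow from the setup above; the trailing observation dimension
# depends on EnvConfig and is an assumption, not part of the original gist.
print(obs.shape)     # expected: (NUM_WORLDS, MAX_NUM_OBJECTS, obs_dim)
print(reward.shape)  # expected: (NUM_WORLDS, MAX_NUM_OBJECTS)
print(done.shape)    # expected: (NUM_WORLDS, MAX_NUM_OBJECTS)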