Skip to content

Commit 8589523

Browse files
Change import gymnasium to import gymnasium as gym (#20)
1 parent 54872ab commit 8589523

File tree

110 files changed

+658
-634
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

110 files changed

+658
-634
lines changed

gymnasium/core.py

+5-5
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ class Env(Generic[ObsType, ActType]):
5151
- :attr:`action_space` - The Space object corresponding to valid actions
5252
- :attr:`observation_space` - The Space object corresponding to valid observations
5353
- :attr:`reward_range` - A tuple corresponding to the minimum and maximum possible rewards
54-
- :attr:`spec` - An environment spec that contains the information used to initialise the environment from `gymnasium.make`
54+
- :attr:`spec` - An environment spec that contains the information used to initialise the environment from `gym.make`
5555
- :attr:`metadata` - The metadata of the environment, i.e. render modes
5656
- :attr:`np_random` - The random number generator for the environment
5757
@@ -187,7 +187,7 @@ def unwrapped(self) -> "Env":
187187
"""Returns the base non-wrapped environment.
188188
189189
Returns:
190-
Env: The base non-wrapped gymnasium.Env instance
190+
Env: The base non-wrapped gym.Env instance
191191
"""
192192
return self
193193

@@ -361,7 +361,7 @@ class ObservationWrapper(Wrapper):
361361
``observation["target_position"] - observation["agent_position"]``. For this, you could implement an
362362
observation wrapper like this::
363363
364-
class RelativePosition(gymnasium.ObservationWrapper):
364+
class RelativePosition(gym.ObservationWrapper):
365365
def __init__(self, env):
366366
super().__init__(env)
367367
self.observation_space = Box(shape=(2,), low=-np.inf, high=np.inf)
@@ -435,7 +435,7 @@ class ActionWrapper(Wrapper):
435435
Let’s say you have an environment with action space of type :class:`gymnasium.spaces.Box`, but you would only like
436436
to use a finite subset of actions. Then, you might want to implement the following wrapper::
437437
438-
class DiscreteActions(gymnasium.ActionWrapper):
438+
class DiscreteActions(gym.ActionWrapper):
439439
def __init__(self, env, disc_to_cont):
440440
super().__init__(env)
441441
self.disc_to_cont = disc_to_cont
@@ -445,7 +445,7 @@ def action(self, act):
445445
return self.disc_to_cont[act]
446446
447447
if __name__ == "__main__":
448-
env = gymnasium.make("LunarLanderContinuous-v2")
448+
env = gym.make("LunarLanderContinuous-v2")
449449
wrapped_env = DiscreteActions(env, [np.array([1,0]), np.array([-1,0]),
450450
np.array([0,1]), np.array([0,-1])])
451451
print(wrapped_env.action_space) #Discrete(4)

gymnasium/envs/box2d/bipedal_walker.py

+7-7
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
import numpy as np
77

8-
import gymnasium
8+
import gymnasium as gym
99
from gymnasium import error, spaces
1010
from gymnasium.error import DependencyNotInstalled
1111
from gymnasium.utils import EzPickle
@@ -100,7 +100,7 @@ def EndContact(self, contact):
100100
leg.ground_contact = False
101101

102102

103-
class BipedalWalker(gymnasium.Env, EzPickle):
103+
class BipedalWalker(gym.Env, EzPickle):
104104
"""
105105
### Description
106106
This is a simple 4-joint walker robot environment.
@@ -144,12 +144,12 @@ class BipedalWalker(gymnasium.Env, EzPickle):
144144
To use the _hardcore_ environment, you need to specify the
145145
`hardcore=True` argument like below:
146146
```python
147-
import gymnasium
148-
env = gymnasium.make("BipedalWalker-v3", hardcore=True)
147+
import gymnasium as gym
148+
env = gym.make("BipedalWalker-v3", hardcore=True)
149149
```
150150
151151
### Version History
152-
- v3: returns closest lidar trace instead of furthest;
152+
- v3: Returns the closest lidar trace instead of furthest;
153153
faster video recording
154154
- v2: Count energy spent
155155
- v1: Legs now report contact with ground; motors have higher torque and
@@ -762,8 +762,8 @@ def __init__(self):
762762
raise error.Error(
763763
"Error initializing BipedalWalkerHardcore Environment.\n"
764764
"Currently, we do not support initializing this mode of environment by calling the class directly.\n"
765-
"To use this environment, instead create it by specifying the hardcore keyword in gymnasium.make, i.e.\n"
766-
'gymnasium.make("BipedalWalker-v3", hardcore=True)'
765+
"To use this environment, instead create it by specifying the hardcore keyword in gym.make, i.e.\n"
766+
'gym.make("BipedalWalker-v3", hardcore=True)'
767767
)
768768

769769

gymnasium/envs/box2d/car_racing.py

+15-13
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
import numpy as np
77

8-
import gymnasium
8+
import gymnasium as gym
99
from gymnasium import spaces
1010
from gymnasium.envs.box2d.car_dynamics import Car
1111
from gymnasium.error import DependencyNotInstalled, InvalidAction
@@ -104,7 +104,7 @@ def _contact(self, contact, begin):
104104
obj.tiles.remove(tile)
105105

106106

107-
class CarRacing(gymnasium.Env, EzPickle):
107+
class CarRacing(gym.Env, EzPickle):
108108
"""
109109
### Description
110110
The easiest control task to learn from pixels - a top-down
@@ -127,7 +127,8 @@ class CarRacing(gymnasium.Env, EzPickle):
127127
There are 5 actions: do nothing, steer left, steer right, gas, brake.
128128
129129
### Observation Space
130-
State consists of 96x96 pixels.
130+
131+
A top-down 96x96 RGB image of the car and race track.
131132
132133
### Rewards
133134
The reward is -0.1 every frame and +1000/N for every track tile visited,
@@ -139,8 +140,8 @@ class CarRacing(gymnasium.Env, EzPickle):
139140
The car starts at rest in the center of the road.
140141
141142
### Episode Termination
142-
The episode finishes when all of the tiles are visited. The car can also go
143-
outside of the playfield - that is, far off the track, in which case it will
143+
The episode finishes when all the tiles are visited. The car can also go
144+
outside the playfield - that is, far off the track, in which case it will
144145
receive -100 reward and die.
145146
146147
### Arguments
@@ -158,17 +159,18 @@ class CarRacing(gymnasium.Env, EzPickle):
158159
Correspondingly, passing the option `options["randomize"] = False` will not change the current colour of the environment.
159160
`domain_randomize` must be `True` on init for this argument to work.
160161
Example usage:
161-
```py
162-
env = gymnasium.make("CarRacing-v1", domain_randomize=True)
162+
```python
163+
import gymnasium as gym
164+
env = gym.make("CarRacing-v1", domain_randomize=True)
163165
164-
# normal reset, this changes the colour scheme by default
165-
env.reset()
166+
# normal reset, this changes the colour scheme by default
167+
env.reset()
166168
167-
# reset with colour scheme change
168-
env.reset(options={"randomize": True})
169+
# reset with colour scheme change
170+
env.reset(options={"randomize": True})
169171
170-
# reset with no colour scheme change
171-
env.reset(options={"randomize": False})
172+
# reset with no colour scheme change
173+
env.reset(options={"randomize": False})
172174
```
173175
174176
### Version History

gymnasium/envs/box2d/lunar_lander.py

+7-7
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66

77
import numpy as np
88

9-
import gymnasium
9+
import gymnasium as gym
1010
from gymnasium import error, spaces
1111
from gymnasium.error import DependencyNotInstalled
1212
from gymnasium.utils import EzPickle, colorize
@@ -74,7 +74,7 @@ def EndContact(self, contact):
7474
self.env.legs[i].ground_contact = False
7575

7676

77-
class LunarLander(gymnasium.Env, EzPickle):
77+
class LunarLander(gym.Env, EzPickle):
7878
"""
7979
### Description
8080
This environment is a classic rocket trajectory optimization problem.
@@ -141,8 +141,8 @@ class LunarLander(gymnasium.Env, EzPickle):
141141
To use the _continuous_ environment, you need to specify the
142142
`continuous=True` argument like below:
143143
```python
144-
import gymnasium
145-
env = gymnasium.make(
144+
import gymnasium as gym
145+
env = gym.make(
146146
"LunarLander-v2",
147147
continuous: bool = False,
148148
gravity: float = -10.0,
@@ -173,7 +173,7 @@ class LunarLander(gymnasium.Env, EzPickle):
173173
`turbulence_power` dictates the maximum magnitude of rotational wind applied to the craft. The recommended value for `turbulence_power` is between 0.0 and 2.0.
174174
175175
### Version History
176-
- v2: Count energy spent and in v0.24, added turbulance with wind power and turbulence_power parameters
176+
- v2: Count energy spent and in v0.24, added turbulence with wind power and turbulence_power parameters
177177
- v1: Legs contact with ground added in state vector; contact with ground
178178
give +10 reward points, and -10 if then lose contact; reward
179179
renormalized to 200; harder initial random push.
@@ -802,8 +802,8 @@ def __init__(self):
802802
raise error.Error(
803803
"Error initializing LunarLanderContinuous Environment.\n"
804804
"Currently, we do not support initializing this mode of environment by calling the class directly.\n"
805-
"To use this environment, instead create it by specifying the continuous keyword in gymnasium.make, i.e.\n"
806-
'gymnasium.make("LunarLander-v2", continuous=True)'
805+
"To use this environment, instead create it by specifying the continuous keyword in gym.make, i.e.\n"
806+
'gym.make("LunarLander-v2", continuous=True)'
807807
)
808808

809809

gymnasium/envs/classic_control/acrobot.py

+11-7
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import numpy as np
55
from numpy import cos, pi, sin
66

7-
from gymnasium import core, spaces
7+
from gymnasium import Env, spaces
88
from gymnasium.error import DependencyNotInstalled
99

1010
__copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy"
@@ -23,7 +23,7 @@
2323
from gymnasium.envs.classic_control import utils
2424

2525

26-
class AcrobotEnv(core.Env):
26+
class AcrobotEnv(Env):
2727
"""
2828
### Description
2929
@@ -94,20 +94,24 @@ class AcrobotEnv(core.Env):
9494
9595
### Arguments
9696
97-
No additional arguments are currently supported.
97+
No additional arguments are currently supported during construction.
9898
99+
```python
100+
import gymnasium as gym
101+
env = gym.make('Acrobot-v1')
99102
```
100-
env = gymnasium.make('Acrobot-v1')
101-
```
103+
104+
On reset, the `options` parameter allows the user to change the bounds used to determine
105+
the new random state.
102106
103107
By default, the dynamics of the acrobot follow those described in Sutton and Barto's book
104108
[Reinforcement Learning: An Introduction](http://incompleteideas.net/book/11/node4.html).
105109
However, a `book_or_nips` parameter can be modified to change the pendulum dynamics to those described
106110
in the original [NeurIPS paper](https://papers.nips.cc/paper/1995/hash/8f1d43620bc6bb580df6e80b0dc05c48-Abstract.html).
107111
108-
```
112+
```python
109113
# To change the dynamics as described above
110-
env.env.book_or_nips = 'nips'
114+
env.unwrapped.book_or_nips = 'nips'
111115
```
112116
113117
See the following note for details:

gymnasium/envs/classic_control/cartpole.py

+7-5
Original file line numberDiff line numberDiff line change
@@ -8,13 +8,13 @@
88

99
import numpy as np
1010

11-
import gymnasium
11+
import gymnasium as gym
1212
from gymnasium import logger, spaces
1313
from gymnasium.envs.classic_control import utils
1414
from gymnasium.error import DependencyNotInstalled
1515

1616

17-
class CartPoleEnv(gymnasium.Env[np.ndarray, Union[int, np.ndarray]]):
17+
class CartPoleEnv(gym.Env[np.ndarray, Union[int, np.ndarray]]):
1818
"""
1919
### Description
2020
@@ -74,11 +74,13 @@ class CartPoleEnv(gymnasium.Env[np.ndarray, Union[int, np.ndarray]]):
7474
7575
### Arguments
7676
77-
```
78-
gymnasium.make('CartPole-v1')
77+
```python
78+
import gymnasium as gym
79+
gym.make('CartPole-v1')
7980
```
8081
81-
No additional arguments are currently supported.
82+
On reset, the `options` parameter allows the user to change the bounds used to determine
83+
the new random state.
8284
"""
8385

8486
metadata = {

gymnasium/envs/classic_control/continuous_mountain_car.py

+8-4
Original file line numberDiff line numberDiff line change
@@ -18,13 +18,13 @@
1818

1919
import numpy as np
2020

21-
import gymnasium
21+
import gymnasium as gym
2222
from gymnasium import spaces
2323
from gymnasium.envs.classic_control import utils
2424
from gymnasium.error import DependencyNotInstalled
2525

2626

27-
class Continuous_MountainCarEnv(gymnasium.Env):
27+
class Continuous_MountainCarEnv(gym.Env):
2828
"""
2929
### Description
3030
@@ -91,9 +91,13 @@ class Continuous_MountainCarEnv(gymnasium.Env):
9191
9292
### Arguments
9393
94+
```python
95+
import gymnasium as gym
96+
gym.make('MountainCarContinuous-v0')
9497
```
95-
gymnasium.make('MountainCarContinuous-v0')
96-
```
98+
99+
On reset, the `options` parameter allows the user to change the bounds used to determine
100+
the new random state.
97101
98102
### Version History
99103

gymnasium/envs/classic_control/mountain_car.py

+8-5
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,13 @@
77

88
import numpy as np
99

10-
import gymnasium
10+
import gymnasium as gym
1111
from gymnasium import spaces
1212
from gymnasium.envs.classic_control import utils
1313
from gymnasium.error import DependencyNotInstalled
1414

1515

16-
class MountainCarEnv(gymnasium.Env):
16+
class MountainCarEnv(gym.Env):
1717
"""
1818
### Description
1919
@@ -66,7 +66,6 @@ class MountainCarEnv(gymnasium.Env):
6666
upon collision with the wall. The position is clipped to the range `[-1.2, 0.6]` and
6767
velocity is clipped to the range `[-0.07, 0.07]`.
6868
69-
7069
### Reward:
7170
7271
The goal is to reach the flag placed on top of the right hill as quickly as possible, as such the agent is
@@ -86,9 +85,13 @@ class MountainCarEnv(gymnasium.Env):
8685
8786
### Arguments
8887
88+
```python
89+
import gymnasium as gym
90+
gym.make('MountainCar-v0')
8991
```
90-
gymnasium.make('MountainCar-v0')
91-
```
92+
93+
On reset, the `options` parameter allows the user to change the bounds used to determine
94+
the new random state.
9295
9396
### Version History
9497

gymnasium/envs/classic_control/pendulum.py

+8-4
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55

66
import numpy as np
77

8-
import gymnasium
8+
import gymnasium as gym
99
from gymnasium import spaces
1010
from gymnasium.envs.classic_control import utils
1111
from gymnasium.error import DependencyNotInstalled
@@ -14,7 +14,7 @@
1414
DEFAULT_Y = 1.0
1515

1616

17-
class PendulumEnv(gymnasium.Env):
17+
class PendulumEnv(gym.Env):
1818
"""
1919
### Description
2020
@@ -76,9 +76,13 @@ class PendulumEnv(gymnasium.Env):
7676
- `g`: acceleration of gravity measured in *(m s<sup>-2</sup>)* used to calculate the pendulum dynamics.
7777
The default value is g = 10.0 .
7878
79+
```python
80+
import gymnasium as gym
81+
gym.make('Pendulum-v1', g=9.81)
7982
```
80-
gymnasium.make('Pendulum-v1', g=9.81)
81-
```
83+
84+
On reset, the `options` parameter allows the user to change the bounds used to determine
85+
the new random state.
8286
8387
### Version History
8488

0 commit comments

Comments
 (0)