remove the stub calls from rllab

This commit is contained in:
Jie Tan 2017-03-10 13:41:05 -08:00
parent 37a809f5d1
commit 8c27a62e04
3 changed files with 2 additions and 62 deletions

View File

@@ -1,27 +0,0 @@
"""One-line documentation for gym_example module.
A detailed description of gym_example.
"""
import gym
from envs.bullet.minitaur import MinitaurWalkEnv
import setuptools
import time
import numpy as np
def main():
    """Run a single episode of MinitaurWalkEnv with a constant action.

    Prints the observation and action at every step, then reports how many
    timesteps the episode lasted once the environment signals `done`.
    """
    env = gym.make('MinitaurWalkEnv-v0')
    # Constant 16-dim action; presumably per-motor (amplitude, phase, ...)
    # parameters for the minitaur -- TODO confirm against MinitaurWalkEnv.
    # Hoisted out of the loop: it never changes between steps.
    action = np.array(
        [1.3, 0, 0, 0, 1.3, 0, 0, 0, 1.3, 3.14, 0, 0, 1.3, 3.14, 0, 0])
    for i_episode in range(1):
        observation = env.reset()
        done = False
        t = 0  # timestep counter; the original referenced an undefined `t`
        while not done:
            print(observation)
            print(action)
            observation, reward, done, info = env.step(action)
            t += 1
            if done:
                print("Episode finished after {} timesteps".format(t))
                break


if __name__ == '__main__':
    main()

View File

@@ -3,12 +3,7 @@ from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
import subprocess
import time
# NOTE(review): stub() appears to wrap module-level callables so rllab's
# instrumented launcher (run_experiment_lite) can record calls instead of
# executing them immediately -- confirm against rllab.misc.instrument docs.
stub(globals())
# CartPole running on the Bullet physics backend, exposed through rllab's
# gym adapter; normalize() presumably rescales action/observation spaces --
# verify against rllab.envs.normalized_env.
env = normalize(GymEnv("CartPoleBulletEnv-v0"))
@@ -33,19 +28,4 @@ algo = TRPO(
# plot=True,
)
# Commented-out bootstrap for an external Bullet physics server, kept for
# reference: launch script, then a short wait for the server to come up.
#cmdStartBulletServer=['~/Projects/rllab/bullet_examples/run_physics_server.sh']
#subprocess.Popen(cmdStartBulletServer, shell=True)
#time.sleep(1)
# NOTE(review): with stub(globals()) in effect earlier in this file,
# algo.train() here is a recorded (stubbed) call that run_experiment_lite
# replays in worker processes -- confirm against rllab docs.
run_experiment_lite(
algo.train(),
# Number of parallel workers for sampling
n_parallel=1,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="last",
# Specifies the seed for the experiment. If this is not provided, a random seed
# will be used
seed=1,
# plot=True,
)
# Direct training entry point (runs in-process, no experiment runner).
algo.train()

View File

@@ -6,9 +6,6 @@ from sandbox.rocky.tf.envs.base import TfEnv
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import stub, run_experiment_lite
# NOTE(review): stub() appears to wrap module-level callables for rllab's
# instrumented launcher -- confirm against rllab.misc.instrument docs.
stub(globals())
# Bullet-backed CartPole via the gym adapter, normalized, then wrapped in
# TfEnv for the TensorFlow (sandbox.rocky.tf) implementation of TRPO.
env = TfEnv(normalize(GymEnv("CartPoleBulletEnv-v0")))
@@ -35,14 +32,4 @@ algo = TRPO(
#plot=True,
)
# NOTE(review): with stub(globals()) in effect earlier in this file,
# algo.train() here is a recorded (stubbed) call that run_experiment_lite
# replays in worker processes -- confirm against rllab docs.
run_experiment_lite(
algo.train(),
# Number of parallel workers for sampling
n_parallel=1,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="last",
# Specifies the seed for the experiment. If this is not provided, a random seed
# will be used
seed=1,
#plot=True,
)
# Direct training entry point (runs in-process, no experiment runner).
algo.train()