Merge pull request #1085 from jietan/pullRequest
A simple trained DDPG agent for minitaur walking
This commit is contained in commit 08b35563be.
examples/pybullet/gym/agents/__init__.py (new file, 0 lines)
examples/pybullet/gym/agents/actor_net.py (new file, 21 lines)
@@ -0,0 +1,21 @@
"""An actor network."""

import tensorflow as tf
import sonnet as snt


class ActorNetwork(snt.AbstractModule):
  """An actor network as a Sonnet module."""

  def __init__(self, layer_sizes, action_size, name='target_actor'):
    super(ActorNetwork, self).__init__(name=name)
    self._layer_sizes = layer_sizes
    self._action_size = action_size

  def _build(self, inputs):
    state = inputs
    for output_size in self._layer_sizes:
      state = snt.Linear(output_size)(state)
      state = tf.nn.relu(state)
    # tanh bounds the output to [-1, 1], matching the bounded action space.
    action = tf.tanh(
        snt.Linear(self._action_size, name='action')(state))
    return action
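A minimal usage sketch (not part of the commit): a Sonnet module builds its variables on first call, and the layer sizes below match those used in minitaurGymEnvTest.py further down.

import tensorflow as tf
from agents import actor_net

obs = tf.placeholder(tf.float32, shape=(None, 31))  # batch of 31-D observations
actor = actor_net.ActorNetwork(layer_sizes=(100, 181), action_size=8)
action = actor(obs)  # _build() runs here; tanh keeps outputs in [-1, 1]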
examples/pybullet/gym/agents/simpleAgent.py (new file, 46 lines)
@@ -0,0 +1,46 @@
"""Loads a DDPG agent without too many external dependencies."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf

import sonnet as snt
from agents import actor_net


class SimpleAgent(object):

  def __init__(
      self,
      session,
      ckpt_path,
      actor_layer_size,
      observation_size=(31,),
      action_size=8,
  ):
    self._ckpt_path = ckpt_path
    self._actor_layer_size = actor_layer_size
    self._observation_size = observation_size
    self._action_size = action_size
    self._session = session
    self._build()

  def _build(self):
    self._agent_net = actor_net.ActorNetwork(self._actor_layer_size,
                                             self._action_size)
    # Use the configured observation size rather than a hard-coded (31,).
    self._obs = tf.placeholder(tf.float32, self._observation_size)
    with tf.name_scope('Act'):
      # Add a leading batch dimension of 1 so a single observation can be
      # pushed through the (batched) actor network.
      batch_obs = snt.nest.pack_iterable_as(
          self._obs,
          snt.nest.map(lambda x: tf.expand_dims(x, 0),
                       snt.nest.flatten_iterable(self._obs)))
      self._action = self._agent_net(batch_obs)
      saver = tf.train.Saver()
      saver.restore(sess=self._session, save_path=self._ckpt_path)

  def __call__(self, observation):
    out_action = self._session.run(self._action,
                                   feed_dict={self._obs: observation})
    return out_action[0]
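For the single flat observation tensor used here, the snt.nest round-trip above reduces to adding a leading batch dimension; a minimal equivalent sketch (not from the commit):

import tensorflow as tf

obs = tf.placeholder(tf.float32, (31,))
batch_obs = tf.expand_dims(obs, 0)  # shape (31,) -> (1, 31)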
@@ -0,0 +1,2 @@
model_checkpoint_path: "/cns/ij-d/home/jietan/persistent/minitaur/minitaur_vizier_3_153645653/Bullet/MinitaurSimEnv/28158/0003600000/agent/tf_graph_data/tf_graph_data.ckpt"
all_model_checkpoint_paths: "/cns/ij-d/home/jietan/persistent/minitaur/minitaur_vizier_3_153645653/Bullet/MinitaurSimEnv/28158/0003600000/agent/tf_graph_data/tf_graph_data.ckpt"
Binary file not shown.
Binary file not shown.
Binary file not shown.
examples/pybullet/gym/envs/bullet/minitaurGymEnv.py (new file, 109 lines)
@@ -0,0 +1,109 @@
import math
import time

import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import pybullet as p

import minitaur_new


class MinitaurGymEnv(gym.Env):
  metadata = {
      'render.modes': ['human', 'rgb_array'],
      'video.frames_per_second': 50
  }

  def __init__(self,
               urdfRoot="",
               actionRepeat=1,
               render=False):
    self._timeStep = 0.01
    self._urdfRoot = urdfRoot
    self._actionRepeat = actionRepeat
    self._observation = []
    self._envStepCounter = 0
    # Named _isRender so the flag does not shadow the _render() method below.
    self._isRender = render
    self._lastBasePosition = [0, 0, 0]
    if self._isRender:
      p.connect(p.GUI)
    else:
      p.connect(p.DIRECT)
    self._seed()
    self.reset()
    observationDim = self._minitaur.getObservationDimension()
    observation_high = np.array([np.finfo(np.float32).max] * observationDim)
    actionDim = 8
    action_high = np.array([1] * actionDim)
    self.action_space = spaces.Box(-action_high, action_high)
    self.observation_space = spaces.Box(-observation_high, observation_high)
    self.viewer = None

  def _reset(self):
    p.resetSimulation()
    p.setPhysicsEngineParameter(numSolverIterations=300)
    p.setTimeStep(self._timeStep)
    p.loadURDF("%splane.urdf" % self._urdfRoot)
    p.setGravity(0, 0, -10)
    self._minitaur = minitaur_new.Minitaur(self._urdfRoot)
    self._envStepCounter = 0
    self._lastBasePosition = [0, 0, 0]
    # Let the robot settle onto the ground before the episode starts.
    for _ in range(100):
      p.stepSimulation()
    self._observation = self._minitaur.getObservation()
    return self._observation

  def __del__(self):
    p.disconnect()

  def _seed(self, seed=None):
    self.np_random, seed = seeding.np_random(seed)
    return [seed]

  def _step(self, action):
    if self._isRender:
      basePos = self._minitaur.getBasePosition()
      p.resetDebugVisualizerCamera(1, 30, 40, basePos)

    if len(action) != self._minitaur.getActionDimension():
      raise ValueError("We expect {} continuous actions, not {}.".format(
          self._minitaur.getActionDimension(), len(action)))

    for i in range(len(action)):
      if not -1.01 <= action[i] <= 1.01:
        raise ValueError("Action {} should be between -1 and 1, not {}.".format(
            i, action[i]))

    realAction = self._minitaur.convertFromLegModel(action)
    self._minitaur.applyAction(realAction)
    for _ in range(self._actionRepeat):
      p.stepSimulation()
      if self._isRender:
        time.sleep(self._timeStep)
      self._observation = self._minitaur.getObservation()
      if self._termination():
        break
    self._envStepCounter += 1
    reward = self._reward()
    done = self._termination()
    return np.array(self._observation), reward, done, {}

  def _render(self, mode='human', close=False):
    return

  def is_fallen(self):
    # Fallen if the robot's local up axis points downward, or if the base
    # height (the last observation entry) drops below 0.1 m.
    orientation = self._minitaur.getBaseOrientation()
    rotMat = p.getMatrixFromQuaternion(orientation)
    localUp = rotMat[6:]
    return np.dot(np.asarray([0, 0, 1]), np.asarray(localUp)) < 0 or self._observation[-1] < 0.1

  def _termination(self):
    return self.is_fallen()

  def _reward(self):
    currentBasePosition = self._minitaur.getBasePosition()
    forward_reward = currentBasePosition[0] - self._lastBasePosition[0]
    self._lastBasePosition = currentBasePosition

    energyWeight = 0.001
    energy = np.abs(np.dot(self._minitaur.getMotorTorques(),
                           self._minitaur.getMotorVelocities())) * self._timeStep
    energy_reward = energyWeight * energy
    reward = forward_reward - energy_reward
    return reward
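For reference, the per-step reward above is forward progress along x minus a small energy penalty; a standalone restatement as a sketch (step_reward is a name introduced here for illustration, not part of the commit):

import numpy as np

def step_reward(x_now, x_prev, torques, velocities, dt=0.01, energy_weight=0.001):
  # reward = (x_t - x_{t-1}) - 0.001 * |torque . velocity| * dt
  forward = x_now - x_prev
  energy = np.abs(np.dot(torques, velocities)) * dt
  return forward - energy_weight * energy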
examples/pybullet/gym/envs/bullet/minitaur_new.py (new file, 166 lines)
@@ -0,0 +1,166 @@
import copy
import math

import numpy as np
import pybullet as p


class Minitaur:

  def __init__(self, urdfRootPath=''):
    self.urdfRootPath = urdfRootPath
    self.reset()

  def buildJointNameToIdDict(self):
    nJoints = p.getNumJoints(self.quadruped)
    self.jointNameToId = {}
    for i in range(nJoints):
      jointInfo = p.getJointInfo(self.quadruped, i)
      self.jointNameToId[jointInfo[1].decode('UTF-8')] = jointInfo[0]
    self.resetPose()

  def buildMotorIdList(self):
    self.motorIdList.append(self.jointNameToId['motor_front_leftL_joint'])
    self.motorIdList.append(self.jointNameToId['motor_front_leftR_joint'])
    self.motorIdList.append(self.jointNameToId['motor_back_leftL_joint'])
    self.motorIdList.append(self.jointNameToId['motor_back_leftR_joint'])
    self.motorIdList.append(self.jointNameToId['motor_front_rightL_joint'])
    self.motorIdList.append(self.jointNameToId['motor_front_rightR_joint'])
    self.motorIdList.append(self.jointNameToId['motor_back_rightL_joint'])
    self.motorIdList.append(self.jointNameToId['motor_back_rightR_joint'])

  def reset(self):
    # self.quadruped = p.loadURDF("%squadruped/minitaur.urdf" % self.urdfRootPath, [0, 0, .2], flags=p.URDF_USE_SELF_COLLISION)
    self.quadruped = p.loadURDF("%squadruped/minitaur.urdf" % self.urdfRootPath, [0, 0, .2])
    self.kp = 1
    self.kd = 1
    self.maxForce = 3.5
    self.nMotors = 8
    self.motorIdList = []
    self.motorDir = [-1, -1, -1, -1, 1, 1, 1, 1]
    self.buildJointNameToIdDict()
    self.buildMotorIdList()

  def setMotorAngleById(self, motorId, desiredAngle):
    p.setJointMotorControl2(bodyIndex=self.quadruped,
                            jointIndex=motorId,
                            controlMode=p.POSITION_CONTROL,
                            targetPosition=desiredAngle,
                            positionGain=self.kp,
                            velocityGain=self.kd,
                            force=self.maxForce)

  def setMotorAngleByName(self, motorName, desiredAngle):
    self.setMotorAngleById(self.jointNameToId[motorName], desiredAngle)

  def resetPose(self):
    kneeFrictionForce = 0
    halfpi = 1.57079632679
    kneeangle = -2.1834  # halfpi - acos(upper_leg_length / lower_leg_length)

    # left front leg
    p.resetJointState(self.quadruped, self.jointNameToId['motor_front_leftL_joint'], self.motorDir[0] * halfpi)
    p.resetJointState(self.quadruped, self.jointNameToId['knee_front_leftL_link'], self.motorDir[0] * kneeangle)
    p.resetJointState(self.quadruped, self.jointNameToId['motor_front_leftR_joint'], self.motorDir[1] * halfpi)
    p.resetJointState(self.quadruped, self.jointNameToId['knee_front_leftR_link'], self.motorDir[1] * kneeangle)
    # Close the leg linkage with a point-to-point constraint between the knees.
    p.createConstraint(self.quadruped, self.jointNameToId['knee_front_leftR_link'], self.quadruped, self.jointNameToId['knee_front_leftL_link'], p.JOINT_POINT2POINT, [0, 0, 0], [0, 0.005, 0.2], [0, 0.01, 0.2])
    self.setMotorAngleByName('motor_front_leftL_joint', self.motorDir[0] * halfpi)
    self.setMotorAngleByName('motor_front_leftR_joint', self.motorDir[1] * halfpi)
    p.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self.jointNameToId['knee_front_leftL_link'], controlMode=p.VELOCITY_CONTROL, targetVelocity=0, force=kneeFrictionForce)
    p.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self.jointNameToId['knee_front_leftR_link'], controlMode=p.VELOCITY_CONTROL, targetVelocity=0, force=kneeFrictionForce)

    # left back leg
    p.resetJointState(self.quadruped, self.jointNameToId['motor_back_leftL_joint'], self.motorDir[2] * halfpi)
    p.resetJointState(self.quadruped, self.jointNameToId['knee_back_leftL_link'], self.motorDir[2] * kneeangle)
    p.resetJointState(self.quadruped, self.jointNameToId['motor_back_leftR_joint'], self.motorDir[3] * halfpi)
    p.resetJointState(self.quadruped, self.jointNameToId['knee_back_leftR_link'], self.motorDir[3] * kneeangle)
    p.createConstraint(self.quadruped, self.jointNameToId['knee_back_leftR_link'], self.quadruped, self.jointNameToId['knee_back_leftL_link'], p.JOINT_POINT2POINT, [0, 0, 0], [0, 0.005, 0.2], [0, 0.01, 0.2])
    self.setMotorAngleByName('motor_back_leftL_joint', self.motorDir[2] * halfpi)
    self.setMotorAngleByName('motor_back_leftR_joint', self.motorDir[3] * halfpi)
    p.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self.jointNameToId['knee_back_leftL_link'], controlMode=p.VELOCITY_CONTROL, targetVelocity=0, force=kneeFrictionForce)
    p.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self.jointNameToId['knee_back_leftR_link'], controlMode=p.VELOCITY_CONTROL, targetVelocity=0, force=kneeFrictionForce)

    # right front leg
    p.resetJointState(self.quadruped, self.jointNameToId['motor_front_rightL_joint'], self.motorDir[4] * halfpi)
    p.resetJointState(self.quadruped, self.jointNameToId['knee_front_rightL_link'], self.motorDir[4] * kneeangle)
    p.resetJointState(self.quadruped, self.jointNameToId['motor_front_rightR_joint'], self.motorDir[5] * halfpi)
    p.resetJointState(self.quadruped, self.jointNameToId['knee_front_rightR_link'], self.motorDir[5] * kneeangle)
    p.createConstraint(self.quadruped, self.jointNameToId['knee_front_rightR_link'], self.quadruped, self.jointNameToId['knee_front_rightL_link'], p.JOINT_POINT2POINT, [0, 0, 0], [0, 0.005, 0.2], [0, 0.01, 0.2])
    self.setMotorAngleByName('motor_front_rightL_joint', self.motorDir[4] * halfpi)
    self.setMotorAngleByName('motor_front_rightR_joint', self.motorDir[5] * halfpi)
    p.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self.jointNameToId['knee_front_rightL_link'], controlMode=p.VELOCITY_CONTROL, targetVelocity=0, force=kneeFrictionForce)
    p.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self.jointNameToId['knee_front_rightR_link'], controlMode=p.VELOCITY_CONTROL, targetVelocity=0, force=kneeFrictionForce)

    # right back leg
    p.resetJointState(self.quadruped, self.jointNameToId['motor_back_rightL_joint'], self.motorDir[6] * halfpi)
    p.resetJointState(self.quadruped, self.jointNameToId['knee_back_rightL_link'], self.motorDir[6] * kneeangle)
    p.resetJointState(self.quadruped, self.jointNameToId['motor_back_rightR_joint'], self.motorDir[7] * halfpi)
    p.resetJointState(self.quadruped, self.jointNameToId['knee_back_rightR_link'], self.motorDir[7] * kneeangle)
    p.createConstraint(self.quadruped, self.jointNameToId['knee_back_rightR_link'], self.quadruped, self.jointNameToId['knee_back_rightL_link'], p.JOINT_POINT2POINT, [0, 0, 0], [0, 0.005, 0.2], [0, 0.01, 0.2])
    self.setMotorAngleByName('motor_back_rightL_joint', self.motorDir[6] * halfpi)
    self.setMotorAngleByName('motor_back_rightR_joint', self.motorDir[7] * halfpi)
    p.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self.jointNameToId['knee_back_rightL_link'], controlMode=p.VELOCITY_CONTROL, targetVelocity=0, force=kneeFrictionForce)
    p.setJointMotorControl2(bodyIndex=self.quadruped, jointIndex=self.jointNameToId['knee_back_rightR_link'], controlMode=p.VELOCITY_CONTROL, targetVelocity=0, force=kneeFrictionForce)

  def getBasePosition(self):
    position, orientation = p.getBasePositionAndOrientation(self.quadruped)
    return position

  def getBaseOrientation(self):
    position, orientation = p.getBasePositionAndOrientation(self.quadruped)
    return orientation

  def getActionDimension(self):
    return self.nMotors

  def getObservationDimension(self):
    return len(self.getObservation())

  def getObservation(self):
    # 8 motor angles + 8 velocities + 8 torques + base quaternion (4) +
    # base position (3) = 31 values.
    observation = []
    observation.extend(self.getMotorAngles().tolist())
    observation.extend(self.getMotorVelocities().tolist())
    observation.extend(self.getMotorTorques().tolist())
    observation.extend(list(self.getBaseOrientation()))
    observation.extend(list(self.getBasePosition()))
    return observation

  def applyAction(self, motorCommands):
    motorCommandsWithDir = np.multiply(motorCommands, self.motorDir)
    for i in range(self.nMotors):
      self.setMotorAngleById(self.motorIdList[i], motorCommandsWithDir[i])

  def getMotorAngles(self):
    motorAngles = []
    for i in range(self.nMotors):
      jointState = p.getJointState(self.quadruped, self.motorIdList[i])
      motorAngles.append(jointState[0])
    motorAngles = np.multiply(motorAngles, self.motorDir)
    return motorAngles

  def getMotorVelocities(self):
    motorVelocities = []
    for i in range(self.nMotors):
      jointState = p.getJointState(self.quadruped, self.motorIdList[i])
      motorVelocities.append(jointState[1])
    motorVelocities = np.multiply(motorVelocities, self.motorDir)
    return motorVelocities

  def getMotorTorques(self):
    motorTorques = []
    for i in range(self.nMotors):
      jointState = p.getJointState(self.quadruped, self.motorIdList[i])
      motorTorques.append(jointState[3])
    motorTorques = np.multiply(motorTorques, self.motorDir)
    return motorTorques

  def convertFromLegModel(self, actions):
    # actions[0:4] set the swing of each leg (opposite signs on the paired
    # motors); actions[4:8] set the extension (same sign on both motors).
    motorAngle = copy.deepcopy(actions)
    scaleForSingularity = 1
    offsetForSingularity = 0.5
    motorAngle[0] = math.pi + math.pi / 4 * actions[0] - scaleForSingularity * math.pi / 4 * (actions[4] + 1 + offsetForSingularity)
    motorAngle[1] = math.pi - math.pi / 4 * actions[0] - scaleForSingularity * math.pi / 4 * (actions[4] + 1 + offsetForSingularity)
    motorAngle[2] = math.pi + math.pi / 4 * actions[1] - scaleForSingularity * math.pi / 4 * (actions[5] + 1 + offsetForSingularity)
    motorAngle[3] = math.pi - math.pi / 4 * actions[1] - scaleForSingularity * math.pi / 4 * (actions[5] + 1 + offsetForSingularity)
    motorAngle[4] = math.pi - math.pi / 4 * actions[2] - scaleForSingularity * math.pi / 4 * (actions[6] + 1 + offsetForSingularity)
    motorAngle[5] = math.pi + math.pi / 4 * actions[2] - scaleForSingularity * math.pi / 4 * (actions[6] + 1 + offsetForSingularity)
    motorAngle[6] = math.pi - math.pi / 4 * actions[3] - scaleForSingularity * math.pi / 4 * (actions[7] + 1 + offsetForSingularity)
    motorAngle[7] = math.pi + math.pi / 4 * actions[3] - scaleForSingularity * math.pi / 4 * (actions[7] + 1 + offsetForSingularity)
    return motorAngle
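A quick worked example of convertFromLegModel: for the all-zero action, every swing term vanishes and all eight motor angles reduce to the same value:

import math

# pi - (pi/4) * (0 + 1 + 0.5) = 5*pi/8 for every motor when actions == [0] * 8
angle = math.pi - math.pi / 4 * 1.5  # ~1.9635 rad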
examples/pybullet/gym/minitaurGymEnvTest.py (new file, 85 lines)
@@ -0,0 +1,85 @@
'''
A test for minitaurGymEnv
'''

import math

import numpy as np
import tensorflow as tf

from envs.bullet.minitaurGymEnv import MinitaurGymEnv
from agents import simpleAgent


def testSinePolicy():
  """Tests a hand-tuned sine policy."""
  np.random.seed(47)

  environment = MinitaurGymEnv(render=True)
  sum_reward = 0
  steps = 1000
  amplitude1Bound = 0.5
  amplitude2Bound = 0.15
  speed = 40

  for stepCounter in range(steps):
    t = float(stepCounter) * environment._timeStep

    # Keep the legs still for the first simulated second, then start the gait.
    if t < 1:
      amplitude1 = 0
      amplitude2 = 0
    else:
      amplitude1 = amplitude1Bound
      amplitude2 = amplitude2Bound
    a1 = math.sin(t * speed) * amplitude1
    a2 = math.sin(t * speed + math.pi) * amplitude1
    a3 = math.sin(t * speed) * amplitude2
    a4 = math.sin(t * speed + math.pi) * amplitude2

    # Diagonal legs move in phase, giving a trot-like gait.
    action = [a1, a2, a2, a1, a3, a4, a4, a3]

    state, reward, done, info = environment.step(action)
    sum_reward += reward
    if done:
      environment.reset()
  print("sum reward: ", sum_reward)


def testDDPGPolicy():
  """Tests the trained DDPG policy."""
  environment = MinitaurGymEnv(render=True)
  sum_reward = 0
  steps = 1000
  ckpt_path = 'data/agent/tf_graph_data/tf_graph_data.ckpt'
  observation_shape = (31,)
  action_size = 8
  actor_layer_sizes = (100, 181)
  n_steps = 0
  tf.reset_default_graph()
  with tf.Session() as session:
    agent = simpleAgent.SimpleAgent(session, ckpt_path,
                                    actor_layer_sizes,
                                    observation_size=observation_shape,
                                    action_size=action_size)
    state = environment.reset()
    action = agent(state)
    for _ in range(steps):
      n_steps += 1
      state, reward, done, info = environment.step(action)
      action = agent(state)
      sum_reward += reward
      if done:
        environment.reset()
        n_steps += 1
    print("total reward: ", sum_reward)
    print("total steps: ", n_steps)
    sum_reward = 0
    n_steps = 0
  return


testDDPGPolicy()
#testSinePolicy()