
@pranz24
Last active May 3, 2018 06:31
Solving_Acrobot-v1 using simple actor-critic
"""
A small neural network trained with actor-critic in PyTorch.
References:
David Silver's Lecture 7-Policy Gradient Methods: https://www.youtube.com/watch?v=KHZVXao4qXs&t=46s
Actor-critic example in Pytorch: https://github.com/pytorch/examples/blob/master/reinforcement_learning/actor_critic.py
I think a higher score can be achieved by running the algorithm for more episodes (>80000).
"""
import argparse
import gym
import numpy as np
from itertools import count
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
parser = argparse.ArgumentParser(description='PyTorch actor-critic for acrobot')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
                    help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=543, metavar='N',
                    help='random seed (default: 543)')
parser.add_argument('--render', action='store_true',
                    help='render the environment')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='interval between training status logs (default: 10)')
args = parser.parse_args()
env = gym.make('Acrobot-v1')
print(env.observation_space)
print(env.action_space)
env.seed(args.seed)
torch.manual_seed(args.seed)
SavedAction = namedtuple('SavedAction', ['action', 'value'])
class Policy(nn.Module):
    def __init__(self):
        super(Policy, self).__init__()
        # shared trunk: 6 observation features -> 18 -> 36 -> 18 hidden units
        self.affine1 = nn.Linear(6, 18)
        self.affine2 = nn.Linear(18, 36)
        self.affine3 = nn.Linear(36, 18)
        # actor head: one score per discrete action (Acrobot-v1 has 3 actions)
        self.action_head = nn.Linear(18, 3)
        # critic head: scalar state-value estimate
        self.value_head = nn.Linear(18, 1)
        self.saved_actions = []
        self.rewards = []

    def forward(self, x):
        x = F.leaky_relu(self.affine1(x))
        x = F.leaky_relu(self.affine2(x))
        x = F.leaky_relu(self.affine3(x))
        action_scores = self.action_head(x)
        state_values = self.value_head(x)
        return F.softmax(action_scores), state_values
model = Policy()
optimizer = optim.Adam(model.parameters(), lr=0.002)
def select_action(state):
    state = torch.from_numpy(state).float().unsqueeze(0)
    probs, state_value = model(Variable(state))
    # sample an action from the policy distribution (stochastic node, pre-0.4 PyTorch API)
    action = probs.multinomial()
    model.saved_actions.append(SavedAction(action, state_value))
    return action.data
outdir = '/tmp/Acrobot-results'
env = gym.wrappers.Monitor(env, outdir, force=True)
def finish_episode():
    R = 0
    saved_actions = model.saved_actions
    value_loss = 0
    rewards = []
    # compute discounted returns, working backwards from the last reward
    for r in model.rewards[::-1]:
        R = r + args.gamma * R
        rewards.insert(0, R)
    rewards = torch.Tensor(rewards)
    # normalize returns to reduce gradient variance
    rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
    for (action, value), r in zip(saved_actions, rewards):
        # advantage = return - critic's value estimate (baseline)
        reward = r - value.data[0, 0]
        action.reinforce(reward)
        value_loss += F.smooth_l1_loss(value, Variable(torch.Tensor([r])))
    optimizer.zero_grad()
    # backprop the critic loss and the REINFORCE gradients of the sampled actions together
    final_nodes = [value_loss] + list(map(lambda p: p.action, saved_actions))
    gradients = [torch.ones(1)] + [None] * len(saved_actions)
    autograd.backward(final_nodes, gradients)
    optimizer.step()
    del model.rewards[:]
    del model.saved_actions[:]
running_reward = 10
for episode_no in count(1):
    state = env.reset()
    for t in range(1000):
        action = select_action(state)
        state, reward, done, _ = env.step(action[0, 0])
        if args.render:
            env.render()
        model.rewards.append(reward)
        if done:
            break
    # exponential moving average of episode length
    running_reward = running_reward * 0.99 + t * 0.01
    finish_episode()
    if episode_no % args.log_interval == 0:
        print('Episode {}\tLast length: {:5d}\tAverage length: {:.2f}'.format(
            episode_no, t, running_reward))
    if episode_no > 80000:
        print("Running reward is now {} and "
              "the last episode runs to {} time steps!".format(running_reward, t))
        env.close()
        break
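
# Note (not part of the original gist): the code above uses the pre-0.4 PyTorch API
# (Variable, .multinomial(), .reinforce()), which has since been removed. Below is a
# minimal sketch of the same sampling and update using torch.distributions.Categorical,
# assuming PyTorch >= 0.4. It reuses model, optimizer and args from the gist;
# select_action_modern, finish_episode_modern and ModernSaved are illustrative names.
from torch.distributions import Categorical

ModernSaved = namedtuple('ModernSaved', ['log_prob', 'value'])

def select_action_modern(state):
    state = torch.from_numpy(state).float().unsqueeze(0)
    probs, state_value = model(state)      # Variable is no longer needed in >= 0.4
    dist = Categorical(probs)
    action = dist.sample()
    # store the log-probability and value; the loss below uses them directly
    model.saved_actions.append(ModernSaved(dist.log_prob(action), state_value))
    return action.item()                   # plain int, passed straight to env.step()

def finish_episode_modern():
    R = 0
    returns = []
    for r in model.rewards[::-1]:
        R = r + args.gamma * R
        returns.insert(0, R)
    returns = torch.tensor(returns)
    returns = (returns - returns.mean()) / (returns.std() + np.finfo(np.float32).eps)
    policy_losses, value_losses = [], []
    for (log_prob, value), R in zip(model.saved_actions, returns):
        advantage = R - value.item()                       # critic baseline
        policy_losses.append(-log_prob * advantage)        # REINFORCE term
        value_losses.append(F.smooth_l1_loss(value.squeeze(), R))
    optimizer.zero_grad()
    loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()
    loss.backward()
    optimizer.step()
    del model.rewards[:]
    del model.saved_actions[:]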