''' CartPole-v0 with policy gradients in Chainer: a two-layer MLP with dropout, trained on episodes kept via rejection sampling of historical memories '''

import gym

import numpy as np

from chainer import optimizers
from chainer import ChainList
import chainer.functions as F
import chainer.links as L

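# Create the environment and start recording the run.
# NOTE: env.monitor.start() is the pre-0.9 gym monitor API; newer gym
# releases replaced it with gym.wrappers.Monitor.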
env = gym.make('CartPole-v0')
env.monitor.start('./cartpole-experiment')

print('Action space:', env.action_space)
print('Observation space:', env.observation_space)

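# Hyperparameters: CartPole observations are 4-dimensional; MEMORY_STORE
# caps how many episodes the memory keeps; EPSILON_RANDOM is the chance
# of a uniformly random exploratory action; updates run once more than
# MINIMUM_UPDATE_SIZE episodes have been collected.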
INPUT = 4
HIDDEN = 32
MEMORY_STORE = 16
REWARD_DECAY = 0.99
EPSILON_RANDOM = 0.01
MINIMUM_UPDATE_SIZE = 2
SGD_LR = 0.8
DROPOUT = 0.5

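# Two-layer MLP policy: dropout on the hidden pre-activations, a tanh
# nonlinearity, and a sigmoid output giving P(action == 1 | state).
# The train=/ratio= keywords follow the Chainer v1 F.dropout API;
# Chainer v2+ controls this via chainer.config.train instead.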
class PolicyNetwork(ChainList):
  def __init__(self, input_size=4, hidden_size=32):
    super(PolicyNetwork, self).__init__(
      L.Linear(input_size, hidden_size, nobias=True),
      L.Linear(hidden_size, 1, nobias=True),
    )
  def __call__(self, x, train=True, dropout=0.5):
    h = x
    h = F.dropout(self[0](h), train=train, ratio=dropout)
    h = self[1](F.tanh(h))
    return F.sigmoid(h)

model = PolicyNetwork(input_size=INPUT, hidden_size=HIDDEN)
optimizer = optimizers.SGD(lr=SGD_LR)
optimizer.setup(model)

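# Main loop: roll out one episode per iteration, then update the policy
# from the stored episodes and rejection-sample which ones to keep.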
episodes = []        # memory store of (total_reward, episode) pairs
reward_history = []  # per-episode total rewards for the running average
for episode_idx in range(10000):
  episode = []
  total_reward = 0

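  # Roll out one episode with the current policy (dropout disabled).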
  state = env.reset()
  for t in range(201):
    env.render()
    # The sigmoid output is the probability of choosing action 1.
    raw_action = model(np.array([state], dtype=np.float32), train=False)
    action = 1 if np.random.random() < float(raw_action.data[0, 0]) else 0
    # With probability EPSILON_RANDOM, take a random exploratory action.
    if np.random.random() < EPSILON_RANDOM:
      action = env.action_space.sample()
    new_state, reward, done, info = env.step(action)
    episode.append((state, action, reward))
    state = new_state
    total_reward += reward
    if done:
      break

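  # Store the finished episode together with its total reward.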
  episodes.append((total_reward, episode))
  reward_history.append(total_reward)

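  # Policy update: weight each step's squared-error gradient by its
  # discounted return, average over all stored steps, then apply SGD.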
  if len(episodes) > MINIMUM_UPDATE_SIZE:
    gradW = [[], []]
    for _, episode in episodes:
      # Discounted return from each timestep to the end of the episode.
      R = [r for (s, a, r) in episode]
      accR = [sum(r * REWARD_DECAY ** i for i, r in enumerate(R[idx:])) for idx in range(len(R))]
      # Forward pass with dropout enabled for the training signal.
      pred_actions = [model(np.array([s], dtype=np.float32), train=True, dropout=DROPOUT) for (s, a, r) in episode]
      losses = [(pa - a) ** 2 for pa, (s, a, r) in zip(pred_actions, episode)]
      for loss, r in zip(losses, accR):
        model.zerograds()
        loss.backward()
        gradW[0].append(r * model[0].W.grad)
        gradW[1].append(r * model[1].W.grad)
    for idx, grads in enumerate(gradW):
      model[idx].W.grad = np.mean(grads, axis=0, dtype=np.float32)
    optimizer.update()
    # Rejection-sample the memory: an episode with total reward r
    # survives with probability r / maxR; then cap the store size.
    maxR = np.max([r for (r, ep) in episodes])
    episodes = [(r, ep) for (r, ep) in episodes if maxR * np.random.random() < r]
    np.random.shuffle(episodes)
    episodes = episodes[:MEMORY_STORE]

  print('Episode {} finished after {} timesteps (avg for last 100: {})'.format(episode_idx, t + 1, np.mean(reward_history[-100:])))

env.monitor.close()