From 3337800415f1913ac39696a1159569646b639635 Mon Sep 17 00:00:00 2001
From: Karanbir Chahal
Date: Thu, 27 Feb 2020 22:25:49 -0500
Subject: [PATCH] Update reinforce.py

The update step for REINFORCE appears to be wrong. Instead of multiplying
the log prob and the reward at each step, we should sum the rewards-to-go
and the log probs across the entire episode, and then multiply these two
sums together. This form was derived in Sergey Levine's lecture on policy
gradients.

Source: https://www.youtube.com/watch?v=Ds1trXd6pos&list=PLkFD6_40KJIwhWJpGazJ9VSj9CFMkb79A&index=5
---
 reinforcement_learning/reinforce.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/reinforcement_learning/reinforce.py b/reinforcement_learning/reinforce.py
index a222ff804c..9b6cea3345 100644
--- a/reinforcement_learning/reinforce.py
+++ b/reinforcement_learning/reinforce.py
@@ -68,10 +68,10 @@ def finish_episode():
         returns.insert(0, R)
     returns = torch.tensor(returns)
     returns = (returns - returns.mean()) / (returns.std() + eps)
-    for log_prob, R in zip(policy.saved_log_probs, returns):
-        policy_loss.append(-log_prob * R)
-    optimizer.zero_grad()
-    policy_loss = torch.cat(policy_loss).sum()
+    l_probs = torch.cat(policy.saved_log_probs).sum()
+    rews = returns.sum()
+    optimizer.zero_grad()
+    policy_loss = -(l_probs * rews)
     policy_loss.backward()
     optimizer.step()
     del policy.rewards[:]
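
For illustration, a minimal standalone sketch of the proposed update outside
the patch context, assuming each saved log prob is a shape-(1,) tensor (as
produced by Categorical(probs).log_prob(action) in reinforce.py) and that
returns already holds the normalized rewards-to-go; the numeric values below
are made-up placeholders, not taken from the script.

    import torch

    # Stand-ins for policy.saved_log_probs and the normalized returns;
    # in reinforce.py these come from the policy network and the episode.
    saved_log_probs = [torch.tensor([-0.7], requires_grad=True),
                       torch.tensor([-1.2], requires_grad=True),
                       torch.tensor([-0.3], requires_grad=True)]
    returns = torch.tensor([1.5, 0.9, 0.4])

    log_prob_sum = torch.cat(saved_log_probs).sum()  # sum of log probs over the episode
    return_sum = returns.sum()                       # sum of the rewards-to-go
    policy_loss = -(log_prob_sum * return_sum)       # negate: the optimizer minimizes
    policy_loss.backward()                           # gradients flow back to the log probs

In the actual script the log probs are produced by the policy network, so the
same backward() call propagates the gradient into the policy parameters before
optimizer.step() is taken.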