# suppose action 2 is invalid (the mask below zeros out index 2)
import torch
from torch.distributions.categorical import Categorical

action = 0                    # the sampled (valid) action whose log-prob we differentiate
advantage = torch.tensor(1.)  # dummy advantage of 1, so the gradient is just d(log_prob)/d(logits)
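# Throughout, recall that for a Categorical over logits z,
#   d/dz log softmax(z)[a] = one_hot(a) - softmax(z),
# which is what each `target_logits.grad` printout below should match.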
# no invalid action masking
print("=============regular=============")
target_logits = torch.tensor([1., 1., 1., 1.], requires_grad=True)
target_probs = Categorical(logits=target_logits)
log_prob = target_probs.log_prob(torch.tensor(action))
print("log_prob", log_prob)
(log_prob * advantage).backward()
print("gradient", target_logits.grad)
print()
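# Expected: uniform logits give probs [0.25, 0.25, 0.25, 0.25], so the gradient
# of log_prob(0) is one_hot(0) - probs = [0.75, -0.25, -0.25, -0.25]: every
# action, including the invalid one, is pushed on by the update.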
# invalid action masking via logits
print("==================invalid action masking=============")
target_logits = torch.tensor([1., 1., 1., 1.], requires_grad=True)
invalid_action_masks = torch.tensor([1., 1., 0., 1.]).bool()
adjusted_logits = torch.where(invalid_action_masks, target_logits, torch.tensor(-1e+8))
adjusted_probs = Categorical(logits=adjusted_logits)
adjusted_log_prob = adjusted_probs.log_prob(torch.tensor(action))
print("log_prob", adjusted_log_prob)
(adjusted_log_prob * advantage).backward()
print("gradient", target_logits.grad)
print()
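# Expected: the -1e8 logit drives the invalid action's probability to ~0, so
# the remaining probs renormalize to [1/3, 1/3, 0, 1/3] and the gradient is
# [2/3, -1/3, 0, -1/3]; torch.where routes no gradient to the masked-out logit.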
# invalid action masking via importance sampling
print("==================regular importance sampling=============")
target_logits = torch.tensor([1., 1., 1., 1.], requires_grad=True)
target_probs = Categorical(logits=target_logits)
invalid_action_masks = torch.tensor([1., 1., 0., 1.]).bool()
adjusted_logits = torch.where(invalid_action_masks, target_logits, torch.tensor(-1e+8))
adjusted_probs = Categorical(logits=adjusted_logits)
log_prob = target_probs.log_prob(torch.tensor(action))
adjusted_log_prob = adjusted_probs.log_prob(torch.tensor(action))
# ratio of the unmasked to the masked probability of the taken action
importance_sampling = target_probs.probs[action] / adjusted_probs.probs[action]
print("log_prob", log_prob)
(importance_sampling.detach() * log_prob * advantage).backward()
print("gradient", target_logits.grad)
print()
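# Expected: the ratio is 0.25 / (1/3) = 0.75, so the gradient is
# 0.75 * [0.75, -0.25, -0.25, -0.25] = [0.5625, -0.1875, -0.1875, -0.1875].
# Unlike logit masking, the invalid action's logit still receives gradient here.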
# "masking" via a mild logit penalty (-2) instead of a huge negative number
print("==================invalid action masking (mild -2 penalty)=============")
target_logits = torch.tensor([1., 1., 1., 1.], requires_grad=True)
invalid_action_masks = torch.tensor([1., 1., 0., 1.]).bool()
adjusted_logits = torch.where(invalid_action_masks, target_logits, torch.tensor(-2.))
adjusted_probs = Categorical(logits=adjusted_logits)
adjusted_log_prob = adjusted_probs.log_prob(torch.tensor(action))
print("adjusted_probs", adjusted_probs.probs)
(adjusted_log_prob * advantage).backward()
print("gradient", target_logits.grad)
print()
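# Expected: with only a -2 penalty the "invalid" action keeps roughly 1.6%
# probability (exp(-2) / (3e + exp(-2))), so the policy can still sample it;
# a very large negative logit like -1e8 is needed to truly zero it out.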
# no invalid action masking, with the third logit parameterized low from the start
print("=============regular but different parameterization=============")
target_logits = torch.tensor([1., 1., -2., 1.], requires_grad=True)
target_probs = Categorical(logits=target_logits)
log_prob = target_probs.log_prob(torch.tensor(action))
print("target_probs", target_probs.probs)
(log_prob * advantage).backward()
print("gradient", target_logits.grad)
print()
# apply one gradient-ascent step (learning rate 1) and inspect the new probs
new_target_logits = target_logits + target_logits.grad
new_target_probs = Categorical(logits=new_target_logits)
print("target_probs", new_target_probs.probs)