test_atari.py (forked from zhongzishi/HEC_80629A_H2021)
import logging
import sys

logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)

import numpy as np
import gym
import tensorflow as tf

from agent import DQN, train
from replay import UniformReplayBuffer, PrioritizedReplayBuffer, Transition
from observer import AverageObserver, MaximumObserver
from wrapper import AtariWrapper
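
# Train a DQN agent on Atari Ms. Pac-Man (gym's "MsPacman-v0"), with either a
# uniform or a prioritized replay buffer; the observers below log the average
# and maximum episode return every `log_interval` episodes.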

def run(
    agent_type="dqn",
    gamma=1.0,
    min_epsilon=0.1,
    learning_rate=2.5e-4,
    env_name="MsPacman-v0",
    use_wrapper=True,
    num_episodes=1000,
    log_interval=100,
    replay_buffer_capacity=10**5,
    use_prioritized_experience_buffer=False,
    max_steps_per_episode=10000,
    batch_size=32,
    use_soft_update=False,
    online_update_period=1,
    target_update_tau=1,
    target_sync_period=100,
    decay_rate=1e-5,
    num_saves=0,
    saved_model_dir=None,
    warm_up=10000,
):
    env = gym.make(env_name)
    if use_wrapper:
        # convert (210, 160, 3) to (84, 84, 1)
        env = AtariWrapper(env)
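        # (Given the shape change above, the wrapper presumably applies the
        # usual DeepMind-style Atari preprocessing: grayscale conversion plus
        # a resize of the raw 210x160 RGB frame to 84x84; see wrapper.py.)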
    cfg = {
        "type": agent_type,
        "network": {
            "type": "conv2d",
            "structure": None,
        },
        "gamma": gamma,
        "min_epsilon": min_epsilon,
    }
    agent = DQN(
        cfg,
        env.observation_space.shape,
        env.action_space.n,
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss_function=tf.keras.losses.MeanSquaredError(),
    )
    if use_prioritized_experience_buffer:
        buffer = PrioritizedReplayBuffer(
            size=replay_buffer_capacity,
            alpha=0.6,
            anneal_alpha_rate=1e-5,
            anneal_beta_rate=1e-5,
        )
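        # (In the prioritized-replay scheme of Schaul et al., alpha sets how
        # strongly TD-error priorities skew sampling and beta scales the
        # importance-sampling correction; the anneal_* rates presumably move
        # alpha down and beta up toward 1 as training progresses.)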
    else:
        buffer = UniformReplayBuffer(size=replay_buffer_capacity)
    observer = [
        AverageObserver(log_interval),
        MaximumObserver(log_interval),
    ]
    train(
        env, agent, buffer,
        num_episodes=num_episodes,
        max_steps_per_episode=max_steps_per_episode,
        batch_size=batch_size,
        online_update_period=online_update_period,
        target_sync_period=target_sync_period,
        log_interval=log_interval,
        use_soft_update=use_soft_update,
        target_update_tau=target_update_tau,
        observer=observer,
        decay_rate=decay_rate,
        num_saves=num_saves,
        saved_model_dir=saved_model_dir,
        warm_up=warm_up,
    )
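
# (Judging by the parameter names: with use_soft_update=False and
# target_update_tau=1, train() presumably hard-copies the online network into
# the target network every target_sync_period updates, performs a gradient
# step every online_update_period environment steps, and delays learning until
# the buffer holds warm_up transitions.)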

if __name__ == "__main__":
    logging.info("Testing the effect of the target sync period.")
    for target_sync_period in [100]:
        logging.info(f"\ntarget_sync_period = {target_sync_period}")
        run(
            agent_type="dqn",
            gamma=1.0,
            min_epsilon=0.1,
            learning_rate=2.5e-4,
            env_name="MsPacman-v0",
            use_wrapper=True,
            num_episodes=10000,
            log_interval=500,
            replay_buffer_capacity=10**6,
            use_prioritized_experience_buffer=False,
            max_steps_per_episode=10000,
            batch_size=32,
            use_soft_update=False,
            online_update_period=4,
            target_update_tau=1,
            target_sync_period=target_sync_period,
            decay_rate=2.5e-6,
            num_saves=0,
            saved_model_dir=None,
            warm_up=50000,
        )
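
# A quick smoke test with deliberately small, hypothetical settings (not part
# of the original experiment), handy for checking that the pieces wire
# together before launching a long run:
#
#   run(
#       env_name="MsPacman-v0",
#       num_episodes=5,
#       log_interval=1,
#       replay_buffer_capacity=10**4,
#       warm_up=100,
#       max_steps_per_episode=500,
#   )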