# train.py (forked from r7vme/learning-to-drive-in-a-day)
# Code adapted from https://github.com/araffin/rl-baselines-zoo
# Author: Antonin Raffin
import argparse
import os
import time
import warnings
from collections import OrderedDict
from pprint import pprint
# Remove warnings
warnings.filterwarnings("ignore", category=FutureWarning, module='tensorflow')
warnings.filterwarnings("ignore", category=UserWarning, module='gym')
import numpy as np
import yaml
from stable_baselines.common import set_global_seeds
from stable_baselines.common.vec_env import VecFrameStack, VecNormalize, DummyVecEnv
from stable_baselines.ddpg import AdaptiveParamNoiseSpec, NormalActionNoise, OrnsteinUhlenbeckActionNoise
from stable_baselines.common.schedules import constfn
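# NOTE: these imports target the TensorFlow 1.x based stable-baselines package
# (not stable-baselines3); the schedules and noise helpers below come from it.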
from config import MIN_THROTTLE, MAX_THROTTLE, FRAME_SKIP,\
    MAX_CTE_ERROR, SIM_PARAMS, N_COMMAND_HISTORY, Z_SIZE, BASE_ENV, ENV_ID, MAX_STEERING_DIFF
from utils.utils import make_env, ALGOS, linear_schedule, get_latest_run_id, load_vae, create_callback
from teleop.teleop_client import TeleopEnv
parser = argparse.ArgumentParser()
parser.add_argument('-tb', '--tensorboard-log', help='Tensorboard log dir', default='', type=str)
parser.add_argument('-i', '--trained-agent', help='Path to a pretrained agent to continue training',
                    default='', type=str)
parser.add_argument('--algo', help='RL Algorithm', default='sac',
                    type=str, required=False, choices=list(ALGOS.keys()))
parser.add_argument('-n', '--n-timesteps', help='Overwrite the number of timesteps', default=-1,
                    type=int)
parser.add_argument('--log-interval', help='Override log interval (default: -1, no change)', default=-1,
                    type=int)
parser.add_argument('-f', '--log-folder', help='Log folder', type=str, default='logs')
parser.add_argument('-vae', '--vae-path', help='Path to saved VAE', type=str, default='')
parser.add_argument('--save-vae', action='store_true', default=False,
                    help='Save VAE')
parser.add_argument('--seed', help='Random generator seed', type=int, default=0)
parser.add_argument('--random-features', action='store_true', default=False,
                    help='Use random features')
parser.add_argument('--teleop', action='store_true', default=False,
                    help='Use teleoperation for training')
args = parser.parse_args()
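# Example invocation (hypothetical paths; assumes a VAE was trained beforehand):
#   python train.py --algo sac -vae logs/vae.pkl -n 5000 -f logs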
set_global_seeds(args.seed)
if args.trained_agent != "":
    assert args.trained_agent.endswith('.pkl') and os.path.isfile(args.trained_agent), \
        "The trained_agent must be a valid path to a .pkl file"
tensorboard_log = None if args.tensorboard_log == '' else args.tensorboard_log + '/' + ENV_ID
print("=" * 10, ENV_ID, args.algo, "=" * 10)
vae = None
if args.vae_path != '':
print("Loading VAE ...")
vae = load_vae(args.vae_path)
elif args.random_features:
print("Randomly initialized VAE")
vae = load_vae(z_size=Z_SIZE)
# Save network
args.save_vae = True
else:
print("Learning from pixels...")
# Load hyperparameters from yaml file
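# hyperparams/<algo>.yml is expected to map BASE_ENV to the keyword arguments of the
# chosen algorithm, plus a few special keys consumed below (n_timesteps, normalize,
# frame_stack and, for DDPG, noise_type / noise_std).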
with open('hyperparams/{}.yml'.format(args.algo), 'r') as f:
    hyperparams = yaml.load(f, Loader=yaml.UnsafeLoader)[BASE_ENV]
hyperparams['seed'] = args.seed
# Sort hyperparams that will be saved
saved_hyperparams = OrderedDict([(key, hyperparams[key]) for key in sorted(hyperparams.keys())])
# save vae path
saved_hyperparams['vae_path'] = args.vae_path
if vae is not None:
    saved_hyperparams['z_size'] = vae.z_size
# Save simulation params
for key in SIM_PARAMS:
    saved_hyperparams[key] = eval(key)
pprint(saved_hyperparams)
# Compute and create log path
log_path = os.path.join(args.log_folder, args.algo)
save_path = os.path.join(log_path, "{}_{}".format(ENV_ID, get_latest_run_id(log_path, ENV_ID) + 1))
params_path = os.path.join(save_path, ENV_ID)
os.makedirs(params_path, exist_ok=True)
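# Resulting layout: <log-folder>/<algo>/<ENV_ID>_<run_id>/<ENV_ID>/ holds the saved
# hyperparameters (plus VAE and normalization statistics when applicable).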
# Create learning rate schedules for ppo2 and sac
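# A string value such as 'lin_3e-4' (initial value after the underscore) becomes a
# linearly decaying schedule, while a plain float becomes a constant schedule.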
if args.algo in ["ppo2", "sac"]:
for key in ['learning_rate', 'cliprange']:
if key not in hyperparams:
continue
if isinstance(hyperparams[key], str):
schedule, initial_value = hyperparams[key].split('_')
initial_value = float(initial_value)
hyperparams[key] = linear_schedule(initial_value)
elif isinstance(hyperparams[key], float):
hyperparams[key] = constfn(hyperparams[key])
else:
raise ValueError('Invalid valid for {}: {}'.format(key, hyperparams[key]))
# Should we overwrite the number of timesteps?
if args.n_timesteps > 0:
    n_timesteps = args.n_timesteps
else:
    n_timesteps = int(hyperparams['n_timesteps'])
del hyperparams['n_timesteps']
normalize = False
normalize_kwargs = {}
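# 'normalize' in the yaml file can be True or a string of VecNormalize kwargs that is
# eval'd below, e.g. "{'norm_obs': True, 'norm_reward': False}" (example values).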
if 'normalize' in hyperparams.keys():
    normalize = hyperparams['normalize']
    if isinstance(normalize, str):
        normalize_kwargs = eval(normalize)
        normalize = True
    del hyperparams['normalize']
if not args.teleop:
    env = DummyVecEnv([make_env(args.seed, vae=vae, teleop=args.teleop)])
else:
    env = make_env(args.seed, vae=vae, teleop=args.teleop,
                   n_stack=hyperparams.get('frame_stack', 1))()
if normalize:
    if hyperparams.get('normalize', False) and args.algo in ['ddpg']:
        print("WARNING: normalization not supported yet for DDPG")
    else:
        print("Normalizing input and return")
        env = VecNormalize(env, **normalize_kwargs)
# Optional Frame-stacking
n_stack = 1
if hyperparams.get('frame_stack', False):
    n_stack = hyperparams['frame_stack']
    if not args.teleop:
        env = VecFrameStack(env, n_stack)
    print("Stacking {} frames".format(n_stack))
    del hyperparams['frame_stack']
# Parse noise string for DDPG
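# Supported values for 'noise_type': 'adaptive-param' (parameter-space noise),
# 'normal' and 'ornstein-uhlenbeck' (action-space noise), each with std 'noise_std'.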
if args.algo == 'ddpg' and hyperparams.get('noise_type') is not None:
    noise_type = hyperparams['noise_type'].strip()
    noise_std = hyperparams['noise_std']
    n_actions = env.action_space.shape[0]
    if 'adaptive-param' in noise_type:
        hyperparams['param_noise'] = AdaptiveParamNoiseSpec(initial_stddev=noise_std,
                                                            desired_action_stddev=noise_std)
    elif 'normal' in noise_type:
        hyperparams['action_noise'] = NormalActionNoise(mean=np.zeros(n_actions),
                                                        sigma=noise_std * np.ones(n_actions))
    elif 'ornstein-uhlenbeck' in noise_type:
        hyperparams['action_noise'] = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions),
                                                                   sigma=noise_std * np.ones(n_actions))
    else:
        raise RuntimeError('Unknown noise type "{}"'.format(noise_type))
    print("Applying {} noise with std {}".format(noise_type, noise_std))
    del hyperparams['noise_type']
    del hyperparams['noise_std']
if args.trained_agent.endswith('.pkl') and os.path.isfile(args.trained_agent):
    # Continue training
    print("Loading pretrained agent")
    # Policy should not be changed
    del hyperparams['policy']
    model = ALGOS[args.algo].load(args.trained_agent, env=env,
                                  tensorboard_log=tensorboard_log, verbose=1, **hyperparams)
    exp_folder = args.trained_agent.split('.pkl')[0]
    if normalize:
        print("Loading saved running average")
        env.load_running_average(exp_folder)
else:
    # Train an agent from scratch
    model = ALGOS[args.algo](env=env, tensorboard_log=tensorboard_log, verbose=1, **hyperparams)
# Teleoperation mode:
# we don't wrap the environment with a monitor or in a vecenv
if args.teleop:
assert args.algo == "sac", "Teleoperation mode is not yet implemented for {}".format(args.algo)
env = TeleopEnv(env, is_training=True)
model.set_env(env)
env.model = model
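# In teleoperation mode the raw (non-vectorized) env is wrapped by TeleopEnv, which
# presumably lets a human operator intervene and interact with the model during training.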
kwargs = {}
if args.log_interval > -1:
    kwargs = {'log_interval': args.log_interval}
if args.algo == 'sac':
    kwargs.update({'callback': create_callback(args.algo,
                                               os.path.join(save_path, ENV_ID + "_best"),
                                               verbose=1)})
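# For SAC, create_callback presumably saves the best-performing agent during training
# under <save_path>/<ENV_ID>_best (see utils.utils.create_callback).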
model.learn(n_timesteps, **kwargs)
if args.teleop:
    env.wait()
    env.exit()
    time.sleep(0.5)
else:
    # Close the connection properly
    env.reset()
    if isinstance(env, VecFrameStack):
        env = env.venv
    # HACK to bypass Monitor wrapper
    env.envs[0].env.exit_scene()
# Save trained model
model.save(os.path.join(save_path, ENV_ID), cloudpickle=True)
# Save hyperparams
with open(os.path.join(params_path, 'config.yml'), 'w') as f:
    yaml.dump(saved_hyperparams, f)
if args.save_vae and vae is not None:
print("Saving VAE")
vae.save(os.path.join(params_path, 'vae'))
if normalize:
    # Unwrap
    if isinstance(env, VecFrameStack):
        env = env.venv
    # Important: save the running average; this normalization is needed when testing the agent
    env.save_running_average(params_path)