main.py
import argparse
import random

# Project modules providing the Environment, Agent and Runner/BatchRunner classes.
import agent
import environment
import runner
parser = argparse.ArgumentParser(description='RL running machine')
parser.add_argument('--environment', metavar='ENV_CLASS', type=str, default='Environment',
                    help='Class to use for the environment. Must be in the \'environment\' module.')
parser.add_argument('--agent', metavar='AGENT_CLASS', type=str, default='Agent',
                    help='Class to use for the agent. Must be in the \'agent\' module.')
parser.add_argument('--niter', type=int, metavar='n', default=100,
                    help='Number of iterations to simulate.')
parser.add_argument('--batch', type=int, metavar='nagent', default=None,
                    help='Batch-run several agents at the same time.')
parser.add_argument('--verbose', action='store_true',
                    help='Display cumulative results at each step.')

# random.seed(0)  # Uncomment for reproducible runs.
def main():
    args = parser.parse_args()

    # Look up the requested classes by name in the agent and environment modules.
    agent_class = getattr(agent, args.agent)
    env_class = getattr(environment, args.environment)

    if args.batch is not None:
        print("Running a batched simulation with {} agents in parallel...".format(args.batch))
        my_runner = runner.BatchRunner(env_class, agent_class, args.batch, args.verbose)
        final_reward = my_runner.loop(args.niter)
        print("Obtained a final average reward of {}".format(final_reward))
    else:
        print("Running a single instance simulation...")
        my_runner = runner.Runner(env_class(), agent_class(), args.verbose)
        final_reward = my_runner.loop(args.niter)
        print("Obtained a final reward of {}".format(final_reward))


if __name__ == "__main__":
    main()
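For reference, a few typical invocations, assuming the default Agent and Environment classes exist in their respective modules. The flag names and defaults come from the script above; MyAgent and GridWorld in the last example are placeholder names, not classes known to exist in this repository.

# Single-agent run with the default classes and 100 iterations:
python main.py

# Batched run with 10 agents, 500 iterations, printing cumulative results at each step:
python main.py --batch 10 --niter 500 --verbose

# Swap in other classes (they must be defined in the agent/environment modules):
python main.py --agent MyAgent --environment GridWorld

The script only fixes the interface it expects from the agent, environment and runner modules: Runner takes an environment instance, an agent instance and a verbose flag, BatchRunner takes the two classes plus a batch size, and both expose a loop(niter) method returning a reward. The stubs below are a minimal hypothetical sketch consistent with how main.py calls them, not the repository's actual implementation.

# Hypothetical stubs illustrating the interface main.py relies on.
class Agent:            # stands in for agent.Agent
    pass

class Environment:      # stands in for environment.Environment
    pass

class Runner:           # stands in for runner.Runner
    def __init__(self, environment, agent, verbose=False):
        self.environment, self.agent, self.verbose = environment, agent, verbose

    def loop(self, n_iterations):
        # Would alternate agent decisions and environment feedback for
        # n_iterations steps and return the final (cumulative) reward.
        return 0.0

class BatchRunner:      # stands in for runner.BatchRunner
    def __init__(self, env_class, agent_class, count, verbose=False):
        self.runners = [Runner(env_class(), agent_class(), verbose) for _ in range(count)]

    def loop(self, n_iterations):
        # Would run every instance and return the average final reward.
        return sum(r.loop(n_iterations) for r in self.runners) / len(self.runners)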