# valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
import mdp, util
from learningAgents import ValueEstimationAgent
import collections
class ValueIterationAgent(ValueEstimationAgent):
    """
    * Please read learningAgents.py before reading this. *

    A ValueIterationAgent takes a Markov decision process
    (see mdp.py) on initialization and runs value iteration
    for a given number of iterations using the supplied
    discount factor.
    """

    def __init__(self, mdp: mdp.MarkovDecisionProcess, discount=0.9, iterations=100):
        """
        Your value iteration agent should take an mdp on
        construction, run the indicated number of iterations
        and then act according to the resulting policy.

        Some useful mdp methods you will use:
            mdp.getStates()
            mdp.getPossibleActions(state)
            mdp.getTransitionStatesAndProbs(state, action)
            mdp.getReward(state, action, nextState)
            mdp.isTerminal(state)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter()  # A Counter is a dict with default value 0
        self.runValueIteration()
    def runValueIteration(self):
        """
        Run the value iteration algorithm. Note that in standard
        value iteration, V_k+1(...) depends on V_k(...)'s.
        """
        "*** MY CODE HERE ***"
        all_states = self.mdp.getStates()
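        # Each sweep applies one Bellman optimality backup to every non-terminal
        # state, using the values from the previous sweep ("batch" updates):
        #   V_{k+1}(s) = max_a sum_{s'} T(s, a, s') * [R(s, a, s') + discount * V_k(s')]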
        for _ in range(self.iterations):
            temp_values = self.values.copy()  # V_k, frozen for this sweep
            for state in all_states:
                if self.mdp.isTerminal(state):  # terminal states keep their value
                    continue
                possible_actions = self.mdp.getPossibleActions(state)
                max_value = float('-inf')
                for action in possible_actions:
                    q_value = 0
                    for next_state, prob in self.mdp.getTransitionStatesAndProbs(state, action):
                        reward = self.mdp.getReward(state, action, next_state)
                        q_value += prob * (reward + self.discount * temp_values[next_state])
                    max_value = max(max_value, q_value)
                self.values[state] = max_value if max_value != float('-inf') else 0
    def getValue(self, state):
        """
        Return the value of the state (computed in __init__).
        """
        return self.values[state]
    def computeQValueFromValues(self, state, action):
        """
        Compute the Q-value of action in state from the
        value function stored in self.values.
        """
        "*** MY CODE HERE ***"
        q_value = 0
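        # Q(s, a) = sum_{s'} T(s, a, s') * [R(s, a, s') + discount * V(s')],
        # accumulated over the transition distribution below.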
        for next_state, prob in self.mdp.getTransitionStatesAndProbs(state, action):
            reward = self.mdp.getReward(state, action, next_state)
            q_value += prob * (reward + self.discount * self.values[next_state])
        return q_value
    def computeActionFromValues(self, state):
        """
        The policy is the best action in the given state
        according to the values currently stored in self.values.

        You may break ties any way you see fit. Note that if
        there are no legal actions, which is the case at the
        terminal state, you should return None.
        """
        "*** MY CODE HERE ***"
        possible_actions = self.mdp.getPossibleActions(state)
        if not possible_actions:  # no legal actions (e.g. terminal state)
            return None
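        # Greedy policy extraction: pick the action whose one-step lookahead
        # Q-value (computed from self.values) is largest.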
        best_action = None
        max_value = float('-inf')
        for action in possible_actions:
            comparable_value = self.computeQValueFromValues(state, action)
            if comparable_value > max_value:
                max_value = comparable_value
                best_action = action
        return best_action
    def getPolicy(self, state):
        return self.computeActionFromValues(state)

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.computeActionFromValues(state)

    def getQValue(self, state, action):
        return self.computeQValueFromValues(state, action)
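
# ----------------------------------------------------------------------------
# Usage sketch (not part of the original project code).
# A minimal, hypothetical example of how the agent might be exercised, assuming
# mdp.MarkovDecisionProcess can be subclassed by overriding just the methods
# this file actually calls (getStates, getPossibleActions,
# getTransitionStatesAndProbs, getReward, isTerminal). The TwoStateMDP class
# and its states, actions, and rewards are made up purely for illustration.
if __name__ == '__main__':
    class TwoStateMDP(mdp.MarkovDecisionProcess):
        """Toy MDP: from 'A' you may 'stay' (reward 0) or 'exit' to the
        terminal state 'DONE' (reward 1)."""
        def getStates(self):
            return ['A', 'DONE']
        def getPossibleActions(self, state):
            return [] if state == 'DONE' else ['stay', 'exit']
        def getTransitionStatesAndProbs(self, state, action):
            return [('A', 1.0)] if action == 'stay' else [('DONE', 1.0)]
        def getReward(self, state, action, nextState):
            return 1.0 if nextState == 'DONE' else 0.0
        def isTerminal(self, state):
            return state == 'DONE'

    agent = ValueIterationAgent(TwoStateMDP(), discount=0.9, iterations=50)
    print(agent.getValue('A'))           # expected: 1.0 (exiting immediately is optimal)
    print(agent.getPolicy('A'))          # expected: 'exit'
    print(agent.getQValue('A', 'stay'))  # expected: 0.9 * V('A') = 0.9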