autograder.py
# import sys
import argparse
import BinaryCSP
import traceback
from os import listdir
from Testing import get_lines, csp_parse, assignment_parse
class FunctionInvokeMonitor:
    """Counts the number of times each mocked function is invoked."""

    def __init__(self):
        self.counter = {}

    def getFunctionMock(self, function):
        # Reset the count for this function and return a wrapper that
        # increments the count on every call before delegating to the original.
        self.counter[function] = 0

        def mock(*args):
            self.counter[function] += 1
            return function(*args)
        return mock

    def getInvocationCount(self, function):
        return self.counter[function]

    def getCounter(self):
        return self.counter

def demo_FunctionInvokeMonitor():
    a = FunctionInvokeMonitor()
    b = FunctionInvokeMonitor()

    def fn1(arg0):
        print 'Inside fn1:', arg0
        return 'Invoked fn1'

    def fn2(arg0, arg1):
        print 'Inside fn2:', arg0, arg1
        return 'Invoked fn2'

    aMockFn1 = a.getFunctionMock(fn1)
    aMockFn2 = a.getFunctionMock(fn2)
    bMockFn1 = b.getFunctionMock(fn1)

    print aMockFn1('test1')
    print bMockFn1('test2')
    print bMockFn1('test3')
    print aMockFn2('test4.1', 'test4.2')

    print a.getInvocationCount(fn1)  # 1
    print a.getInvocationCount(fn2)  # 1
    print b.getInvocationCount(fn1)  # 2

# Point value awarded for a question when all of its tests pass (see eval_question).
questionValues = {
    'q1': 4,
    'q2': 2,
    'q3': 2,
    'q4': 4,
    'q5': 4,
    'q6': 2
}

""" Runs a single test. Either prints correct or a failure message.
Returns True if the test passes. ValueError if test does not exist. """
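# A sketch of the .test file layout, inferred from the parser in run_test below.
# The directive names come from the code; the function and file names in this
# example are hypothetical placeholders, not files shipped with the assignment:
#
#     <nameOfBinaryCSPFunction>
#     csp test_cases/q1/<problem file>
#     assignment test_cases/q1/<assignment file>
#     function noInferences          (or the name of a BinaryCSP inference function)
#     constraint <ConstraintClass> <constructor args...>
#     boolean True
#     hint <text shown to the student when the test fails>
#
# For any other directive, the second token on the line is passed through as a
# plain string argument. The matching .solution file is exec'd with 'result',
# 'args', 'success', and 'correct' as locals and is expected to set 'success'
# and 'correct'.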
def run_test(test_file_name):
    result = None
    args = []
    hint = None
    fnMonitor = FunctionInvokeMonitor()  # counts calls to any mocked inference functions
    try:
        with open(test_file_name) as test_file:
            test_function = getattr(BinaryCSP, test_file.readline().strip())
            for line in test_file:
                line = line.split()
                line_type = line[0]
                if line_type == 'csp':
                    with open(line[1]) as csp_file:
                        args.append(csp_parse(csp_file.readlines()))
                elif line_type == 'assignment':
                    with open(line[1]) as assignment_file:
                        args.append(assignment_parse(assignment_file.readlines()))
                elif line_type == 'function':
                    if line[1] == 'noInferences':
                        args.append(None)
                    else:
                        args.append(fnMonitor.getFunctionMock(getattr(BinaryCSP, line[1])))
                elif line_type == 'constraint':
                    args.append(getattr(BinaryCSP, line[1])(*line[2:]))
                elif line_type == 'boolean':
                    args.append(line[1] == 'True')
                elif line_type == 'hint':
                    hint = ' '.join(line[1:])
                else:
                    args.append(line[1])
    except IOError:
        raise ValueError('Invalid test: %s\n(Tests should normally be specified as \'test_cases/[question]/[test].test\')' % test_file_name)
    except Exception as e:
        print 'An error occurred within the autograder:'
        print e

    try:
        result = test_function(*args)
    except Exception as e:
        print
        print 'FAIL:', test_file_name
        print 'Something broke:'
        print traceback.format_exc()
        return False

    try:
        test_local = {'success': False, 'result': result, 'args': args, 'correct': None}
        execfile(test_file_name.replace('.test', '.solution'), {}, test_local)
        correct = test_local['correct']
        success = test_local['success']
    except IOError as e:
        raise ValueError('Invalid solution file: %s' % test_file_name.replace('.test', '.solution'))
    except Exception as e:
        print
        print 'FAIL:', test_file_name
        print 'Solution check failed. Check your return type.'
        print
        return False

    # Warn if a required inference function was passed in but never called.
    namesNotInvoked = []
    fnInvokeCounter = fnMonitor.getCounter()
    for fn in fnInvokeCounter:
        if fnInvokeCounter[fn] == 0:
            namesNotInvoked.append(fn.__name__)
    if len(namesNotInvoked) > 0:
        print 'Warning: You did not invoke %s %s' % (
            'these functions:' if len(namesNotInvoked) > 1 else 'the function',
            ', '.join(namesNotInvoked))

    if success and len(namesNotInvoked) == 0:
        print 'PASS:', test_file_name, '\n'
    else:
        print 'FAIL:', test_file_name
        print 'Your answer:', str(result)
        print 'Correct answer:', str(correct)
        if hint is not None:
            print 'Hint:', hint
        print
    return success

""" Runs every test in a list of tests.
Prints an error message for invalid tests. """
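# Example (placeholder file name):
#     run_tests(['test_cases/q1/<test name>.test'])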
def run_tests(tests):
    print '____________________________________________________________________'
    print
    all_pass = True
    for test in tests:
        try:
            all_pass = run_test(test) and all_pass
        except ValueError as e:
            print e
    if all_pass:
        print
        print '--------------------------------------------------------------------'
        print 'All tests passed'
        print '--------------------------------------------------------------------'
        print
    else:
        print
        print '--------------------------------------------------------------------'
        print 'Not all tests passed'
        print '--------------------------------------------------------------------'
        print

""" Runs every test for a question. Returns points and possible points.
ValueError if question does not exist. """
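# Example: eval_question('q2') returns (2, 2) if every test under test_cases/q2
# passes, and (0, 2) otherwise.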
def eval_question(question):
    if question not in questionValues:
        raise ValueError('Invalid question: %s' % question)
    print '____________________________________________________________________'
    print 'Testing question: %s' % question
    print '--------------------------------------------------------------------'
    all_correct = True
    for file_name in [name for name in listdir('test_cases/' + question) if 'test' in name]:
        try:
            all_correct = run_test('test_cases/' + question + '/' + file_name) and all_correct
        except Exception as e:
            print e
    print
    print '--------------------------------------------------------------------'
    if all_correct:
        print 'All tests passed for question %s' % question
        print '--------------------------------------------------------------------'
        print
        return questionValues[question], questionValues[question]
    print 'Not all tests passed for question %s' % question
    print '--------------------------------------------------------------------'
    print
    return 0, questionValues[question]

""" Runs every question in a list of questions. Sums possible and earned points.
Prints an error message for invalid questions."""
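# Example: run_questions(['q1', 'q3']) returns (6, 6) when both questions pass
# all of their tests (q1 is worth 4 points and q3 is worth 2).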
def run_questions(questions):
    sum_earned = 0
    sum_possible = 0
    for question in questions:
        try:
            points, possible = eval_question(question)
            sum_earned += points
            sum_possible += possible
        except ValueError as e:
            print e
    return (sum_earned, sum_possible)

""" Parses command line arguments. Can run a list of questions and a list of tests.
Defaults to running all questions and printing the total score. """
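# Typical invocations (placeholder test name; this script targets Python 2):
#     python autograder.py                  grade every question and print the total score
#     python autograder.py -q q1 -q q3      grade only questions q1 and q3
#     python autograder.py -t test_cases/q1/<test name>.test    run a single test file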
def main():
    print
    parser = argparse.ArgumentParser(description='Constraint satisfaction problem autograder')
    parser.add_argument('-q', '--question', action='append', dest='questions')
    parser.add_argument('-t', '--test', action='append', dest='tests')
    args = vars(parser.parse_args())
    if args['tests'] is not None:
        run_tests(args['tests'])
    if args['questions'] is not None:
        run_questions(args['questions'])
    if args['tests'] is None and args['questions'] is None:
        questions = questionValues.keys()
        questions.sort()
        points, possible = run_questions(questions)
        print '--------------------------------------------------------------------'
        print 'Autograder finished. Final score %d/%d' % (points, possible)
        print '--------------------------------------------------------------------'


if __name__ == '__main__':
    main()