#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
#
"""Test the output of a given input from a yaml inginious output file and compare with the old one"""
import argparse
import os
import time
import json
import sys
from yaml import SafeLoader
from inginious.common.tags import Tag
from inginious.frontend.tasks import Task
from inginious.frontend.task_dispensers.toc import TableOfContents
from inginious.common.log import init_logging
from inginious.frontend.taskset_factory import create_factories
from inginious.client.client_sync import ClientSync
from inginious.frontend.arch_helper import start_asyncio_and_zmq, create_arch
from yaml import load
from inginious.common.filesystems.local import LocalFSProvider
from inginious.frontend.parsable_text import ParsableText
from inginious.frontend.task_problems import get_default_displayable_problem_types
def import_class(name):
m = name.split('.')
mod = __import__(m[0])
for comp in m[1:]:
mod = getattr(mod, comp)
return mod
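# Illustration (hypothetical usage): import_class resolves a dotted path by importing the top-level package and
# then walking the remaining components with getattr, e.g.
#   toc_class = import_class("inginious.frontend.task_dispensers.toc.TableOfContents")
# Intermediate submodules must already have been imported for the attribute walk to succeed.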
def create_client(config, taskset_factory, fs_provider):
"""
Create a new client and return it
:param config: dict for configuration
:return: a Client object
"""
zmq_context, t = start_asyncio_and_zmq(config.get("debug_asyncio", False))
return create_arch(config, fs_provider, zmq_context, taskset_factory)
def compare_all_outputs(output1, output2, keys):
"""
Compare both the output according to each key strategy
:param output1: output from the yaml
:param output2: output from the new job
:param keys: list of string, representing the keys for each value in the output
:return: a dict, whose the form is
{k: t,...} where k is a key whose the associated values in the outputs are different and t is a tuple containing
the values from both the outputs
"""
return {
keys[i]: (output1[i], output2[i]) for i in range(len(keys))
if compare_output(output1[i], output2[i], keys[i]) and output1[i] is not None and output2[i] is not None
}
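# Illustrative example (made-up values): if the recorded grade was 100.0 but the new job returned 0.0, the result
# would be {"grade": (100.0, 0.0)}, i.e. the value from the yaml first and the freshly computed value second.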
def compare_output(output1, output2, key):
"""
Compare atomic elements from the outputs
:param output1: atomic data structure from the yaml
:param output2: atomic data structure from the new job
:param key: key associated
:return: True if output1 and output2 are different, False otherwise
"""
def result_compare(output1, output2):
"""
Compare for the result key
:param output1: string
:param output2: tuple (string 1, string 2)
        :return: True if output1 differs from string 1 of output2, False otherwise
"""
return output1 != output2[0]
    def problems_compare(output1, output2):
        """
        Compare for the "problems" key
        :param output1: dict whose values are lists
        :param output2: dict whose values are tuples
        :return: True if output1 != output2, False otherwise
        """
        try:
            for k in output1:  # both outputs are expected to hold the same keys
                l = output1[k]  # in output1, values are lists of size 2
                t = output2[k]  # in output2, values are tuples of size 2
                if l[0] != t[0] or l[1] != t[1]:
                    return True
            return False
        except (KeyError, IndexError, TypeError):  # missing key or malformed entry => different outputs
            return True
def archive_compare(output1, output2):
"""
        Always return False: archives differ between runs, so they are ignored in the comparison
"""
return False
def generic_compare(output1, output2):
"""
Generic compare for keys
:param output1: string or different object (dict, list, ...)
:param output2: string or different object (dict, list, ...)
        :return: True if output1 != output2, False otherwise
"""
return output1 != output2
func = {
"result": result_compare,
"problems": problems_compare,
"archive": archive_compare
}
return func.get(key, generic_compare)(output1, output2)
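# Note: the comparison strategy is chosen per key; "result", "problems" and "archive" use the dedicated helpers
# defined above, and every other key falls back to generic_compare (a plain != on the values).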
def test_task(yaml_data, taskset, task, client, client_sync):
"""
Test the task by comparing the new outputs with the old ones
:param yaml_data: dict corresponding to the yaml output file for the task
:param task: Task object corresponding to the yaml data
:param client: backend client of type Client
:param client_sync: ClientSync object
:return: dict whose the format is specified in compare_all_outputs function doc
"""
if task.get_environment() not in client.get_available_containers():
time.sleep(1)
if task.get_environment() not in client.get_available_containers():
raise Exception('Environment not available')
new_output = client_sync.new_job(0, taskset, task, yaml_data['input']) # request the client with input from yaml and given task
keys = ["result", "grade", "problems", "tests", "custom", "state", "archive", "stdout", "stderr"]
old_output = [yaml_data.get(x, None) for x in keys]
return compare_all_outputs(old_output, new_output, keys)
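# Note: the keys list above is assumed to mirror the order of the tuple returned by ClientSync.new_job, so that the
# recorded yaml values and the fresh outputs can be compared index by index in compare_all_outputs.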
def test_web_task(yaml_data, course, task, config, yaml_path):
"""
Test the correctness of the data and task input, i.e. the content does not raise any exception and the rst contents
are compiling
:param yaml_data: dict, content of a task.yaml
:param task: Task object linked to this task.yaml
:param config: dict, configuration values
:param yaml_path: String, path of the file
:return: yaml_data if any error, {} otherwise
"""
try:
web_task = Task(
course.get_id(),
task.get_id(),
yaml_data,
task.get_fs(),
task.get_hook(),
config["default_problem_types"]
        )  # build the web Task from yaml_data; incorrect values raise an exception here
        web_task.get_context('English').parse(debug=True)  # check that the task context compiles
        if not Tag.check_format(web_task.get_tags()):
            raise BaseException("Tag format is not correct")
        if web_task.get_evaluate() not in ["best", "last", "student"]:
            raise BaseException("Incorrect evaluation type")
for sub_prob in yaml_data["problems"]:
p = ParsableText(yaml_data["problems"][sub_prob]["header"]) # parse the rst of the subproblem
p.parse(debug=True)
return {}
    except BaseException as e:  # building or rendering the task failed
print('{}: WEB TASK ERROR: {}'.format(yaml_path, e), file=sys.stderr)
return yaml_data
def test_submission_yaml(client, taskset_factory, path, output, client_sync):
"""
    Test the content of a submission.test yaml file by comparing it to the output produced by the client for the
    same task and the same input.
    :param client: Client object
    :param taskset_factory: TasksetFactory object
:param path: String, path to the submission.test
:param output: dict, output variable
:param client_sync: ClientSync object, client_sync = ClientSync(client)
:return: None
"""
with open(path, 'r') as yaml:
yaml_data = load(yaml, Loader=SafeLoader)
res = test_task(yaml_data, taskset_factory.get_taskset(yaml_data["courseid"]), taskset_factory.get_task(yaml_data["courseid"], yaml_data["taskid"]), client, client_sync)
if res != {}:
output[path] = res
def test_task_yaml(path, output, taskset_factory, task_name, course_name, config):
"""
    Test the format and content of a task.yaml file; if incorrect, the yaml data is stored in the output dict
    :param path: path to the task.yaml
    :param output: output dictionary
    :param taskset_factory: TasksetFactory object
:param task_name: String, name of the task
:param course_name: String, name of the course
:param config: dict, contains configuration variable
:return: None
"""
with open(path, 'r') as yaml_file:
yaml_data = load(yaml_file, Loader=SafeLoader)
res = test_web_task(yaml_data, taskset_factory.get_taskset(course_name), taskset_factory.get_task(course_name, task_name), config, path)
if res != {}:
output[path] = res
def test_all_files(config, client, taskset_factory):
"""
    Test every yaml file contained in the course directory (config["course_directory"]), as described in the
    test_task and test_web_task functions
    :param config: dict for configuration
    :param client: backend client of type Client
    :param taskset_factory: TasksetFactory object
:return: None
"""
test_output = {}
client_sync = ClientSync(client)
dir_path = config["course_directory"]
tasks = os.scandir(dir_path)
for task in tasks:
if task.is_dir():
task_dir = os.scandir(task.path)
names = [item.name for item in task_dir]
if "task.yaml" in names: # task directory
if "test" in names: # test only if the test sub directory is present
test_path = os.path.join(task.path, "test")
test_files = os.scandir(test_path)
for yaml_file in test_files:
if not yaml_file.name.startswith('.') and yaml_file.is_file(): # Exclude possible failures
test_submission_yaml(client, taskset_factory, yaml_file.path, test_output, client_sync)
task_yaml_path = os.path.join(task.path, "task.yaml")
test_task_yaml(task_yaml_path, test_output, taskset_factory, task.name, os.path.split(dir_path)[1], config)
    if test_output != {}:  # errors in task.yaml or submission.test files
output = json.dumps(test_output)
if "file" in config:
with open(config["file"], "w+") as json_file:
json_file.write(output)
else:
print("\n{}\n".format(output))
raise BaseException("Testing of course failed")
# exit(-1) # or 255 if exit code is on 8 bits
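# Example invocation (paths and course name are placeholders):
#   inginious-autotest ./tasks ./tasks/mycourse --file report.json
# On a mismatch or a malformed task.yaml, the differences are dumped as json (to the given file or to stdout)
# and the script exits with a non-zero status.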
if __name__ == "__main__":
# Arguments parsing
parser = argparse.ArgumentParser(description="Test the correctness of results contained in yaml files")
parser.add_argument("--logging", '-l', help="enables logging", action="store_true")
parser.add_argument("task_dir", help="Courses directory")
parser.add_argument("course_dir", help="Repository for the course to test")
parser.add_argument("-f", "--file", help="Store in the specified file in a json format")
parser.add_argument("--tdisp", nargs="+", help="Python class import path for additionnal task dispensers")
parser.add_argument("--ptype", nargs="+", help="Python class import path for additionnal subproblem types")
args = parser.parse_args()
if args.logging:
# Init logging
init_logging()
task_dispensers = {
task_dispenser.get_id(): task_dispenser for task_dispenser in [TableOfContents]
}
problem_types = get_default_displayable_problem_types()
if args.tdisp:
for tdisp_loc in args.tdisp:
try:
tdisp = import_class(tdisp_loc)
task_dispensers[tdisp.get_id()] = tdisp
except:
print("Cannot load {}, exiting".format(tdisp_loc), file=sys.stderr)
exit(1)
if args.ptype:
for ptype_loc in args.ptype:
try:
ptype = import_class(ptype_loc)
problem_types[ptype.get_type()] = ptype
except:
print("Cannot load {}, exiting".format(ptype_loc), file=sys.stderr)
exit(1)
config = {
"task_directory": args.task_dir,
"course_directory": args.course_dir,
"backend": "local",
"default_problem_types": problem_types
    }  # the test yaml files live in the "test" subdirectory of each task directory
if args.file:
config["file"] = args.file
fs_provider = LocalFSProvider(config["task_directory"])
try:
taskset_factory, _ = create_factories(fs_provider, task_dispensers, problem_types) # used for getting tasks
client = create_client(config, taskset_factory, fs_provider)
client.start()
test_all_files(config, client, taskset_factory)
except BaseException as e:
print("\nAn error has occured: {}\n".format(e), file=sys.stderr)
exit(66)
print("\nAll yaml files in {} directory are consistent with the tests\n".format(args.course_dir))