Add support for disabled readLineDirectives (#47)
vmordan authored Oct 20, 2023
1 parent 1f15c7e commit a6d3637
Showing 16 changed files with 256 additions and 166 deletions.
3 changes: 2 additions & 1 deletion .pylintrc
@@ -19,5 +19,6 @@ disable=
duplicate-code,
too-few-public-methods,
unnecessary-lambda,
no-name-in-module
no-name-in-module,
line-too-long

3 changes: 2 additions & 1 deletion configs/bridge/verifier_options.json
@@ -13,7 +13,8 @@
{"-setprop": "counterexample.export.exportCounterexampleCoverage=true"},
{"-setprop": "counterexample.export.prefixAdditionalCoverageFile=Counterexample.%d.additionalCoverage.info"},
{"-setprop": "additionalCoverage.file=additionalCoverage.info"},
{"-setprop": "parser.readLineDirectives=true"},
{"-setprop": "parser.readLineDirectives=false"},
{"-setprop": "coverage.file=coverage.info"},
{"-setprop": "cpa.arg.proofWitness=witness.correctness.graphml"},
{"-setprop": "cpa.arg.export=true"},
{"-setprop": "cpa.arg.compressWitness=false"},
10 changes: 1 addition & 9 deletions docs/klever_bridge.md
@@ -64,13 +64,7 @@ sudo ./scripts/runner.py -c runner.json -d <path to Linux kernel>

## Klever config

In order to visualize error traces CPAchecker config must include the following:
```json
{"-setprop": "parser.readLineDirectives=true"}
```
Note, Klever will not visualise traces with such option.

Also Klever job configuration should specify `Keep intermediate files inside the working directory of Klever Core` inside
Klever job configuration should specify `Keep intermediate files inside the working directory of Klever Core` inside
`Other settings` section in order to visualize generated files in the error trace.

If you need to visualise correctness witnesses in CV, the following options are required:
@@ -79,5 +73,3 @@ If you need to visualise correctness witnesses in CV, the following options are
{"-setprop": "cpa.arg.export=true"},
{"-setprop": "cpa.arg.compressWitness=false"}
```

If you need to add coverage, set flag `Collect total code coverage` in the Klever job config.
2 changes: 1 addition & 1 deletion properties/properties.json
@@ -22,7 +22,7 @@
"-skipRecursion": [],
"-stack": ["10m"],
"-setprop": [
"parser.readLineDirectives=true",
"parser.readLineDirectives=false",
"output.disable=true",
"cpa.predicate.abortOnLargeArrays=false",
"cpa.arg.proofWitness=witness.correctness.graphml",
6 changes: 4 additions & 2 deletions scripts/bridge.py
@@ -25,7 +25,7 @@
import argparse

from components.benchmark_launcher import TAG_TASKS_DIR, TAG_OUTPUT_DIR
from klever_bridge.launcher import KleverLauncher, TAG_JOB_ID
from klever_bridge.launcher import KleverLauncher, TAG_JOB_ID, KERNEL_DIR

if __name__ == '__main__':
# Support the following modes:
@@ -37,12 +37,14 @@
parser.add_argument("-o", "--output", help="benchmark output directory", default=None)
parser.add_argument("-t", "--tasks", help="tasks directory", default=None)
parser.add_argument("-j", "--job", help="job id", default=None)
parser.add_argument("-k", "--kernel-dir", dest="kernel_dir", help="directory with kernel dir")
options = parser.parse_args()

additional_config = {
TAG_OUTPUT_DIR: options.output,
TAG_TASKS_DIR: options.tasks,
TAG_JOB_ID: options.job
TAG_JOB_ID: options.job,
KERNEL_DIR: options.kernel_dir
}

launcher = KleverLauncher(options.config, additional_config, options.launch)
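For orientation, here is a minimal sketch of how the new `--kernel-dir` argument ends up in the launcher configuration. The imports and the `KleverLauncher(...)` call mirror this diff; the concrete paths, job id and launch mode are invented for illustration.

```python
# Sketch only: the identifiers come from this diff, the values are hypothetical.
from components.benchmark_launcher import TAG_TASKS_DIR, TAG_OUTPUT_DIR
from klever_bridge.launcher import KleverLauncher, TAG_JOB_ID, KERNEL_DIR

additional_config = {
    TAG_OUTPUT_DIR: "results/output",   # --output
    TAG_TASKS_DIR: "results/tasks",     # --tasks
    TAG_JOB_ID: "1234",                 # --job
    KERNEL_DIR: "/home/user/linux",     # new --kernel-dir option
}

# "bridge.json" and "klever" stand in for options.config and options.launch.
launcher = KleverLauncher("bridge.json", additional_config, "klever")
```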
15 changes: 8 additions & 7 deletions scripts/components/coverage_processor.py
@@ -35,7 +35,7 @@

TAG_COVERAGE_MODE = "mode"
TAG_COVERAGE_PERCENT_MODE = "percent mode"
TAG_FULL_COVERAGE_MODE = "full mode"
TAG_FULL_COVERAGE_MODE = "src cov mode"

COVERAGE_MODE_NONE = "none" # Do not compute coverage.
COVERAGE_MODE_PERCENT = "percent" # Present only percentage of coverage by lines/functions.
@@ -210,14 +210,13 @@ def __init__(self, launcher_component: Component = None, basic_config=None, inst
self.mode = self.component_config.get(TAG_COVERAGE_MODE, DEFAULT_COVERAGE_MODE)
self.percent_mode = self.component_config.get(TAG_COVERAGE_PERCENT_MODE,
COVERAGE_PERCENT_LOG)
# TODO: check other modes - do we need this argument?
self.full_mode = self.component_config.get(TAG_FULL_COVERAGE_MODE, "full")
self.src_cov_mode = self.component_config.get(TAG_FULL_COVERAGE_MODE, "full")
self.internal_logger = logging.getLogger(name=COMPONENT_COVERAGE)
self.internal_logger.setLevel(self.logger.level)
self.default_source_file = default_source_file

def compute_coverage(self, source_dirs: set, launch_directory: str,
queue: multiprocessing.Queue = None):
queue: multiprocessing.Queue = None, work_dir=None):
"""
Main method for coverage processing.
"""
@@ -255,7 +254,7 @@ def compute_coverage(self, source_dirs: set, launch_directory: str,
else:
self.logger.warning(f"Unknown coverage mode: {self.percent_mode}")
if self.mode == COVERAGE_MODE_FULL:
self.__full_coverage(source_dirs, os.path.abspath(file))
self.__full_coverage(source_dirs, os.path.abspath(file), work_dir)
break
os.chdir(self.launcher_dir)

@@ -265,15 +264,17 @@ def compute_coverage(self, source_dirs: set, launch_directory: str,
data[TAG_COVERAGE_FUNCS] = cov_funcs
queue.put(data)

def __full_coverage(self, source_dirs: set, coverage_file: str):
def __full_coverage(self, source_dirs: set, coverage_file: str, work_dir: str):
dummy_dir = ""
for src_dir in source_dirs:
if os.path.exists(os.path.join(src_dir, CLADE_WORK_DIR)):
dummy_dir = os.path.join(src_dir, CLADE_WORK_DIR)
break

if not work_dir:
work_dir = self.launcher_dir
lcov = LCOV(self.internal_logger, coverage_file, dummy_dir, source_dirs, [],
self.launcher_dir, self.full_mode,
work_dir, self.src_cov_mode,
ignore_files={os.path.join(DIRECTORY_WITH_GENERATED_FILES,
COMMON_HEADER_FOR_RULES)},
default_file=self.default_source_file)
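A condensed, standalone sketch of the `work_dir` handling introduced above: when no working directory is passed in, the coverage processor keeps using the launcher directory, and the renamed `src_cov_mode` is then forwarded to LCOV. The helper below is illustrative; only the fallback logic and the Clade dummy-directory lookup mirror `__full_coverage`.

```python
import os

def resolve_coverage_dirs(source_dirs, launcher_dir, work_dir=None,
                          clade_work_dir="clade"):
    """Illustrative helper mirroring __full_coverage: pick the Clade dummy dir
    and fall back to the launcher directory when work_dir is not given.
    The CLADE_WORK_DIR value ("clade") is an assumption."""
    dummy_dir = ""
    for src_dir in source_dirs:
        candidate = os.path.join(src_dir, clade_work_dir)
        if os.path.exists(candidate):
            dummy_dir = candidate
            break
    if not work_dir:
        work_dir = launcher_dir  # previous behaviour, kept as the default
    return dummy_dir, work_dir

# Usage sketch with hypothetical directories.
dummy, work = resolve_coverage_dirs({"/tmp/src"}, "/tmp/launcher")
print(dummy, work)
```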
12 changes: 7 additions & 5 deletions scripts/components/exporter.py
@@ -278,7 +278,7 @@ def export(self, report_launches: str, report_resources: str, report_components:
proofs = {}

# Process several error traces in parallel.
source_files = set()
source_files = {}
with open(report_launches, encoding='utf8', errors='ignore') as file_obj, \
open(report_resources, encoding='utf8', errors='ignore') as res_obj:
resources_data = res_obj.readlines()[1:]
@@ -408,7 +408,8 @@ def export(self, report_launches: str, report_resources: str, report_components:
with zipfile.ZipFile(witness, 'r') as arc_arch:
src = json.loads(arc_arch.read(ERROR_TRACE_SOURCES).
decode('utf8', errors='ignore'))
source_files.update(src)
for src_file, src_file_res in src:
source_files[src_file] = src_file_res
except Exception as exception:
self.logger.warning(f"Cannot process sources: {exception}\n",
exc_info=True)
@@ -439,7 +440,8 @@ def export(self, report_launches: str, report_resources: str, report_components:
with zipfile.ZipFile(witness, 'r') as arc_arch:
src = json.loads(arc_arch.read(ERROR_TRACE_SOURCES).
decode('utf8', errors='ignore'))
source_files.update(src)
for src_file, src_file_res in src:
source_files[src_file] = src_file_res
except Exception as exception:
self.logger.warning(f"Cannot process sources: "
f"{exception}\n", exc_info=True)
@@ -577,8 +579,8 @@ def export(self, report_launches: str, report_resources: str, report_components:
reports.append(root_element)
with zipfile.ZipFile(DEFAULT_SOURCES_ARCH, mode='w',
compression=zipfile.ZIP_DEFLATED) as arch_obj:
for src_file in source_files:
arch_obj.write(src_file)
for src_file, src_file_res in source_files.items():
arch_obj.write(src_file, arcname=src_file_res)
final_zip.write(DEFAULT_SOURCES_ARCH)
os.remove(DEFAULT_SOURCES_ARCH)

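The exporter now keeps a `{source path: archive name}` mapping instead of a plain set, so each file can be stored in the sources archive under its report-side name via `arcname`. A self-contained illustration of that `zipfile` pattern follows; the file names and contents are made up.

```python
import os
import tempfile
import zipfile

# Hypothetical source files; in the exporter these come from ERROR_TRACE_SOURCES.
tmp_dir = tempfile.mkdtemp()
source_files = {}
for rel_name in ("drivers/usb/core.c", "include/linux/usb.h"):
    abs_path = os.path.join(tmp_dir, rel_name.replace("/", "_"))
    with open(abs_path, "w", encoding="utf8") as fp:
        fp.write("/* stub */\n")
    # Map the local (absolute) path to the name it should get inside the archive.
    source_files[abs_path] = rel_name

with zipfile.ZipFile(os.path.join(tmp_dir, "sources.zip"), mode="w",
                     compression=zipfile.ZIP_DEFLATED) as arch_obj:
    for src_file, src_file_res in source_files.items():
        # arcname stores the file under its mapped relative name rather than
        # the absolute path of the machine where the export ran.
        arch_obj.write(src_file, arcname=src_file_res)
```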
4 changes: 2 additions & 2 deletions scripts/components/launcher.py
@@ -200,12 +200,12 @@ def _copy_result_files(self, files: list, group_directory: str) -> str:
return launch_dir

def _process_coverage(self, result, launch_directory, source_dirs: list,
default_source_file=None):
default_source_file=None, work_dir=None):
cov = Coverage(self, default_source_file=default_source_file)
cov_queue = multiprocessing.Queue()
cov_process = multiprocessing.Process(target=cov.compute_coverage,
name=f"coverage_{result.get_name()}",
args=(source_dirs, launch_directory, cov_queue))
args=(source_dirs, launch_directory, cov_queue, work_dir))
cov_process.start()
cov_process.join() # Wait since we are already in parallel threads for each launch.
if not cov_process.exitcode:
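The coverage computation still runs in a separate process and reports back through a queue; the only change here is that `work_dir` is forwarded as an extra positional argument. A minimal runnable sketch of that pattern, with a stub standing in for `Coverage.compute_coverage`:

```python
import multiprocessing

def compute_coverage(source_dirs, launch_directory, queue=None, work_dir=None):
    # Stub in place of Coverage.compute_coverage from coverage_processor.py.
    data = {"source_dirs": sorted(source_dirs), "work_dir": work_dir}
    if queue is not None:
        queue.put(data)

if __name__ == "__main__":
    cov_queue = multiprocessing.Queue()
    cov_process = multiprocessing.Process(
        target=compute_coverage,
        name="coverage_example",
        # work_dir is now passed through, mirroring _process_coverage.
        args=({"/tmp/src"}, "/tmp/launch", cov_queue, "/tmp/work"),
    )
    cov_process.start()
    cov_process.join()  # wait: each launch is already handled in its own thread
    if not cov_process.exitcode and not cov_queue.empty():
        print(cov_queue.get())
```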
28 changes: 18 additions & 10 deletions scripts/components/mea.py
@@ -58,19 +58,19 @@

class MEA(Component):
"""
Multiple Error Analysis (MEA) is aimed at processing several error traces, which violates
Multiple Error Analysis (MEA) is aimed at processing several error traces, which violates
the same property. Error traces are called equivalent, if they correspond to the same error.
Error trace equivalence for two traces et1 and et2 is determined in the following way:
et1 = et2 <=> comparison(conversion(parser(et1)), comparison(parser(et2))),
where parser function parses the given file with error trace and returns its internal
where parser function parses the given file with error trace and returns its internal
representation, conversion function transforms its internal representation (for example,
by removing some elements) and comparison function compares its internal representation.
Definitions:
- parsed error trace - result of parser(et), et - file name with error trace (xml);
- converted error trace - result of conversion(pet), pet - parsed error trace.
"""
def __init__(self, general_config: dict, error_traces: list, install_dir: str, rule: str = "",
result_dir: str = "", is_standalone=False):
result_dir: str = "", is_standalone=False, remove_prefixes=None):
super().__init__(COMPONENT_MEA, general_config)
self.install_dir = install_dir
if result_dir:
@@ -97,6 +97,7 @@ def __init__(self, general_config: dict, error_traces: list, install_dir: str, r
self.unzip = self.__get_option_for_rule(TAG_UNZIP, True)
self.dry_run = self.__get_option_for_rule(TAG_DRY_RUN, False)
self.source_dir = self.__get_option_for_rule(TAG_SOURCE_DIR, None)
self.remove_prefixes = remove_prefixes

# Cache of filtered converted error traces.
self.__cache = {}
@@ -335,9 +336,9 @@ def __print_trace_archive(self, error_trace_file_name: str, witness_type=WITNESS
def __process_parsed_trace(parsed_error_trace: dict):
# Normalize source paths.
src_files = []
for src_file in parsed_error_trace['files']:
src_file = os.path.normpath(src_file)
src_files.append(src_file)
for src_file, src_file_res in parsed_error_trace['files']:
tmp_res = (os.path.normpath(src_file), src_file_res)
src_files.append(tmp_res)
parsed_error_trace['files'] = src_files

def __parse_trace(self, error_trace_file: str, supported_types: set) -> dict:
Expand All @@ -346,7 +347,7 @@ def __parse_trace(self, error_trace_file: str, supported_types: set) -> dict:
"Witness processor", logging.WARNING if self.debug else logging.ERROR
)
try:
json_error_trace = import_error_trace(logger, error_trace_file, self.source_dir)
json_error_trace = import_error_trace(logger, error_trace_file, self.source_dir, self.remove_prefixes)
if self.dry_run:
warnings = json_error_trace.get('warnings', [])
if warnings:
@@ -373,13 +374,20 @@ def __print_parsed_error_trace(self, parsed_error_trace: dict, converted_error_t
json_trace_name, source_files, converted_traces_files = \
self.__get_aux_file_names(error_trace_file)

with open(json_trace_name, 'w', encoding='utf8') as file_obj:
json.dump(parsed_error_trace, file_obj, ensure_ascii=False, sort_keys=True, indent="\t")

with open(source_files, 'w', encoding='utf8') as file_obj:
json.dump(parsed_error_trace['files'], file_obj, ensure_ascii=False, sort_keys=True,
indent="\t")

files = []
for file, file_res in parsed_error_trace['files']:
if self.is_standalone:
files.append(file)
else:
files.append(file_res)
parsed_error_trace['files'] = files
with open(json_trace_name, 'w', encoding='utf8') as file_obj:
json.dump(parsed_error_trace, file_obj, ensure_ascii=False, sort_keys=True, indent="\t")

converted_traces = {}
if parsed_error_trace.get('type') == WITNESS_VIOLATION:
for conversion_function in EXPORTING_CONVERTED_FUNCTIONS:
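After this change, `parsed_error_trace['files']` holds (local path, report path) pairs rather than bare paths: standalone runs keep the local path, while exported reports use the mapped name. A small sketch of that normalization and selection, with the structure taken from the diff and the concrete values invented:

```python
import os

# Example trace fragment; the (local path, report path) pairs mirror the new
# structure of parsed_error_trace['files'], the concrete values are invented.
parsed_error_trace = {
    "files": [
        ("/abs/src/./drivers/usb/core.c", "drivers/usb/core.c"),
        ("/abs/src/./include/linux/usb.h", "include/linux/usb.h"),
    ],
}

# Normalization step, as in __process_parsed_trace.
parsed_error_trace["files"] = [
    (os.path.normpath(src_file), src_file_res)
    for src_file, src_file_res in parsed_error_trace["files"]
]

def select_files(parsed_trace, is_standalone):
    # Standalone mode keeps local paths; otherwise the report-side names are
    # used, matching __print_parsed_error_trace.
    return [
        src_file if is_standalone else src_file_res
        for src_file, src_file_res in parsed_trace["files"]
    ]

print(select_files(parsed_error_trace, is_standalone=False))
```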