diff --git a/src/trustyai/metrics/language.py b/src/trustyai/metrics/language.py
new file mode 100644
index 0000000..d774402
--- /dev/null
+++ b/src/trustyai/metrics/language.py
@@ -0,0 +1,71 @@
+"""Language metrics"""
+from dataclasses import dataclass
+
+# pylint: disable = import-error
+from typing import List, Optional, Union, Callable
+
+from org.kie.trustyai.metrics.language.wer import (
+    WordErrorRate as _WordErrorRate,
+    WordErrorRateResult as _WordErrorRateResult,
+)
+
+from opennlp.tools.tokenize import Tokenizer
+
+
+@dataclass
+class TokenSequenceAlignmentCounters:
+    """Token Sequence Alignment Counters"""
+
+    substitutions: int
+    insertions: int
+    deletions: int
+    correct: int
+
+
+@dataclass
+class WordErrorRateResult:
+    """Word Error Rate Result"""
+
+    wer: float
+    aligned_reference: str
+    aligned_input: str
+    alignment_counters: TokenSequenceAlignmentCounters
+
+    @staticmethod
+    def convert(wer_result: _WordErrorRateResult):
+        """Converts a Java WordErrorRateResult to a Python WordErrorRateResult"""
+        wer = wer_result.getWordErrorRate()
+        aligned_reference = wer_result.getAlignedReferenceString()
+        aligned_input = wer_result.getAlignedInputString()
+        alignment_counters = wer_result.getAlignmentCounters()
+        return WordErrorRateResult(
+            wer=wer,
+            aligned_reference=aligned_reference,
+            aligned_input=aligned_input,
+            alignment_counters=alignment_counters,
+        )
+
+
+def word_error_rate(
+    reference: str,
+    hypothesis: str,
+    tokenizer: Optional[Union[Tokenizer, Callable[[str], List[str]]]] = None,
+) -> WordErrorRateResult:
+    """Calculate Word Error Rate between reference and hypothesis strings"""
+    if tokenizer is None:
+        # No tokenizer given: let the Java implementation use its default
+        _wer = _WordErrorRate()
+    elif isinstance(tokenizer, Tokenizer):
+        # OpenNLP tokenizers are handed straight to the Java implementation
+        _wer = _WordErrorRate(tokenizer)
+    elif callable(tokenizer):
+        # Pure Python tokenizers run here; the token lists are passed to Java
+        tokenized_reference = tokenizer(reference)
+        tokenized_hypothesis = tokenizer(hypothesis)
+        _wer = _WordErrorRate()
+        return WordErrorRateResult.convert(
+            _wer.calculate(tokenized_reference, tokenized_hypothesis)
+        )
+    else:
+        raise ValueError("Unsupported tokenizer")
+    return WordErrorRateResult.convert(_wer.calculate(reference, hypothesis))
diff --git a/src/trustyai/utils/tokenizers.py b/src/trustyai/utils/tokenizers.py
new file mode 100644
index 0000000..a53f16b
--- /dev/null
+++ b/src/trustyai/utils/tokenizers.py
@@ -0,0 +1,9 @@
+"""Default tokenizers for TrustyAI."""
+# pylint: disable = import-error
+
+from org.apache.commons.text import StringTokenizer as _StringTokenizer
+from opennlp.tools.tokenize import SimpleTokenizer as _SimpleTokenizer
+
+# Re-export the Java tokenizers under Python-friendly names
+CommonsStringTokenizer = _StringTokenizer
+OpenNLPTokenizer = _SimpleTokenizer
diff --git a/tests/general/test_metrics_language.py b/tests/general/test_metrics_language.py
new file mode 100644
index 0000000..3c0b42b
--- /dev/null
+++ b/tests/general/test_metrics_language.py
@@ -0,0 +1,67 @@
+# pylint: disable=import-error, wrong-import-position, wrong-import-order, duplicate-code, unused-import
+"""Language metrics test suite"""
+
+from common import *
+from trustyai.metrics.language import word_error_rate
+from typing import List
+import math
+
+tolerance = 1e-4
+
+REFERENCES = [
+    "This is the test reference, to which I will compare alignment against.",
+    "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur condimentum velit id velit posuere dictum. Fusce euismod tortor massa, nec euismod sapien laoreet non. Donec vulputate mi velit, eu ultricies nibh iaculis vel. Aenean posuere urna nec sapien consectetur, vitae porttitor sapien finibus. Duis nec libero convallis lectus pharetra blandit ut ac odio. Vivamus nec dui quis sem convallis pulvinar. Maecenas sodales sollicitudin leo a faucibus.",
+    "The quick red fox jumped over the lazy brown dog"]
+
+INPUTS = [
+    "I'm a hypothesis reference, from which the aligner will compare against.",
+    "Lorem ipsum sit amet, consectetur adipiscing elit. Curabitur condimentum velit id velit posuere dictum. Fusce blandit euismod tortor massa, nec euismod sapien blandit laoreet non. Donec vulputate mi velit, eu ultricies nibh iaculis vel. Aenean posuere urna nec sapien consectetur, vitae porttitor sapien finibus. Duis nec libero convallis lectus pharetra blandit ut ac odio. Vivamus nec dui quis sem convallis pulvinar. Maecenas sodales sollicitudin leo a faucibus.",
+    "dog brown lazy the over jumped fox red quick The"]
+
+
+def test_default_tokenizer():
+    """Test default tokenizer"""
+    results = [4 / 7, 1 / 26, 1]
+    for i, (reference, hypothesis) in enumerate(zip(REFERENCES, INPUTS)):
+        wer = word_error_rate(reference, hypothesis).wer
+        assert math.isclose(wer, results[i], rel_tol=tolerance), \
+            f"WER for {reference}, {hypothesis} was {wer}, expected ~{results[i]}."
+
+
+def test_commons_stringtokenizer():
+    """Test Apache Commons StringTokenizer"""
+    from trustyai.utils.tokenizers import CommonsStringTokenizer
+    results = [8 / 12., 3 / 66., 1.0]
+
+    def tokenizer(text: str) -> List[str]:
+        return CommonsStringTokenizer(text).getTokenList()
+
+    for i, (reference, hypothesis) in enumerate(zip(REFERENCES, INPUTS)):
+        wer = word_error_rate(reference, hypothesis, tokenizer=tokenizer).wer
+        assert math.isclose(wer, results[i], rel_tol=tolerance), \
+            f"WER for {reference}, {hypothesis} was {wer}, expected ~{results[i]}."
+
+
+def test_opennlp_tokenizer():
+    """Test OpenNLP SimpleTokenizer"""
+    from trustyai.utils.tokenizers import OpenNLPTokenizer
+    results = [9 / 14., 3 / 78., 1.0]
+    tokenizer = OpenNLPTokenizer()
+    for i, (reference, hypothesis) in enumerate(zip(REFERENCES, INPUTS)):
+        wer = word_error_rate(reference, hypothesis, tokenizer=tokenizer).wer
+        assert math.isclose(wer, results[i], rel_tol=tolerance), \
+            f"WER for {reference}, {hypothesis} was {wer}, expected ~{results[i]}."
+
+
+def test_python_tokenizer():
+    """Test pure Python whitespace tokenizer"""
+
+    results = [3 / 4., 3 / 66., 1.0]
+
+    def tokenizer(text: str) -> List[str]:
+        return text.split(" ")
+
+    for i, (reference, hypothesis) in enumerate(zip(REFERENCES, INPUTS)):
+        wer = word_error_rate(reference, hypothesis, tokenizer=tokenizer).wer
+        assert math.isclose(wer, results[i], rel_tol=tolerance), \
+            f"WER for {reference}, {hypothesis} was {wer}, expected ~{results[i]}."
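
Usage sketch (illustrative, not part of the patch): the new word_error_rate entry point, first with the default Java-side tokenizer and then with a plain Python callable, mirroring the tests above. The example sentences are made up for demonstration; expected WER values follow from (substitutions + insertions + deletions) / reference token count.

    from trustyai.metrics.language import word_error_rate

    # Default (Java-side) tokenizer: "jumped" -> "hopped" is one substitution
    # over ten reference tokens, so the expected WER is 0.1
    result = word_error_rate(
        reference="The quick red fox jumped over the lazy brown dog",
        hypothesis="The quick red fox hopped over the lazy brown dog",
    )
    print(result.wer)
    print(result.aligned_reference)  # reference annotated with the alignment
    print(result.aligned_input)      # hypothesis annotated with the alignment

    # Any Callable[[str], List[str]] also works as a tokenizer; here the
    # expected WER is 0.25 (one substitution over four reference tokens)
    result = word_error_rate(
        "The quick red fox", "The quick brown fox", tokenizer=str.split
    )
    print(result.wer)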