Merge pull request #285 from whylabs/toxic-onnx-switch
Toxic onnx switch
naddeoa authored Apr 7, 2024
2 parents 465a609 + d63f218 commit 4e85e39
Showing 3 changed files with 18 additions and 8 deletions.
.bumpversion.cfg (2 changes: 1 addition & 1 deletion)
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 0.0.28.dev1
+current_version = 0.0.28.dev2
 tag = False
 parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\.(?P<release>[a-z]+)(?P<build>\d+))?
 serialize =
langkit/metrics/library.py (22 changes: 16 additions & 6 deletions)
@@ -113,14 +113,19 @@ def __call__(self) -> MetricCreator:
             return self.toxicity_score()
 
         @staticmethod
-        def toxicity_score() -> MetricCreator:
+        def toxicity_score(onnx: bool = True) -> MetricCreator:
             """
             Analyze the input for toxicity. The output of this metric ranges from 0 to 1, where 0 indicates
             non-toxic and 1 indicates toxic.
             """
-            from langkit.metrics.toxicity_onnx import prompt_toxicity_metric
+            if onnx:
+                from langkit.metrics.toxicity_onnx import prompt_toxicity_metric
 
-            return prompt_toxicity_metric
+                return prompt_toxicity_metric
+            else:
+                from langkit.metrics.toxicity import prompt_toxicity_metric
+
+                return prompt_toxicity_metric
 
     class stats:
         def __call__(self) -> MetricCreator:
@@ -331,14 +336,19 @@ def __call__(self) -> MetricCreator:
             return self.toxicity_score()
 
         @staticmethod
-        def toxicity_score() -> MetricCreator:
+        def toxicity_score(onnx: bool = True) -> MetricCreator:
             """
             Analyze the toxicity of the response. The output of this metric ranges from 0 to 1, where 0
             indicates a non-toxic response and 1 indicates a toxic response.
             """
-            from langkit.metrics.toxicity_onnx import response_toxicity_metric
+            if onnx:
+                from langkit.metrics.toxicity_onnx import response_toxicity_metric
 
-            return response_toxicity_metric
+                return response_toxicity_metric
+            else:
+                from langkit.metrics.toxicity import response_toxicity_metric
+
+                return response_toxicity_metric
 
     class stats:
         def __call__(self) -> MetricCreator:
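For callers, the net effect of the library.py change is an opt-out switch on both toxicity metrics: toxicity_score() keeps the ONNX-backed model as the default, and toxicity_score(onnx=False) falls back to the original implementation in langkit.metrics.toxicity. The sketch below shows how that might look from calling code; the lib accessor path is an assumption (the enclosing class names are not visible in the hunks above), while the onnx parameter and the two module paths come directly from the diff.

# Usage sketch, not part of this commit. The lib.prompt.toxicity /
# lib.response.toxicity paths are assumptions about the enclosing classes in
# langkit.metrics.library; only toxicity_score(onnx=...) and the two module
# paths below are confirmed by the diff above.
from langkit.metrics.library import lib

onnx_prompt = lib.prompt.toxicity.toxicity_score()             # default: ONNX-backed scorer
torch_prompt = lib.prompt.toxicity.toxicity_score(onnx=False)  # fall back to langkit.metrics.toxicity
onnx_response = lib.response.toxicity.toxicity_score()
torch_response = lib.response.toxicity.toxicity_score(onnx=False)

# Pinning a backend explicitly is also possible by importing the metric
# creators straight from the modules referenced in the diff.
from langkit.metrics.toxicity_onnx import prompt_toxicity_metric as onnx_prompt_metric
from langkit.metrics.toxicity import prompt_toxicity_metric as torch_prompt_metric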
pyproject.toml (2 changes: 1 addition & 1 deletion)
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langkit"
-version = "0.0.28.dev1"
+version = "0.0.28.dev2"
 description = "A language toolkit for monitoring LLM interactions"
 authors = ["WhyLabs.ai <[email protected]>"]
 homepage = "https://docs.whylabs.ai/docs/large-language-model-monitoring"
