diff --git a/src/lm_buddy/constants.py b/src/lm_buddy/constants.py
index 6239ecb..2ae0d5e 100644
--- a/src/lm_buddy/constants.py
+++ b/src/lm_buddy/constants.py
@@ -5,3 +5,8 @@
     "LM_BUDDY_HOME",
     str(Path.home() / ".lm_buddy"),
 )
+
+LM_BUDDY_RESULTS_PATH: str = os.getenv(
+    "LM_BUDDY_RESULTS",
+    f"{LM_BUDDY_HOME_PATH}/results",
+)
diff --git a/src/lm_buddy/jobs/evaluation/prometheus.py b/src/lm_buddy/jobs/evaluation/prometheus.py
index 7a0fb56..f916b76 100644
--- a/src/lm_buddy/jobs/evaluation/prometheus.py
+++ b/src/lm_buddy/jobs/evaluation/prometheus.py
@@ -17,7 +17,7 @@
 
 from lm_buddy.configs.huggingface import AutoTokenizerConfig
 from lm_buddy.configs.jobs.prometheus import PrometheusJobConfig
-from lm_buddy.constants import LM_BUDDY_HOME_PATH
+from lm_buddy.constants import LM_BUDDY_RESULTS_PATH
 from lm_buddy.jobs.asset_loader import HuggingFaceAssetLoader
 from lm_buddy.jobs.common import EvaluationResult
 from lm_buddy.preprocessing import format_dataset_with_prompt
@@ -161,8 +161,8 @@ def data_generator():
     result_dataset = Dataset.from_generator(data_generator)
 
     # Save dataset to disk
-    storage_path = config.evaluation.storage_path or LM_BUDDY_HOME_PATH
-    result_dataset_path = Path(storage_path) / "datasets" / config.name / "prometheus"
+    storage_path = config.evaluation.storage_path or LM_BUDDY_RESULTS_PATH
+    result_dataset_path = Path(storage_path) / config.name / "prometheus"
     result_dataset.save_to_disk(result_dataset_path)
 
     return result_dataset_path
diff --git a/src/lm_buddy/jobs/evaluation/ragas.py b/src/lm_buddy/jobs/evaluation/ragas.py
index d773460..9da7027 100644
--- a/src/lm_buddy/jobs/evaluation/ragas.py
+++ b/src/lm_buddy/jobs/evaluation/ragas.py
@@ -8,7 +8,7 @@
 from ragas.metrics import answer_relevancy, context_precision, context_recall, faithfulness
 
 from lm_buddy.configs.jobs.ragas import RagasJobConfig
-from lm_buddy.constants import LM_BUDDY_HOME_PATH
+from lm_buddy.constants import LM_BUDDY_RESULTS_PATH
 from lm_buddy.jobs.asset_loader import HuggingFaceAssetLoader
 from lm_buddy.jobs.common import EvaluationResult
 from lm_buddy.preprocessing import format_dataset_with_prompt
@@ -60,8 +60,8 @@ def run_eval(config: RagasJobConfig) -> Path:
     result_dataset = Dataset.from_pandas(result.to_pandas())
 
     # Save dataset to disk
-    storage_path = config.evaluation.storage_path or LM_BUDDY_HOME_PATH
-    result_dataset_path = Path(storage_path) / "datasets" / config.name / "ragas"
+    storage_path = config.evaluation.storage_path or LM_BUDDY_RESULTS_PATH
+    result_dataset_path = Path(storage_path) / config.name / "ragas"
     result_dataset.save_to_disk(result_dataset_path)
 
     return result_dataset_path