From 3653c5b954ea020e31a485eeb60adcd68decdd24 Mon Sep 17 00:00:00 2001
From: Kirushikesh
Date: Sat, 24 Feb 2024 09:25:10 -0500
Subject: [PATCH] Removed the temperature parameter

---
 src/ragas/llms/base.py      | 17 -----------------
 tests/conftest.py           |  6 +++---
 tests/unit/llms/test_llm.py |  6 +++---
 3 files changed, 6 insertions(+), 23 deletions(-)

diff --git a/src/ragas/llms/base.py b/src/ragas/llms/base.py
index 5979d336b..cfcca3ae2 100644
--- a/src/ragas/llms/base.py
+++ b/src/ragas/llms/base.py
@@ -49,16 +49,11 @@ class BaseRagasLLM(ABC):
     def set_run_config(self, run_config: RunConfig):
         self.run_config = run_config

-    def get_temperature(self, n: int) -> float:
-        """Return the temperature to use for completion based on n."""
-        return 0.3 if n > 1 else 1e-8
-
     @abstractmethod
     def generate_text(
         self,
         prompt: PromptValue,
         n: int = 1,
-        temperature: float = 1e-8,
         stop: t.Optional[t.List[str]] = None,
         callbacks: Callbacks = [],
     ) -> LLMResult:
@@ -69,7 +64,6 @@ async def agenerate_text(
         self,
         prompt: PromptValue,
         n: int = 1,
-        temperature: float = 1e-8,
         stop: t.Optional[t.List[str]] = None,
         callbacks: Callbacks = [],
     ) -> LLMResult:
@@ -79,7 +73,6 @@ async def generate(
         self,
         prompt: PromptValue,
         n: int = 1,
-        temperature: float = 1e-8,
         stop: t.Optional[t.List[str]] = None,
         callbacks: Callbacks = [],
         is_async: bool = True,
@@ -92,7 +85,6 @@ async def generate(
             return await agenerate_text_with_retry(
                 prompt=prompt,
                 n=n,
-                temperature=temperature,
                 stop=stop,
                 callbacks=callbacks,
             )
@@ -103,7 +95,6 @@ async def generate(
                 generate_text_with_retry,
                 prompt=prompt,
                 n=n,
-                temperature=temperature,
                 stop=stop,
                 callbacks=callbacks,
             )
@@ -130,23 +121,19 @@ def generate_text(
         self,
         prompt: PromptValue,
         n: int = 1,
-        temperature: float = 1e-8,
         stop: t.Optional[t.List[str]] = None,
         callbacks: t.Optional[Callbacks] = None,
     ) -> LLMResult:
-        temperature = self.get_temperature(n=n)
         if is_multiple_completion_supported(self.langchain_llm):
             return self.langchain_llm.generate_prompt(
                 prompts=[prompt],
                 n=n,
-                temperature=temperature,
                 stop=stop,
                 callbacks=callbacks,
             )
         else:
             result = self.langchain_llm.generate_prompt(
                 prompts=[prompt] * n,
-                temperature=temperature,
                 stop=stop,
                 callbacks=callbacks,
             )
@@ -160,23 +147,19 @@ async def agenerate_text(
         self,
         prompt: PromptValue,
         n: int = 1,
-        temperature: float = 1e-8,
         stop: t.Optional[t.List[str]] = None,
         callbacks: t.Optional[Callbacks] = None,
     ) -> LLMResult:
-        temperature = self.get_temperature(n=n)
         if is_multiple_completion_supported(self.langchain_llm):
             return await self.langchain_llm.agenerate_prompt(
                 prompts=[prompt],
                 n=n,
-                temperature=temperature,
                 stop=stop,
                 callbacks=callbacks,
             )
         else:
             result = await self.langchain_llm.agenerate_prompt(
                 prompts=[prompt] * n,
-                temperature=temperature,
                 stop=stop,
                 callbacks=callbacks,
             )
diff --git a/tests/conftest.py b/tests/conftest.py
index 03697132b..f9259ed78 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -16,15 +16,15 @@ def llm(self):
         return self

     def generate_text(
-        self, prompt: PromptValue, n=1, temperature=1e-8, stop=None, callbacks=[]
+        self, prompt: PromptValue, n=1, stop=None, callbacks=[]
     ):
         generations = [[Generation(text=prompt.prompt_str)] * n]
         return LLMResult(generations=generations)

     async def agenerate_text(
-        self, prompt: PromptValue, n=1, temperature=1e-8, stop=None, callbacks=[]
+        self, prompt: PromptValue, n=1, stop=None, callbacks=[]
     ):
-        return self.generate_text(prompt, n, temperature, stop, callbacks)
+        return self.generate_text(prompt, n, stop, callbacks)


 @pytest.fixture
diff --git a/tests/unit/llms/test_llm.py b/tests/unit/llms/test_llm.py
index ed50a8d31..3a4334cce 100644
--- a/tests/unit/llms/test_llm.py
+++ b/tests/unit/llms/test_llm.py
@@ -15,12 +15,12 @@ def llm(self):
         return self

     def generate_text(
-        self, prompt: PromptValue, n=1, temperature=1e-8, stop=None, callbacks=[]
+        self, prompt: PromptValue, n=1, stop=None, callbacks=[]
     ):
         generations = [[Generation(text=prompt.prompt_str)] * n]
         return LLMResult(generations=generations)

     async def agenerate_text(
-        self, prompt: PromptValue, n=1, temperature=1e-8, stop=None, callbacks=[]
+        self, prompt: PromptValue, n=1, stop=None, callbacks=[]
     ):
-        return self.generate_text(prompt, n, temperature, stop, callbacks)
+        return self.generate_text(prompt, n, stop, callbacks)
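
Behavioral note on this patch: with get_temperature() removed, the wrapper no
longer forces a per-call temperature (previously 0.3 when n > 1, for diverse
candidates, and 1e-8 otherwise, for near-greedy decoding). Completions now run
at whatever temperature the wrapped LangChain model was constructed with. Below
is a minimal sketch of how a caller can recover the old behavior; it assumes
the wrapper class in src/ragas/llms/base.py is importable as
LangchainLLMWrapper and that the provider model (langchain-openai's ChatOpenAI
here) accepts a temperature keyword -- neither name appears in this diff.

    # Hedged sketch: class and constructor names below are assumptions,
    # not taken from this patch.
    from langchain_openai import ChatOpenAI
    from ragas.llms.base import LangchainLLMWrapper

    # Temperature is now fixed on the model itself rather than being
    # overridden per call by the wrapper.
    near_greedy = LangchainLLMWrapper(ChatOpenAI(temperature=1e-8))
    diverse = LangchainLLMWrapper(ChatOpenAI(temperature=0.3))

Callers that relied on the implicit bump to 0.3 when requesting n > 1
completions (e.g. metrics that sample several candidate answers) now have to
opt into that diversity up front when constructing the model.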