From d991f6000f86293d99c390d0265e002fe0ac822b Mon Sep 17 00:00:00 2001 From: ranxia Date: Wed, 18 Dec 2024 18:02:58 +0800 Subject: [PATCH] fix llm max tokens --- src/pai_rag/integrations/llms/pai/llm_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pai_rag/integrations/llms/pai/llm_config.py b/src/pai_rag/integrations/llms/pai/llm_config.py index 314e7573..4017bf7a 100644 --- a/src/pai_rag/integrations/llms/pai/llm_config.py +++ b/src/pai_rag/integrations/llms/pai/llm_config.py @@ -4,7 +4,7 @@ from enum import Enum from llama_index.core.constants import DEFAULT_TEMPERATURE -DEFAULT_MAX_TOKENS = 4000 +DEFAULT_MAX_TOKENS = 2000 class DashScopeGenerationModels: