Fix test failures (#244)
* Fix test failures
* Fix llm connection error
* Fix asyncio error
* Fix eventloop error
* Improve api call stability
moria97 authored Oct 13, 2024
1 parent cd4d83b commit dfd1ff7
Showing 5 changed files with 20 additions and 13 deletions.
src/pai_rag/integrations/llms/pai/llm_utils.py (3 additions, 0 deletions)

@@ -32,6 +32,7 @@ def create_llm(llm_config: PaiBaseLlmConfig):
             system_prompt=llm_config.system_prompt,
             api_key=llm_config.api_key,
             max_tokens=llm_config.max_tokens,
+            reuse_client=False,
         )
     elif isinstance(llm_config, DashScopeLlmConfig):
         logger.info(
@@ -50,6 +51,7 @@ def create_llm(llm_config: PaiBaseLlmConfig):
             is_chat_model=True,
             api_key=llm_config.api_key or os.environ.get("DASHSCOPE_API_KEY"),
             max_tokens=llm_config.max_tokens,
+            reuse_client=False,
         )
     elif isinstance(llm_config, PaiEasLlmConfig):
         logger.info(
@@ -67,6 +69,7 @@ def create_llm(llm_config: PaiBaseLlmConfig):
             system_prompt=llm_config.system_prompt,
             api_key=llm_config.token,
             max_tokens=llm_config.max_tokens,
+            reuse_client=False,
         )
     else:
         raise ValueError(f"Unknown LLM source: '{llm_config}'")
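
Background on this change: reuse_client=False tells the llama-index LLM wrapper to create a fresh client per request instead of caching one across calls; the wrapper's own docs note that disabling reuse can improve stability under large volumes of async calls. A cached async client stays tied to the event loop it first ran on, which is a plausible source of the "asyncio" and "eventloop" errors named in the commit message. A minimal illustrative sketch of that failure mode, written against httpx directly (a hypothetical example, not code from this repo):

# Hypothetical sketch, not from this repo: a module-level async client
# can end up bound to a closed event loop when each call uses asyncio.run(),
# which is roughly the hazard that reuse_client=False sidesteps.
import asyncio
import httpx

client = httpx.AsyncClient()  # cached once, analogous to a reused client

async def call_api() -> int:
    resp = await client.get("https://example.com")
    return resp.status_code

print(asyncio.run(call_api()))  # first run: OK, pool binds to this loop
print(asyncio.run(call_api()))  # second run: may raise "Event loop is closed",
                                # since the pooled connection references the
                                # loop that asyncio.run() already tore down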
tests/modules/agent/test_fc_agent.py (1 addition, 1 deletion)

@@ -5,7 +5,7 @@
 from pai_rag.integrations.llms.pai.pai_llm import PaiLlm
 from pai_rag.integrations.llms.pai.llm_config import DashScopeLlmConfig

-fc_llm_config = DashScopeLlmConfig(model_name="qwen2-7b-instruct")
+fc_llm_config = DashScopeLlmConfig(model_name="qwen-max")
 fc_llm = PaiLlm(fc_llm_config)
tests/modules/intentdetection/test_intent_detection.py (1 addition, 1 deletion)

@@ -3,7 +3,7 @@
 from pai_rag.integrations.llms.pai.pai_llm import PaiLlm
 from pai_rag.integrations.llms.pai.llm_config import DashScopeLlmConfig

-fc_llm_config = DashScopeLlmConfig(model_name="qwen2.5-7b-instruct")
+fc_llm_config = DashScopeLlmConfig(model_name="qwen-max")
 fc_llm = PaiLlm(fc_llm_config)

 intents = {
tests/modules/llm/test_function_calling_llm.py (1 addition, 1 deletion)

@@ -2,7 +2,7 @@
 from pai_rag.integrations.llms.pai.pai_llm import PaiLlm
 from pai_rag.integrations.llms.pai.llm_config import DashScopeLlmConfig

-fc_llm_config = DashScopeLlmConfig(model_name="qwen2-7b-instruct")
+fc_llm_config = DashScopeLlmConfig(model_name="qwen-max")
 fc_llm = PaiLlm(fc_llm_config)
tests/modules/llm/test_llm.py (14 additions, 10 deletions)

@@ -1,25 +1,29 @@
 from llama_index.core.base.llms.types import ChatMessage, MessageRole
 from pai_rag.integrations.llms.pai.pai_llm import PaiLlm
 from pai_rag.integrations.llms.pai.llm_config import DashScopeLlmConfig
+import pytest

-llm_config = DashScopeLlmConfig(model_name="qwen-turbo")
-llm = PaiLlm(llm_config)

-def test_dashscope_llm_complete():
+@pytest.fixture(scope="module", autouse=True)
+def llm():
+    llm_config = DashScopeLlmConfig(model_name="qwen-turbo")
+    return PaiLlm(llm_config)
+
+
+def test_dashscope_llm_complete(llm):
     response = llm.complete("What is the result of 15+22?")
     assert "37" in response.text


-def test_dashscope_llm_stream_complete():
+def test_dashscope_llm_stream_complete(llm):
     response = ""
     stream_response = llm.stream_complete("What is the result of 15+23?")
     for token in stream_response:
         response += token.delta
     assert "38" in response


-def test_dashscope_llm_chat():
+def test_dashscope_llm_chat(llm):
     messages = [
         ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
         ChatMessage(role=MessageRole.USER, content="What is the result of 15+24?"),
@@ -28,7 +32,7 @@ def test_dashscope_llm_chat():
     assert "39" in response.message.content


-def test_dashscope_llm_stream_chat():
+def test_dashscope_llm_stream_chat(llm):
     messages = [
         ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
         ChatMessage(role=MessageRole.USER, content="What is the result of 15+25?"),
@@ -40,20 +44,20 @@ def test_dashscope_llm_stream_chat():
     assert "40" in response


-async def test_dashscope_llm_acomplete():
+async def test_dashscope_llm_acomplete(llm):
     response = await llm.acomplete("What is the result of 15+22?")
     assert "37" in response.text


-async def test_dashscope_llm_astream_complete():
+async def test_dashscope_llm_astream_complete(llm):
     response = ""
     stream_response = await llm.astream_complete("What is the result of 16+22?")
     async for token in stream_response:
         response += token.delta
     assert "38" in response


-async def test_dashscope_llm_achat():
+async def test_dashscope_llm_achat(llm):
     messages = [
         ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
         ChatMessage(role=MessageRole.USER, content="What is the result of 17+22?"),
@@ -62,7 +66,7 @@ async def test_dashscope_llm_achat():
     assert "39" in response.message.content


-async def test_dashscope_llm_astream_chat():
+async def test_dashscope_llm_astream_chat(llm):
     messages = [
         ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
         ChatMessage(role=MessageRole.USER, content="What is the result of 18+22?"),
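
The substantive change in this file: PaiLlm is no longer instantiated at import time. A module-scoped, autouse pytest fixture now builds it once per module and injects it into each test, so the client is created inside pytest's managed environment rather than when the file is collected. (The bare async def tests presumably rely on pytest-asyncio or a similar plugin configured outside this commit.) A standalone sketch of the same fixture pattern, using a hypothetical placeholder class instead of PaiLlm:

# Minimal sketch of a module-scoped fixture; FakeLlm is a made-up stand-in.
import pytest

class FakeLlm:
    """Stands in for PaiLlm; counts how many instances get built."""
    instances = 0

    def __init__(self) -> None:
        FakeLlm.instances += 1

@pytest.fixture(scope="module")
def llm():
    # Created lazily on first use, once per test module.
    return FakeLlm()

def test_built_once(llm):
    assert FakeLlm.instances == 1

def test_reused_within_module(llm):
    # The same instance is shared by every test in this module.
    assert FakeLlm.instances == 1

The module scope trades isolation for speed: every test in the file shares one client, which keeps connection churn down while still avoiding import-time construction.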
