Skip to content

Commit

Permalink
Add parameterization to test_llm_server
Browse files — browse the repository at this point in the history
  • Loading branch information
stbaione committed Oct 30, 2024
1 parent 498ae81 commit 436ab39
Showing 1 changed file with 14 additions and 8 deletions.
22 changes: 14 additions & 8 deletions — tests/apps/llm/cpu_llm_server_test.py
Original file line number · Diff line number · Diff line change
Expand Up @@ -137,8 +137,9 @@ def wait_for_server(url, timeout=10):


@pytest.fixture(scope="module")
def llm_server(model_test_dir, available_port):
def llm_server(request, model_test_dir, available_port):
# Start the server
model_file = request.param["model_file"]
server_process = subprocess.Popen(
[
"python",
Expand All @@ -147,7 +148,7 @@ def llm_server(model_test_dir, available_port):
f"--tokenizer={model_test_dir / 'tokenizer.json'}",
f"--model_config={model_test_dir / 'edited_config.json'}",
f"--vmfb={model_test_dir / 'model.vmfb'}",
f"--parameters={model_test_dir / 'open-llama-3b-v2-f16.gguf'}",
f"--parameters={model_test_dir / model_file}",
f"--device={settings['device']}",
]
)
Expand Down Expand Up @@ -190,13 +191,18 @@ def do_generate(prompt, port):


@pytest.mark.parametrize(
"model_test_dir",
"model_test_dir,llm_server",
[
{
"repo_id": "SlyEcho/open_llama_3b_v2_gguf",
"model_file": "open-llama-3b-v2-f16.gguf",
"tokenizer_id": "openlm-research/open_llama_3b_v2",
}
(
{
"repo_id": "SlyEcho/open_llama_3b_v2_gguf",
"model_file": "open-llama-3b-v2-f16.gguf",
"tokenizer_id": "openlm-research/open_llama_3b_v2",
},
{
"model_file": "open-llama-3b-v2-f16.gguf",
},
)
],
indirect=True,
)
Expand Down

0 comments on commit 436ab39

Please sign in to comment.