# .gitlab-ci.yml
default:
  image: aminedjeghri/python-uv-node:latest
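
# Assumption inferred from the image name and the commands below: this image is
# expected to ship Python with uv, Node/npm, and the ollama CLI.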

stages:
  - evaluate

variables:
  VENV_PATH: "$CI_PROJECT_DIR/.venv"  # path of the cached virtual environment
  OLLAMA_MODEL_NAME: "phi3:3.8b-mini-4k-instruct-q4_K_M"
  OLLAMA_EMBEDDING_MODEL_NAME: "all-minilm:l6-v2"
  LLM_PROVIDER: "openai"
  OPENAI_DEPLOYMENT_NAME: "phi3:3.8b-mini-4k-instruct-q4_K_M"  # or gpt-4o-mini if you use OpenAI
  OPENAI_BASE_URL: "http://localhost:11434/v1"  # Ollama endpoint, or https://api.openai.com/v1 if you use OpenAI
  OPENAI_API_KEY: "t"  # placeholder; Ollama's OpenAI-compatible endpoint does not check the key
  ENABLE_EVALUATION: "true"  # if "true", the LLMAAJ_* variables below must be set
  # LLMAAJ stands for "LLM as a judge"
  LLMAAJ_PROVIDER: "openai"  # or azure_openai
  # if openai:
  LLMAAJ_OPENAI_DEPLOYMENT_NAME: "phi3:3.8b-mini-4k-instruct-q4_K_M"  # or gpt-4o-mini if you use OpenAI
  LLMAAJ_OPENAI_BASE_URL: "http://localhost:11434/v1"  # Ollama endpoint, or https://api.openai.com/v1 if you use OpenAI
  LLMAAJ_OPENAI_API_KEY: "t"  # placeholder, see OPENAI_API_KEY above
  LLMAAJ_OPENAI_EMBEDDING_DEPLOYMENT_NAME: "text-embedding-ada-002"
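
  # For reference, a hedged sketch (not part of this pipeline) of the same
  # variables pointed at hosted OpenAI instead of local Ollama, following the
  # "or gpt-4o-mini" hints above; the key should then come from a masked
  # GitLab CI/CD variable rather than being hard-coded:
  #
  #   OPENAI_DEPLOYMENT_NAME: "gpt-4o-mini"
  #   OPENAI_BASE_URL: "https://api.openai.com/v1"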

cache:
  key:
    files:  # GitLab allows at most two files in a single cache key
      - pyproject.toml  # or a file under a requirements/ folder if you use one
      - package.json
  paths:
    - node_modules/
    - $VENV_PATH  # cache the virtual environment
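
# GitLab derives the cache key from the listed files, so the cache above is
# invalidated whenever pyproject.toml or package.json changes. A hedged sketch
# (not used in this pipeline) of splitting it into two independent caches,
# which GitLab also supports as a list of cache entries:
#
#   cache:
#     - key:
#         files: [pyproject.toml]
#       paths: [$VENV_PATH]
#     - key:
#         files: [package.json]
#       paths: [node_modules/]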

evaluate:
  stage: evaluate
  script:
    - if [ ! -d node_modules ]; then make install-npm-dependencies; fi
    - |
      if [ ! -d "$VENV_PATH" ]; then
        echo "Virtual environment does not exist. Creating..."
        make install-dev
      fi
    # Run the evaluation. `ollama serve` never exits on its own, so it must run
    # in the background; `ollama serve && make test` would block before the tests.
    - make download-ollama-model
    - ollama serve &
    - sleep 5  # give the server a moment to accept connections
    - make test
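
# A sturdier readiness check than the fixed `sleep 5` above (a sketch, assuming
# curl is available in the image; GET /api/tags is Ollama's model-list endpoint):
#
#   - ollama serve &
#   - |
#     until curl -sf http://localhost:11434/api/tags > /dev/null; do
#       sleep 1
#     done
#   - make test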