diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
new file mode 100644
index 0000000..7620cbb
--- /dev/null
+++ b/.github/workflows/release.yaml
@@ -0,0 +1,30 @@
+name: Release Charts
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  release:
+    permissions:
+      contents: write
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+
+      - name: Install Helm
+        uses: azure/setup-helm@v3
+
+      - name: Run chart-releaser
+        uses: helm/chart-releaser-action@v1.6.0
+        env:
+          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
diff --git a/templates/_helpers.tpl b/templates/_helpers.tpl
index 2868cb6..3d83a83 100644
--- a/templates/_helpers.tpl
+++ b/templates/_helpers.tpl
@@ -62,7 +62,7 @@ Create the name of the service account to use
 {{- end }}
 
 {{/*
-Create the model list
+Create the included model list
 */}}
 {{- define "ollama.modelList" -}}
 {{- $modelList := default list}}
@@ -74,4 +74,4 @@ Create the model list
 {{- end}}
 {{- $modelList = $modelList | uniq}}
 {{- default (join " " $modelList) -}}
-{{- end -}}
\ No newline at end of file
+{{- end -}}
diff --git a/templates/deployment.yaml b/templates/deployment.yaml
index ba8b506..1d949b8 100644
--- a/templates/deployment.yaml
+++ b/templates/deployment.yaml
@@ -105,11 +105,25 @@ spec:
             successThreshold: {{ .Values.readinessProbe.successThreshold }}
             failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
           {{- end }}
-          {{- if or .Values.ollama.models .Values.ollama.defaultModel }}
+          {{- if or .Values.ollama.models .Values.ollama.defaultModel .Values.ollama.customModels }}
           lifecycle:
             postStart:
               exec:
-                command: [ "/bin/sh", "-c", "{{- printf "echo %s | xargs -n1 /bin/ollama pull %s" (include "ollama.modelList" .) (ternary "--insecure" "" .Values.ollama.insecure)}}" ]
+                command:
+                  - "/bin/sh"
+                  - "-c"
+                  - |
+                    apt-get update && apt-get install wget -y
+                    mkdir -p /models
+                    {{- if or .Values.ollama.models .Values.ollama.defaultModel }}
+                    {{ printf "echo %s | xargs -n1 /bin/ollama pull %s" (include "ollama.modelList" .) (ternary "--insecure" "" .Values.ollama.insecure)}}
+                    {{- end }}
+                    {{- range $m := .Values.ollama.customModels }}
+                    {{- $filename := splitList "/" $m.url | last }}
+                    wget --no-verbose --show-progress --progress=dot:mega -O /models/{{ $filename }} {{ $m.url }}
+                    echo "FROM /models/{{ $filename }}" > /models/Modelfile-{{ $filename }}
+                    /bin/ollama create {{ $m.name }} -f /models/Modelfile-{{ $filename }}
+                    {{- end }}
           {{- end }}
       volumes:
         - name: ollama-data
@@ -140,4 +154,4 @@ spec:
       {{- with .Values.tolerations }}
       {{- toYaml . | nindent 8 }}
       {{- end }}
-{{- end }}
\ No newline at end of file
+{{- end }}
diff --git a/values.yaml b/values.yaml
index da76367..d860437 100644
--- a/values.yaml
+++ b/values.yaml
@@ -47,6 +47,16 @@ ollama:
   #   - mistral
   models: {}
 
+  # -- List of custom models to pull and install at container startup
+  # Similar to the above, the more you have, the longer the container will take to start
+  # This expects a dictionary with the keys "name" (for the name to import the
+  # model as inside Ollama) and "url" (which is the location where the custom
+  # model's gguf file is located, probably huggingface somewhere)
+  # customModels:
+  #   - name: ggml-gritlm-7b:q4_0
+  #     url: https://huggingface.co/dranger003/GritLM-7B-GGUF/resolve/main/ggml-gritlm-7b-q4_k.gguf
+  customModels: {}
+
   # -- Add insecure flag for pulling at container startup
   insecure: false
 