-
Notifications
You must be signed in to change notification settings - Fork 234
/
deploy.yml
70 lines (70 loc) · 1.58 KB
/
deploy.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
---
# Akash SDL: serve Meta-Llama-3.1-405B-Instruct (AWQ INT4) with vLLM's
# OpenAI-compatible server across 8 GPUs, using a smaller 8B draft model
# for speculative decoding.
version: "2.0"

services:
  vllm:
    # Pinned by digest for reproducible deploys.
    image: vllm/vllm-openai:v0.6.2@sha256:730ef3d3c17a217b34cfdbfd99be80b3f459e37ef2fc0c5c43ba70752dad08ae
    expose:
      - port: 8000
        as: 8000
        to:
          - global: true
    command:
      - bash
      - "-c"
    args:
      # Folded into a single shell command line at runtime.
      - >-
        vllm serve hugging-quants/Meta-Llama-3.1-405B-Instruct-AWQ-INT4
        --tensor-parallel-size 8
        --speculative-model meta-llama/Meta-Llama-3.1-8B-Instruct
        --num-speculative-tokens 5
        --use-v2-block-manager
    env:
      - "HF_TOKEN="  # Hugging Face API token required for Meta-Llama models
      # - "NCCL_DEBUG=INFO"  # uncomment to enable NCCL debugging
    params:
      storage:
        # Shared memory for NCCL / tensor-parallel IPC.
        shm:
          mount: /dev/shm
        # Model/weights cache so restarts avoid re-downloading.
        data:
          mount: /root/.cache
          readOnly: false

profiles:
  compute:
    vllm:
      resources:
        cpu:
          units: 16
        memory:
          size: 128Gi
        storage:
          # Ephemeral root volume.
          - size: 100Gi
          # Persistent volume backing the HF cache mount above.
          - name: data
            size: 1000Gi
            attributes:
              persistent: true
              class: beta3
          # RAM-backed volume for /dev/shm.
          - name: shm
            size: 10Gi
            attributes:
              class: ram
              persistent: false
        gpu:
          units: 8
          attributes:
            vendor:
              nvidia:
                # Accept either 80 GiB H100s or 80 GiB A100s.
                - model: h100
                  ram: 80Gi
                - model: a100
                  ram: 80Gi
  placement:
    dcloud:
      pricing:
        vllm:
          denom: uakt
          amount: 1000000

deployment:
  vllm:
    dcloud:
      profile: vllm
      count: 1