scalellm.yml
version: '2.2'
services:
  scalellm:
    image: vectorchai/scalellm:latest
    hostname: scalellm
    container_name: scalellm
    ports:
      - 8888:8888
      - 9999:9999
    environment:
      - HF_MODEL_ID=${HF_MODEL_ID:-TheBloke/Llama-2-7B-chat-AWQ}
      - DEVICE=${DEVICE:-auto}
      - HF_MODEL_REVISION=${HF_MODEL_REVISION}
      - HF_MODEL_ALLOW_PATTERN=${HF_MODEL_ALLOW_PATTERN}
      - HF_MODEL_CACHE_DIR=${HF_MODEL_CACHE_DIR}
      - HUGGING_FACE_HUB_TOKEN=${HUGGING_FACE_HUB_TOKEN}
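      # The values above are interpolated from the shell environment or from a
      # .env file next to this compose file; HF_MODEL_ID falls back to the AWQ
      # Llama-2 chat model when unset. A minimal .env sketch (model ID and token
      # below are illustrative placeholders, not project defaults):
      #   HF_MODEL_ID=TheBloke/Llama-2-13B-chat-AWQ
      #   DEVICE=cuda
      #   HUGGING_FACE_HUB_TOKEN=<your-hf-token>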
    volumes:
      - $HOME/.cache/huggingface/hub:/models
    shm_size: 1g
    command: --logtostderr
    # turn on GPU access
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]

  scalellm-gateway:
    image: vectorchai/scalellm-gateway:latest
    hostname: scalellm-gateway
    container_name: scalellm-gateway
    ports:
      - 8080:8080
    command: --grpc-server=scalellm:8888 --logtostderr
    depends_on:
      - scalellm

  chatbot-ui:
    image: vectorchai/chatbot-ui:latest
    hostname: chatbot-ui
    container_name: chatbot-ui
    ports:
      - 3000:3000
    environment:
      - OPENAI_API_HOST=http://scalellm-gateway:8080
      - OPENAI_API_KEY=YOUR_API_KEY
      - DEFAULT_SYSTEM_PROMPT=You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.
    depends_on:
      - scalellm-gateway
      - scalellm
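
# Usage sketch, assuming the Docker Compose v2 CLI and the NVIDIA container
# toolkit for the GPU reservation above:
#   docker compose -f scalellm.yml up -d
# The chatbot UI is then served on http://localhost:3000, and it talks to the
# gateway on http://localhost:8080 as its OpenAI-compatible API host.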