-
Notifications
You must be signed in to change notification settings - Fork 448
215 lines (209 loc) · 7.58 KB
/
test.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
name: test

# Trigger policy:
#   - PRs touching build/inference sources or the test tooling itself
#   - pushes to main that bump the version file, and release tags
#   - manual dispatch with an optional pytest marker filter
on:
  pull_request:
    paths:
      - ".github/scripts/test_triton_server.py"
      - ".github/workflows/test.yml"
      - "cmake/**"
      - "src/**"
      - "3rdparty/**"
      - "lmdeploy/**"
      - "requirements/**"
      - "requirements.txt"
      - "CMakeLists.txt"
      - "setup.py"
  push:
    branches:
      - main
    paths:
      - "lmdeploy/version.py"
    tags:
      - "v*.*.*"
  workflow_dispatch:
    inputs:
      markers:
        required: false
        description: 'Tested markers. eg: "-m internlm_chat_7b"'
        type: string
        default: ''

# Host-side paths mounted into the test containers below.
env:
  HOST_PIP_CACHE_DIR: /nvme/github-actions/pip-cache
  HOST_LOCALTIME: /usr/share/zoneinfo/Asia/Shanghai
jobs:
  # Functional test suite, run inside the Triton server base image on a
  # self-hosted A100 node.
  test_functions:
    runs-on: [self-hosted, linux-a100]
    timeout-minutes: 4320  # 72 hours
    environment: 'prod'
    env:
      REPORT_DIR: /nvme/qa_test_models/test-reports
    container:
      image: nvcr.io/nvidia/tritonserver:22.12-py3
      options: "--gpus=all --ipc=host --user root -e PIP_CACHE_DIR=/root/.cache/pip"
      volumes:
        - /nvme/github-actions/pip-cache:/root/.cache/pip
        - /nvme/github-actions/packages:/root/packages
        - /nvme/qa_test_models:/nvme/qa_test_models
        - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime:ro
    steps:
      - name: Setup systems
        run: |
          # Drop the stale CUDA apt source shipped in the base image, then
          # install build/report dependencies (allure needs a JRE).
          rm /etc/apt/sources.list.d/cuda*.list
          apt-get update && apt-get install -y --no-install-recommends rapidjson-dev \
              libgoogle-glog-dev libgl1 openjdk-8-jre-headless
          dpkg -i /root/packages/allure_2.24.1-1_all.deb
          rm -rf /var/lib/apt/lists/*
      - name: Clone repository
        # NOTE(review): checkout@v2 runs on a deprecated Node 12 runtime;
        # consider upgrading to v4 once the self-hosted runner supports it.
        uses: actions/checkout@v2
      - name: Install pytorch
        run: |
          python3 -m pip cache dir
          python3 -m pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117
      - name: Build lmdeploy
        run: |
          python3 -m pip install cmake
          python3 -m pip install -r requirements/build.txt
          # use cached build
          cp -r ../../build build
          cd build
          cmake .. \
              -DCMAKE_BUILD_TYPE=RelWithDebInfo \
              -DCMAKE_EXPORT_COMPILE_COMMANDS=1 \
              -DCMAKE_INSTALL_PREFIX=./install \
              -DBUILD_PY_FFI=ON \
              -DBUILD_MULTI_GPU=ON \
              -DCMAKE_CUDA_FLAGS="-lineinfo" \
              -DUSE_NVTX=ON \
              -DSM=80 \
              -DCMAKE_CUDA_ARCHITECTURES=80 \
              -DBUILD_TEST=OFF
          make -j$(nproc) && make install
      - name: Install lmdeploy
        run: |
          python3 -m pip install packaging protobuf transformers_stream_generator
          python3 -m pip install -r requirements.txt -r requirements/test.txt
          python3 -m pip install .
      - name: Check env
        run: |
          python3 -m pip list
          lmdeploy check_env
      - name: Test lmdeploy
        run: |
          echo "TODO: awaiting PR of adding autotest"
          # pytest autotest ${{github.event.inputs.markers}} --alluredir=allure-results --clean-alluredir
      - name: Generate reports
        if: always()
        run: |
          # Publish an allure report to the shared report volume, but only if
          # the test step actually produced results.
          # Fix: `test -D` is not a valid operator; directory check is `-d`.
          if test -d "allure-results"; then
            export date_today="$(date +'%Y%m%d-%H%M%S')"
            export report_dir="$REPORT_DIR/$date_today"
            # Fix: log the variable actually used ($ALLURE_DIR was undefined).
            echo "Save report to $report_dir"
            allure generate -c -o $report_dir
          fi
      - name: Clear workfile
        if: always()
        run: |
          # Recreate the workspace empty and world-writable so the next run
          # (possibly under a different uid) can use it.
          export workdir=$(pwd)
          cd ..
          rm -rf $workdir
          mkdir $workdir
          chmod -R 777 $workdir

  # End-to-end Triton server test: convert a HF model to turbomind format,
  # serve it over gRPC, and exercise it from a client script.
  test_triton:
    runs-on: [self-hosted, linux-a100]
    timeout-minutes: 4320  # 72 hours
    environment: 'prod'
    env:
      HF_MODEL: /nvme/qa_test_models/internlm-chat-20b
      WORKDIR: /nvme/qa_test_models/triton_workspace
      TB_MODEL: internlm-chat-20b-fp16-tp2
      GRPC_PORT: 33337
    steps:
      - name: Clone repository
        # NOTE(review): checkout@v2 runs on a deprecated Node 12 runtime;
        # consider upgrading to v4 once the self-hosted runner supports it.
        uses: actions/checkout@v2
      - name: Create test container
        run: |
          # Start a long-lived container (tail -f /dev/null keeps it alive);
          # later steps exec into it via the id saved to $GITHUB_ENV.
          export CONTAINER_ID=$(docker create \
              --rm \
              --gpus='"device=0,1"' \
              --shm-size 16g \
              --cap-add=SYS_PTRACE \
              --cap-add=SYS_ADMIN \
              --security-opt seccomp=unconfined \
              --name lmdeploy-ci-triton \
              --workdir /__w/lmdeploy/lmdeploy \
              --env PIP_CACHE_DIR=/root/.cache/pip \
              --env NCCL_LAUNCH_MODE=GROUP \
              -v $(pwd)/../../:/__w \
              -v ${HF_MODEL}:/root/workspace/hf_model \
              -v ${WORKDIR}:/root/workspace/workdir \
              -v ${HOST_PIP_CACHE_DIR}:/root/.cache/pip \
              -v ${HOST_LOCALTIME}:/etc/localtime:ro \
              openmmlab/lmdeploy:latest tail -f /dev/null \
              )
          docker start $CONTAINER_ID
          echo "CONTAINER_ID=$CONTAINER_ID"
          echo "CONTAINER_ID=$CONTAINER_ID" >> $GITHUB_ENV
      - name: Build lmdeploy from source
        run: |
          # Reuse the cached out-of-tree build, then configure and build
          # inside the container (proxy needed for network access there).
          docker exec $CONTAINER_ID cp -r ../../build build
          docker exec --workdir /__w/lmdeploy/lmdeploy/build \
              --env http_proxy=${{secrets.PROXY}} \
              --env https_proxy=${{secrets.PROXY}} \
              $CONTAINER_ID cmake .. \
              -DCMAKE_BUILD_TYPE=RelWithDebInfo \
              -DCMAKE_EXPORT_COMPILE_COMMANDS=1 \
              -DCMAKE_INSTALL_PREFIX=./install \
              -DBUILD_PY_FFI=ON \
              -DBUILD_MULTI_GPU=ON \
              -DCMAKE_CUDA_FLAGS="-lineinfo" \
              -DUSE_NVTX=ON \
              -DSM=80 \
              -DCMAKE_CUDA_ARCHITECTURES=80 \
              -DBUILD_TEST=OFF
          docker exec --workdir /__w/lmdeploy/lmdeploy/build $CONTAINER_ID make -j$(nproc)
          docker exec --workdir /__w/lmdeploy/lmdeploy/build $CONTAINER_ID make install
      - name: Install lmdeploy
        run: |
          docker exec \
              --env http_proxy=${{secrets.PROXY}} \
              --env https_proxy=${{secrets.PROXY}} \
              $CONTAINER_ID python3 -m pip install tritonclient[grpc]
          docker exec \
              --env http_proxy=${{secrets.PROXY}} \
              --env https_proxy=${{secrets.PROXY}} \
              $CONTAINER_ID python3 -m pip install -r requirements/test.txt
          docker exec $CONTAINER_ID python3 -m pip install .
          # docker exec $CONTAINER_ID check_env
      - name: Convert to turbomind model
        run: |
          # Tensor-parallel (tp 2) conversion matching the two GPUs mapped in.
          docker exec $CONTAINER_ID \
              lmdeploy convert \
              --model-name internlm-chat-20b \
              --model-path /root/workspace/hf_model \
              --tp 2 \
              --dst-path /root/workspace/workdir/${TB_MODEL}
      - name: Start triton server service
        run: |
          docker exec --detach $CONTAINER_ID \
              tritonserver \
              --model-repository=/root/workspace/workdir/${TB_MODEL}/model_repository \
              --allow-http=0 \
              --allow-grpc=1 \
              --grpc-port=${GRPC_PORT} \
              --log-verbose=0 \
              --allow-metrics=1
          # wait for triton server to fully start up
          sleep 180s
      - name: Test triton server
        run: |
          # no_proxy keeps the gRPC client talking to the local server directly.
          docker exec \
              --env no_proxy=localhost,127.0.0.1 \
              $CONTAINER_ID python3 .github/scripts/test_triton_server.py --port ${GRPC_PORT}
      - name: Clear workfile
        if: always()
        run: |
          # Clean the checkout, the converted model, and stop the container
          # (started with --rm, so stopping also removes it).
          export workdir=$(pwd)
          docker exec --workdir /__w/lmdeploy $CONTAINER_ID rm -rf lmdeploy
          mkdir $workdir
          chmod -R 777 $workdir
          docker exec --workdir /__w/lmdeploy $CONTAINER_ID rm -rf /root/workspace/workdir/${TB_MODEL}
          docker stop $CONTAINER_ID