# cron.yml
# nightly: Jenkinsfile.monai-pytorch-versions, monai-latest-image, monai-pip, monai-latest-docker, monai-notebooks
name: nightly-crons
on:
  # schedule:
  # - cron: "0 2 * * *" # at 02:00 UTC
  # Allows you to run this workflow manually from the Actions tab
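  # (with the schedule commented out above, manual dispatch is currently the only trigger)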
  workflow_dispatch:
jobs:
  cron-gpu:
    if: github.repository == 'Project-MONAI/MONAI'
    strategy:
      matrix:
        environment:
          - "PT191+CUDA113"
          - "PT110+CUDA113"
          - "PT113+CUDA113"
          - "PTLATEST+CUDA121"
        include:
          # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes
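          # each entry below pairs a pinned torch/torchvision install with an NGC base image
          # whose bundled CUDA is expected to be compatible, so the nightly run covers
          # several PyTorch/CUDA combinations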
          - environment: PT191+CUDA113
            pytorch: "torch==1.9.1 torchvision==0.10.1 --extra-index-url https://download.pytorch.org/whl/cu113"
            base: "nvcr.io/nvidia/pytorch:21.06-py3" # CUDA 11.3
          - environment: PT110+CUDA113
            pytorch: "torch==1.10.2 torchvision==0.11.3 --extra-index-url https://download.pytorch.org/whl/cu113"
            base: "nvcr.io/nvidia/pytorch:21.06-py3" # CUDA 11.3
          - environment: PT113+CUDA113
            pytorch: "torch==1.13.1 torchvision==0.14.1 --extra-index-url https://download.pytorch.org/whl/cu113"
            base: "nvcr.io/nvidia/pytorch:21.06-py3" # CUDA 11.3
          - environment: PTLATEST+CUDA121
            pytorch: "-U torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118"
            base: "nvcr.io/nvidia/pytorch:23.08-py3" # CUDA 12.2
    container:
      image: ${{ matrix.base }}
      options: "--gpus all"
    runs-on: [self-hosted, linux, x64, common]
    steps:
      - uses: actions/checkout@v4
      - name: apt install
        run: |
          apt-get update
          apt-get install -y wget
      - name: Install the dependencies
        run: |
          which python
          python -m pip install --upgrade pip wheel
          python -m pip uninstall -y torch torchvision
          python -m pip install ${{ matrix.pytorch }}
          python -m pip install -r requirements-dev.txt
          python -m pip list
      - name: Run tests and report coverage
        run: |
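          # a random 0-15 minute delay, presumably to stagger jobs that share the self-hosted GPU runners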
          export LAUNCH_DELAY=$[ $RANDOM % 16 * 60 ]
          echo "Sleep $LAUNCH_DELAY"
          sleep $LAUNCH_DELAY
          nvidia-smi
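          # the last line printed by tests.utils becomes CUDA_VISIBLE_DEVICES, i.e. the helper
          # appears to select which GPUs this run should use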
          export CUDA_VISIBLE_DEVICES=$(python -m tests.utils | tail -n 1)
          echo $CUDA_VISIBLE_DEVICES
          trap 'if pgrep python; then pkill python; fi;' ERR
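          # the detached loop below seems intended to keep both visible GPUs busy for the whole run
          # (its output is discarded); the ERR trap above kills stray python processes on failure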
          python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
          python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
          python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
          BUILD_MONAI=1 ./runtests.sh --build --coverage --unittests --disttests # unit tests with coverage report
          BUILD_MONAI=1 ./runtests.sh --build --coverage --net # integration tests with coverage report
          coverage xml --ignore-errors
          if pgrep python; then pkill python; fi
        shell: bash
      - name: Upload coverage
        uses: codecov/codecov-action@v3
        with:
          fail_ci_if_error: false
          files: ./coverage.xml

  cron-pt-image:
    if: github.repository == 'Project-MONAI/MONAI'
    strategy:
      matrix:
        container: ["pytorch:22.10", "pytorch:23.08"]
    container:
      image: nvcr.io/nvidia/${{ matrix.container }}-py3 # testing with the latest pytorch base image
      options: "--gpus all"
    runs-on: [self-hosted, linux, x64, integration]
    steps:
      - uses: actions/checkout@v4
      - name: Install APT dependencies
        run: |
          apt-get update
          DEBIAN_FRONTEND="noninteractive" apt-get install -y libopenslide0
      - name: Install Python dependencies
        run: |
          which python
          python -m pip install --upgrade pip wheel
          python -m pip install -r requirements-dev.txt
          python -m pip list
      - name: Run tests and report coverage
        run: |
          export LAUNCH_DELAY=$[ $RANDOM % 16 * 60 ]
          echo "Sleep $LAUNCH_DELAY"
          sleep $LAUNCH_DELAY
          nvidia-smi
          export CUDA_VISIBLE_DEVICES=$(python -m tests.utils | tail -n 1)
          echo $CUDA_VISIBLE_DEVICES
          trap 'if pgrep python; then pkill python; fi;' ERR
          python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
          python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
          python -c 'import torch; print(torch.rand(5, 3, device=torch.device("cuda:0")))'
          BUILD_MONAI=1 ./runtests.sh --build --coverage --unittests --disttests # unit tests with coverage report
          BUILD_MONAI=1 ./runtests.sh --build --coverage --net # integration tests with coverage report
          coverage xml --ignore-errors
          if pgrep python; then pkill python; fi
        shell: bash
      - name: Upload coverage
        uses: codecov/codecov-action@v3
        with:
          fail_ci_if_error: false
          files: ./coverage.xml

  cron-pip:
    # pip install monai[all] and use it to run unit tests
    if: github.repository == 'Project-MONAI/MONAI'
    strategy:
      matrix:
        container: ["pytorch:23.08"]
    container:
      image: nvcr.io/nvidia/${{ matrix.container }}-py3 # testing with the latest pytorch base image
      options: "--gpus all"
    runs-on: [self-hosted, linux, x64, integration]
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Install the dependencies
        run: |
          which python
          python -m pip install --upgrade pip wheel twine
          python -m pip list
      - name: Run tests and report coverage
        shell: bash
        run: |
          pip uninstall monai
          pip list | grep -iv monai
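          # fetch tags explicitly, presumably so the sdist/wheel build can derive the package version from git metadata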
          git fetch --depth=1 origin +refs/tags/*:refs/tags/*
          root_dir=$PWD
          echo "$root_dir"
          set -e
          # build tar.gz and wheel
          bash runtests.sh --clean # clear any existing dev temp files
          python -m pip uninstall -y torch torchvision
          python setup.py check -m -s
          python setup.py sdist bdist_wheel
          python -m twine check dist/*
          # move packages to a temp dir
          tmp_dir=$(mktemp -d)
          cp dist/monai* "$tmp_dir"
          rm -r build dist monai.egg-info
          cd "$tmp_dir"
          ls -al
          # install from tar.gz
          name=$(ls *.tar.gz | head -n1)
          echo $name
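          # "[all]" also installs MONAI's optional dependencies, so the full test suite can import them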
          python -m pip install $name[all]
          python -c 'import monai; monai.config.print_config()' 2>&1 | grep -iv "unknown"
          python -c 'import monai; print(monai.__file__)'
          # run tests
          cp $root_dir/requirements*.txt "$tmp_dir"
          cp -r $root_dir/tests "$tmp_dir"
          pwd
          ls -al
          export LAUNCH_DELAY=$[ $RANDOM % 16 * 60 ]
          echo "Sleep $LAUNCH_DELAY"
          sleep $LAUNCH_DELAY
          nvidia-smi
          export CUDA_VISIBLE_DEVICES=$(python -m tests.utils | tail -n 1)
          echo $CUDA_VISIBLE_DEVICES
          trap 'if pgrep python; then pkill python; fi;' ERR
          python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
          python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
          python -m pip install -r requirements-dev.txt
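          # the negative lookahead in the pattern skips test_integration* modules, so only unit tests run here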
PYTHONPATH="$tmp_dir":$PYTHONPATH BUILD_MONAI=1 python ./tests/runner.py -p 'test_((?!integration).)' # unit tests
if pgrep python; then pkill python; fi
cron-docker:
if: github.repository == 'Project-MONAI/MONAI'
container:
image: docker://projectmonai/monai:latest # this might be slow and has the pull count limitations
options: "--gpus all"
runs-on: [self-hosted, linux, x64, integration]
steps:
- name: Run tests report coverage
# The docker image process has done the compilation.
# BUILD_MONAI=1 is necessary for triggering the USE_COMPILED flag.
run: |
cd /opt/monai
nvidia-smi
export CUDA_VISIBLE_DEVICES=$(python -m tests.utils | tail -n 1)
echo $CUDA_VISIBLE_DEVICES
trap 'if pgrep python; then pkill python; fi;' ERR
python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
python -c "import torch; print(torch.__version__); print('{} of GPUs available'.format(torch.cuda.device_count()))"
python -c 'import torch; print(torch.rand(5,3, device=torch.device("cuda:0")))'
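          # presumably a sanity check that the NGC CLI available in the image still works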
          ngc --version
          BUILD_MONAI=1 ./runtests.sh --build --coverage --pytype --unittests --disttests # unit tests with pytype checks, coverage report
          BUILD_MONAI=1 ./runtests.sh --build --coverage --net # integration tests with coverage report
          coverage xml --ignore-errors
          if pgrep python; then pkill python; fi
        shell: bash
      - name: Upload coverage
        uses: codecov/codecov-action@v3
        with:
          fail_ci_if_error: false
          files: ./coverage.xml

  cron-tutorial-notebooks:
    if: github.repository == 'Project-MONAI/MONAI'
    needs: cron-gpu # so that monai itself is verified first
    container:
      image: nvcr.io/nvidia/pytorch:23.08-py3 # testing with the latest pytorch base image
      options: "--gpus all --ipc=host"
    runs-on: [self-hosted, linux, x64, integration]
    steps:
      - uses: actions/checkout@v4
      - name: Install MONAI
        id: monai-install
        run: |
          which python
          python -m pip install --upgrade pip wheel
          python -m pip install -r requirements-dev.txt
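          # BUILD_MONAI=1 compiles MONAI's C++/CUDA extensions during the editable install
          # (it triggers the USE_COMPILED flag, as noted in the cron-docker job)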
          BUILD_MONAI=1 python setup.py develop # install monai
          nvidia-smi
          export CUDA_VISIBLE_DEVICES=$(python -m tests.utils | tail -n 1)
          echo $CUDA_VISIBLE_DEVICES
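          # expose the chosen devices as a step output so the notebook step below can reuse the same GPUs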
echo "devices=$CUDA_VISIBLE_DEVICES" >> $GITHUB_OUTPUT
- name: Checkout tutorials and install their requirements
run: |
cd /opt
git clone --depth 1 --branch main --single-branch https://github.com/Project-MONAI/tutorials.git # latest commit of main branch
cd tutorials
python -m pip install -r requirements.txt
- name: Run tutorial notebooks
timeout-minutes: 150
run: |
export CUDA_VISIBLE_DEVICES=${{ steps.monai-install.outputs.devices }}
echo $CUDA_VISIBLE_DEVICES
trap 'if pgrep python; then pkill python; fi;' ERR
python -c $'import torch\na,b=torch.zeros(1,device="cuda:0"),torch.zeros(1,device="cuda:1");\nwhile True:print(a,b)' > /dev/null &
cd /opt/tutorials
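          # runner.sh from the tutorials repo executes the notebooks; debug info is printed before and after the run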
          python -c 'import monai; monai.config.print_debug_info()'
          $(pwd)/runner.sh
          python -c 'import monai; monai.config.print_debug_info()'
          if pgrep python; then pkill python; fi
        shell: bash