# run-readme-pr-mps.yml
name: Run the README instructions - with stories - on MPS/MacOS
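
# Triggered on every pull request, on pushes to main, and manually via workflow_dispatch.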
on:
  pull_request:
  push:
    branches:
      - main
  workflow_dispatch:

jobs:
  test-readme-mps-macos:
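    # Delegates to the shared macOS job workflow in pytorch/test-infra, which runs the inline script below on the requested runner.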
    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
    with:
      runner: macos-m1-stable # needs MPS, was macos-m1-stable
      script: |
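        # Create and activate an isolated conda environment (Python pinned to 3.10.11) for this run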
        conda create -y -n test-readme-mps-macos python=3.10.11
        conda activate test-readme-mps-macos
        set -x
        # NS: Remove any previous installation of torch first,
        # as this script does not install anything into the conda env but rather as a system-level dependency
        pip3 uninstall -y torch || true
        set -eou pipefail

        echo "::group::Print machine info"
        uname -a
        sysctl machdep.cpu.brand_string
        sysctl machdep.cpu.core_count
        echo "::endgroup::"
# echo "::group::Install newer objcopy that supports --set-section-alignment"
# yum install -y devtoolset-10-binutils
# export PATH=/opt/rh/devtoolset-10/root/usr/bin/:$PATH
# echo "::endgroup::"
echo "::group::Create script to run README"
        python3 scripts/updown.py --file README.md --replace 'llama3:stories15M,-l 3:-l 2,meta-llama/Meta-Llama-3-8B-Instruct:stories15M' --suppress huggingface-cli,HF_TOKEN > ./run-readme.sh
        # for good measure, if something happened to the updown processor
        # and it did not error out, fail with an exit 1
        echo "exit 1" >> ./run-readme.sh
        echo "::endgroup::"
echo "::group::Run README"
echo "*******************************************"
cat ./run-readme.sh
echo "*******************************************"
bash -x ./run-readme.sh
echo "::endgroup::"
echo "::group::Completion"
echo "tests complete"
echo "*******************************************"
echo "::endgroup::"
#  test-quantization-mps-macos:
#    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
#    with:
#      runner: macos-m1-stable # needs MPS, was macos-m1-stable
#      script: |
#        set -x
#        conda create -y -n test-quantization-mps-macos python=3.10.11
#        conda activate test-quantization-mps-macos
#        # NS: Remove any previous installation of torch first,
#        # as this script does not install anything into the conda env but rather as a system-level dependency
#        pip3 uninstall -y torch || true
#        set -eou pipefail
#
#        echo "::group::Print machine info"
#        uname -a
#        sysctl machdep.cpu.brand_string
#        sysctl machdep.cpu.core_count
#        echo "::endgroup::"
#
#        # echo "::group::Install newer objcopy that supports --set-section-alignment"
#        # yum install -y devtoolset-10-binutils
#        # export PATH=/opt/rh/devtoolset-10/root/usr/bin/:$PATH
#        # echo "::endgroup::"
#
#        echo "::group::Create script to run quantization"
#        python3 scripts/updown.py --file docs/quantization.md --replace llama3:stories15M --suppress huggingface-cli,HF_TOKEN > ./run-quantization.sh
#        # for good measure, if something happened to the updown processor
#        # and it did not error out, fail with an exit 1
#        echo "exit 1" >> ./run-quantization.sh
#        echo "::endgroup::"
#
#        echo "::group::Run quantization"
#        echo "*******************************************"
#        cat ./run-quantization.sh
#        echo "*******************************************"
#        bash -x ./run-quantization.sh
#        echo "::endgroup::"
#
#        echo "::group::Completion"
#        echo "tests complete"
#        echo "*******************************************"
#        echo "::endgroup::"
#