Minimal example for using ET and AOTI #1086
Workflow file for this run
name: Run HQQ tests

on:
  pull_request:
  push:
    branches:
      - main
  workflow_dispatch:

jobs:
  test-cuda:
    uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
    with:
      runner: linux.g5.4xlarge.nvidia.gpu
      gpu-arch-type: cuda
      gpu-arch-version: "12.1"
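      # linux_job.yml is pytorch/test-infra's reusable workflow: it is assumed
      # to provision the g5.4xlarge runner (NVIDIA A10G) with a CUDA 12.1
      # environment and then execute `script` below on it.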
      script: |
echo "::group::Print machine info" | |
uname -a | |
echo "::endgroup::" | |
echo "::group::Install newer objcopy that supports --set-section-alignment" | |
yum install -y devtoolset-10-binutils | |
export PATH=/opt/rh/devtoolset-10/root/usr/bin/:$PATH | |
echo "::endgroup::" | |
echo "::group::Download checkpoints" | |
# Install requirements | |
./install_requirements.sh cuda | |
pip3 list | |
python3 -c 'import torch;print(f"torch: {torch.__version__, torch.version.git_version}")' | |
echo "::endgroup::" | |
echo "::group::Download checkpoints" | |
mkdir -p checkpoints/stories15M | |
pushd checkpoints/stories15M | |
wget https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt | |
wget https://github.com/karpathy/llama2.c/raw/master/tokenizer.model | |
popd | |
echo "::endgroup::" | |
echo "::group::Run inference" | |
export MODEL_PATH=checkpoints/stories15M/stories15M.pt | |
export MODEL_NAME=stories15M | |
export MODEL_DIR=/tmp | |
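        # For each dtype, exercise the same checkpoint three ways: eager,
        # torch.compile, and an AOTInductor shared object produced by export.py
        # and loaded back into generate.py via --dso-path.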
        for DTYPE in bfloat16 float16 float32; do
          python generate.py --dtype ${DTYPE} --device cuda --checkpoint-path ${MODEL_PATH} --temperature 0 > ./output_eager
          cat ./output_eager
          python generate.py --dtype ${DTYPE} --device cuda --compile --checkpoint-path ${MODEL_PATH} --temperature 0 > ./output_compiled
          cat ./output_compiled
          python export.py --dtype ${DTYPE} --device cuda --checkpoint-path ${MODEL_PATH} --output-dso-path ${MODEL_DIR}/${MODEL_NAME}.so
          python generate.py --dtype ${DTYPE} --device cuda --checkpoint-path ${MODEL_PATH} --temperature 0 --dso-path ${MODEL_DIR}/${MODEL_NAME}.so > ./output_aoti
          cat ./output_aoti
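          # --temperature 0 makes decoding greedy, so the three outputs above
          # should agree token-for-token; a stricter check could diff them, e.g.:
          #   diff ./output_eager ./output_compiled && diff ./output_eager ./output_aoti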
echo "**********************************************" | |
echo "******** INT4 HQQ group-wise quantized *******" | |
echo "**********************************************" | |
          python generate.py --dtype ${DTYPE} --device cuda --quant '{"linear:hqq" : {"groupsize": 32}}' --checkpoint-path ${MODEL_PATH} --temperature 0 > ./output_eager
          cat ./output_eager
          python generate.py --dtype ${DTYPE} --device cuda --compile --quant '{"linear:hqq" : {"groupsize": 32}}' --checkpoint-path ${MODEL_PATH} --temperature 0 > ./output_compiled
          cat ./output_compiled
          python export.py --dtype ${DTYPE} --device cuda --quant '{"linear:hqq" : {"groupsize": 32}}' --checkpoint-path ${MODEL_PATH} --output-dso-path ${MODEL_DIR}/${MODEL_NAME}.so
          python generate.py --dtype ${DTYPE} --device cuda --checkpoint-path ${MODEL_PATH} --temperature 0 --dso-path ${MODEL_DIR}/${MODEL_NAME}.so > ./output_aoti
          cat ./output_aoti
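          # Note: this export overwrites ${MODEL_DIR}/${MODEL_NAME}.so from the
          # unquantized pass, so the generate call above runs the HQQ artifact;
          # the quantization is baked into the DSO, hence no --quant flag at load time.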
        done
        echo "tests complete"
        echo "******************************************"
        echo "::endgroup::"