# Enable end to end non-DPS testing (#374)
name: MLIR-TensorRT CI

on:
  pull_request:
    branches:
      - main
    types: [synchronize, opened, reopened, ready_for_review]
    paths: ["mlir-tensorrt/**"]

env:
  DEFAULT_IMAGE: ghcr.io/nvidia/tensorrt-incubator/mlir-tensorrt:cuda12.5-ubuntu-llvm17
  REGISTRY: ghcr.io
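# The format-check and build/test steps below run inside this prebuilt container
# image (CUDA 12.5, Ubuntu, LLVM 17) pulled from ghcr.io via addnab/docker-run-action.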
jobs:
  mlir-tensorrt-tests:
    if: github.event.pull_request.draft == false
    # `ubuntu-latest` is a CPU-only runner, so tests that require a GPU are not run.
    runs-on: ubuntu-latest
    steps:
      # Free some disk space; otherwise the build can fail when the runner runs out of space.
      - name: Free disk space
        run: |
          sudo rm -rf \
            /usr/share/dotnet "$AGENT_TOOLSDIRECTORY" /usr/local/lib/android /opt/ghc \
            /usr/local/share/powershell /usr/share/swift /usr/local/.ghcup \
            /usr/lib/jvm
          sudo apt-get purge microsoft-edge-stable || true
          sudo apt-get purge google-cloud-cli || true
          sudo apt-get purge dotnet-sdk-* || true
          sudo apt-get purge google-chrome-stable || true
          sudo apt-get autoremove -y
          sudo apt-get autoclean -y
      # The value of `github.workspace` is /home/runner/work/{repo-name}/{repo-name},
      # i.e. /home/runner/work/TensorRT-Incubator/TensorRT-Incubator in our case.
      # After this action, the repo is cloned into the path above.
      - uses: actions/checkout@v4
        with:
          fetch-depth: 5
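      # A shallow history of a few commits is enough for `git show` on the PR head
      # commit and for `git clang-format HEAD~1` in the format-check step below.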
      - name: Validate commit message
        env:
          PR_HEAD_COMMIT_SHA: ${{ github.event.pull_request.head.sha }}
        run: |
          cat > commit_message_checker.py <<'EOF'
          #!/usr/bin/python3
          import re
          import sys
          import subprocess

          git_cmd = f"git show -s --format=%B {sys.argv[1]}"
          try:
              commit_message_cmd = subprocess.run(git_cmd.split(' '), capture_output=True, text=True, check=True)
              commit_message = commit_message_cmd.stdout.strip()
          except subprocess.CalledProcessError as e:
              print(f"Failed to get PR HEAD commit message with error: {e.stderr.strip()}")
              sys.exit(1)

          match = re.search(r"^(\[bot\].+|NFC: .+|(.+\n\n+.+\n+.+))$", commit_message, re.DOTALL)
          if match:
              print("Commit message is in canonical form :)")
              sys.exit(0)
          print("Commit message is not in the canonical form!")
          print(commit_message)
          print("")
          print("Expected format is:")
          print("<title>")
          print("<body>")
          print("NOTE: Body should start on a new line. Use `2 spaces + enter` for a new line!")
          print("NOTE: Body should be at least two lines.")
          sys.exit(1)
          EOF
          python3 commit_message_checker.py ${PR_HEAD_COMMIT_SHA}
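      # For reference, a commit message that the check above accepts looks like:
      #   <title line>
      #   <blank line>
      #   <body line 1>
      #   <body line 2>
      # Messages starting with "[bot]" or "NFC: " are also accepted.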
      # Run initial format check
      - name: Run python format and clang check
        uses: addnab/docker-run-action@v3
        with:
          image: ${{ env.DEFAULT_IMAGE }}
          options: -v ${{ github.workspace }}:/tensorrt-incubator
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
          # This step does two things:
          # 1. Check if Python files follow black format
          # 2. Check if C++ files follow clang format
          # NOTE: We are placed at the root directory ('/') inside the container.
          run: |
            cd tensorrt-incubator
            git config --global --add safe.directory /tensorrt-incubator
            cat > run_format_check.sh <<EOF
            #!/bin/bash
            set -e
            python3 -m black --check --exclude='.*\.pyi' mlir-tensorrt/test/
            python3 -m black --check --exclude='.*\.pyi' mlir-tensorrt/python/
            git clang-format HEAD~1 --diff
            EOF
            bash run_format_check.sh
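      # The same black and `git clang-format` commands can be run locally before
      # pushing to catch formatting issues without waiting for CI.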
      # Create cache folders
      - name: Create cache folder
        run: |
          mkdir -p ${{ github.workspace }}/ccache
          mkdir -p ${{ github.workspace }}/.ccache.cpm
      # Create cache action
      - name: Create cache action
        id: core-build-cache
        uses: actions/cache@v4
        with:
          key: ${{ runner.os }}-mlir-tensorrt-core-build
          path: |
            ${{ github.workspace }}/ccache
            ${{ github.workspace }}/.ccache.cpm
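      # `ccache` stores compiler outputs and `.ccache.cpm` appears to hold the CPM/CMake
      # package cache; restoring both lets the three builds below reuse earlier work.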
      # Run LIT tests with TensorRT 10
      - name: Run MLIR-TensorRT lit tests with TensorRT 10
        uses: addnab/docker-run-action@v3
        with:
          image: ${{ env.DEFAULT_IMAGE }}
          options: -v ${{ github.workspace }}/mlir-tensorrt:/mlir-tensorrt -v ${{ github.workspace }}/ccache:/ccache -v ${{ github.workspace }}/.ccache.cpm:/.ccache.cpm
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
          run: |
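            # Point ccache at the mounted cache volume, hash compilers by content, and cap the cache at 10 GB.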
            export CCACHE_BASEDIR="$PWD"
            export CCACHE_DIR="$PWD/ccache"
            export CCACHE_COMPILERCHECK=content
            export CCACHE_MAXSIZE=10G
            ccache --zero-stats || true
            ccache --show-stats || true
            cd mlir-tensorrt
            cat > build_and_test.sh <<EOF
            #!/bin/bash
            set -e
            python3 -m pip install -r python/requirements-dev.txt
            cmake -B ./build -S . -G Ninja \
              -DCMAKE_BUILD_TYPE=RelWithDebInfo \
              -DMLIR_TRT_PACKAGE_CACHE_DIR=${PWD}/.cache.cpm \
              -DMLIR_TRT_ENABLE_ASSERTIONS=ON \
              -DMLIR_TRT_DOWNLOAD_TENSORRT_VERSION=10.2 \
              -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \
              -DMLIR_TRT_USE_LINKER=lld \
              -DMLIR_EXECUTOR_ENABLE_GPU_INTEGRATION_TESTS=OFF
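            # GPU integration tests stay off because this job runs on the CPU-only ubuntu-latest runner.
            # Build everything, then run the executor, TensorRT-dialect, and full MLIR-TensorRT lit suites.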
            ninja -C build all
            ninja -C build check-mlir-executor
            ninja -C build check-mlir-tensorrt-dialect
            ninja -C build check-mlir-tensorrt
            cd ..
            ccache --show-stats || true
            EOF
            bash build_and_test.sh
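      # The next two steps repeat the same build: one with -DENABLE_ASAN=ON, and one
      # against TensorRT 9.2.0.5 instead of 10.2.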
      # Run LIT tests with TensorRT 10 & ASAN
      - name: Run MLIR-TensorRT lit tests with TensorRT 10, ASAN enabled
        uses: addnab/docker-run-action@v3
        with:
          image: ${{ env.DEFAULT_IMAGE }}
          options: -v ${{ github.workspace }}/mlir-tensorrt:/mlir-tensorrt -v ${{ github.workspace }}/ccache:/ccache -v ${{ github.workspace }}/.ccache.cpm:/.ccache.cpm
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
          run: |
            export CCACHE_BASEDIR="$PWD"
            export CCACHE_DIR="$PWD/ccache"
            export CCACHE_COMPILERCHECK=content
            export CCACHE_MAXSIZE=10G
            ccache --zero-stats || true
            ccache --show-stats || true
            cd mlir-tensorrt
            cat > build_and_test.sh <<EOF
            #!/bin/bash
            set -e
            python3 -m pip install -r python/requirements-dev.txt
            cmake -B ./build -S . -G Ninja \
              -DCMAKE_BUILD_TYPE=RelWithDebInfo \
              -DMLIR_TRT_PACKAGE_CACHE_DIR=${PWD}/.cache.cpm \
              -DMLIR_TRT_ENABLE_ASSERTIONS=ON \
              -DMLIR_TRT_DOWNLOAD_TENSORRT_VERSION=10.2 \
              -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \
              -DMLIR_TRT_USE_LINKER=lld \
              -DMLIR_EXECUTOR_ENABLE_GPU_INTEGRATION_TESTS=OFF \
              -DENABLE_ASAN=ON
            ninja -C build all
            ninja -C build check-mlir-executor
            ninja -C build check-mlir-tensorrt-dialect
            ninja -C build check-mlir-tensorrt
            cd ..
            ccache --show-stats || true
            EOF
            bash build_and_test.sh
      # Run LIT tests with TensorRT 9
      - name: Run MLIR-TensorRT lit tests with TensorRT 9
        uses: addnab/docker-run-action@v3
        with:
          image: ${{ env.DEFAULT_IMAGE }}
          options: -v ${{ github.workspace }}/mlir-tensorrt:/mlir-tensorrt -v ${{ github.workspace }}/ccache:/ccache -v ${{ github.workspace }}/.ccache.cpm:/.ccache.cpm
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
          run: |
            export CCACHE_BASEDIR="$PWD"
            export CCACHE_DIR="$PWD/ccache"
            export CCACHE_COMPILERCHECK=content
            export CCACHE_MAXSIZE=10G
            ccache --zero-stats || true
            ccache --show-stats || true
            cd mlir-tensorrt
            cat > build_and_test.sh <<EOF
            #!/bin/bash
            set -e
            python3 -m pip install -r python/requirements-dev.txt
            cmake -B ./build -S . -G Ninja \
              -DCMAKE_BUILD_TYPE=RelWithDebInfo \
              -DMLIR_TRT_PACKAGE_CACHE_DIR=${PWD}/.cache.cpm \
              -DMLIR_TRT_ENABLE_ASSERTIONS=ON \
              -DMLIR_TRT_DOWNLOAD_TENSORRT_VERSION=9.2.0.5 \
              -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \
              -DMLIR_TRT_USE_LINKER=lld \
              -DMLIR_EXECUTOR_ENABLE_GPU_INTEGRATION_TESTS=OFF
            ninja -C build all
            ninja -C build check-mlir-executor
            ninja -C build check-mlir-tensorrt-dialect
            ninja -C build check-mlir-tensorrt
            cd ..
            ccache --show-stats || true
            EOF
            bash build_and_test.sh