Refactor the llama.cpp interface #723

name: Benchmark PR

on:
  push:
  pull_request:
    types: [synchronize, labeled]
  workflow_dispatch:

env:
  PYTHON_VERSION: "3.10"
  WORKING_DIR: ${{ github.workspace }}/benchmarks
  BENCHMARKS_OUTPUT: ${{ github.workspace }}/benchmarks_output

permissions:
  contents: read
# Cancel previous runs of this workflow that have not completed.
concurrency:
  # The concurrency group contains the workflow name and the branch name for
  # pull requests, or the commit hash for any other event. The `&&`/`||` chain
  # emulates a ternary, since GitHub Actions expressions have no conditional operator.
  group: ${{ github.workflow }}-${{ github.event_name == 'pull_request' && github.head_ref || github.sha }}
  cancel-in-progress: true

jobs:
  benchmark-pr:
    runs-on: ubuntu-latest
    # Run only for PRs labeled `run-benchmarks`, or for pushes to `main`.
    if: ${{ contains(github.event.pull_request.labels.*.name, 'run-benchmarks') || github.ref == 'refs/heads/main' }}
    defaults:
      run:
        working-directory: ${{ env.WORKING_DIR }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
        with:
          # Fetch the full history so `upstream/main` is available for comparison.
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install asv virtualenv lf-asv-formatter
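
      # `asv machine --yes` records a machine profile with the detected default
      # values, so later asv commands do not prompt for machine details.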
      - name: Create ASV machine config file
        run: asv machine --machine gh-runner --yes

      - name: Run Benchmarks - `PR HEAD` vs `main`
        run: |
          # Prepare the main branch for comparison.
          git remote add upstream https://github.com/${{ github.repository }}.git
          git fetch upstream main
          # Run the benchmarks; ignore failures here, since regressions are
          # detected and reported in the next step.
          asv continuous upstream/main HEAD \
            --no-stats --interleave-rounds -a repeat=3 || true
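
      # `asv compare --factor=1.1` reports benchmarks whose runtime ratio between
      # the two commits exceeds 1.1, i.e. a change of more than ~10%.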
      - name: BENCHMARK RESULTS
        run: |
          asv compare --factor=1.1 --no-stats --split upstream/main HEAD | tee ${{ env.BENCHMARKS_OUTPUT }}
          if grep -q "Benchmarks that have got worse" "${{ env.BENCHMARKS_OUTPUT }}"; then
            echo "Performance degradation detected!"
            exit 1
          fi
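
The same comparison can be tried locally before pushing. This is a minimal sketch, reusing the workflow's own asv commands; it assumes you run it from the benchmarks/ directory, that your local main branch is up to date with upstream, and the machine label `local` is illustrative:

    # Install the same tooling the workflow uses.
    python -m pip install asv virtualenv

    # Register this machine with ASV, accepting the detected defaults.
    asv machine --machine local --yes

    # Benchmark HEAD against main, repeating each benchmark 3 times.
    asv continuous main HEAD --no-stats --interleave-rounds -a repeat=3

    # Summarize changes beyond the 10% threshold.
    asv compare --factor=1.1 --no-stats --split main HEAD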