name: Continuous Benchmarking

on:
  pull_request:
  workflow_dispatch:

permissions:
  contents: write
  deployments: write

jobs:
  benchmark:
    name: Run C Benchmarks
    runs-on: ubuntu-latest # FIXME: change to self-hosted after russel is set up.
    steps:
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.8'
      - name: Checkout benchmark repository
        uses: actions/checkout@v2
        with:
          repository: lf-lang/benchmarks-lingua-franca
          ref: automated-full-benchmark # FIXME: delete this line
      - name: Checkout Lingua Franca repository
        uses: actions/checkout@v2
        with:
          repository: lf-lang/lingua-franca
          path: lf
      - name: Prepare LF build environment
        uses: ./lf/.github/actions/prepare-build-env
      - name: Checkout current version of reactor-c
        uses: actions/checkout@v2
        with:
          path: lf/org.lflang/src/lib/c/reactor-c
      - name: Install Python dependencies
        run: pip3 install -r runner/requirements.txt
      - name: Build lfc
        run: |
          cd lf
          ./gradlew buildLfc
      - name: Set LF_PATH and LF_BENCHMARKS_PATH environment variables
        run: |
          echo "LF_PATH=$GITHUB_WORKSPACE/lf" >> $GITHUB_ENV
          echo "LF_BENCHMARKS_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
      - name: Run C Benchmarks (multithreaded)
        run: |
          python3 runner/run_benchmark.py -m continue_on_error=True iterations=12 problem_size=small \
            benchmark="glob(*)" target=lf-c target.params.scheduler=GEDF_NP,NP,adaptive threads=0
      - name: Collect results
        run: python3 runner/collect_results.py continuous-benchmarking-results-multi-threaded.json
      - name: Store Benchmark Result
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: Lingua Franca C Benchmark -- Multithreaded
          tool: customSmallerIsBetter
          output-file-path: continuous-benchmarking-results-multi-threaded.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          alert-threshold: '200%' # FIXME: After russel is set up, lower the threshold
          comment-on-alert: true
          fail-on-alert: false
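      # With customSmallerIsBetter, github-action-benchmark raises an alert when
      # a new value is worse than the previous one by more than alert-threshold,
      # so '200%' only flags a 2x slowdown. Because fail-on-alert is false, the
      # alert is posted as a commit comment (comment-on-alert) without failing
      # the job.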
      - name: Run C Benchmarks (unthreaded)
        run: |
          python3 runner/run_benchmark.py -m continue_on_error=True iterations=12 problem_size=small \
            benchmark="glob(*)" target=lf-c-unthreaded
      - name: Collect results
        run: python3 runner/collect_results.py continuous-benchmarking-results-single-threaded.json
      - name: Store Benchmark Result
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: Lingua Franca C Benchmark -- Single-Threaded
          tool: customSmallerIsBetter
          output-file-path: continuous-benchmarking-results-single-threaded.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: true
          alert-threshold: '200%' # FIXME: After russel is set up, lower the threshold
          comment-on-alert: true
          fail-on-alert: false
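# Note: collect_results.py must emit the JSON schema that the
# customSmallerIsBetter tool expects: a top-level array of measurements, each
# with "name", "unit", and "value" (smaller is better), plus optional "range"
# and "extra" fields. A hypothetical results file (benchmark names are
# illustrative) might look like:
#
#   [
#     { "name": "PingPong (lf-c, NP)", "unit": "ms", "value": 123.4 },
#     { "name": "Philosophers (lf-c, NP)", "unit": "ms", "value": 567.8 }
#   ]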