Shot batching is made more efficient by executing all the shots in one go on Lightning Qubit #609

name: Testing::Linux::x86_64::MPSTNCuda::Python
# TODO remove MPS from the workflow name and the workflow filename once exact is in.
# TODO remove the `pl_tensor_method` once exact TN is added.
on:
  workflow_call:
    inputs:
      lightning-version:
        type: string
        required: true
        description: The version of Lightning to use. Valid values are either 'release' (most recent release candidate), 'stable' (most recent git-tag) or 'latest' (most recent commit from master)
      pennylane-version:
        type: string
        required: true
        description: The version of PennyLane to use. Valid values are either 'release' (most recent release candidate), 'stable' (most recent git-tag) or 'latest' (most recent commit from master)
  pull_request:
    paths-ignore:
      - .github/**
      - '!.github/workflows/tests_lmps_tncuda_python.yml'
      - pennylane_lightning/core/src/simulators/lightning_kokkos/**
      - pennylane_lightning/core/src/simulators/lightning_qubit/**
      - pennylane_lightning/core/src/simulators/lightning_gpu/**
      - pennylane_lightning/core/_version.py
      - pennylane_lightning/lightning_gpu/**
      - pennylane_lightning/lightning_qubit/**
      - pennylane_lightning/lightning_kokkos/**
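    # Pull requests touching only the paths above (the other backends, and .github/ except
    # this workflow file itself, which the '!' pattern re-includes) do not trigger this workflow.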
  push:
    branches:
      - master

env:
  CI_CUDA_ARCH: 86
  COVERAGE_FLAGS: "--cov=pennylane_lightning --cov-report=term-missing --cov-report=xml:./coverage.xml --no-flaky-report -p no:warnings --tb=native"
  GCC_VERSION: 11

concurrency:
  group: tests_lmps_tncuda_python-${{ github.ref }}-${{ github.event_name }}-${{ inputs.lightning-version }}-${{ inputs.pennylane-version }}
  cancel-in-progress: true
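# Superseded runs for the same ref, event and version inputs are cancelled rather than queued.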
jobs:
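  # Checks that the self-hosted GPU runner exposes a working CUDA toolchain (nvcc, driver)
  # before the Python test job is allowed to start.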
  builddeps:
    runs-on:
      - self-hosted
      - ubuntu-22.04
      - gpu

    strategy:
      max-parallel: 1
      matrix:
        os: [ubuntu-22.04]
        pl_backend: ["lightning_tensor"]
        cuda_version: ["12"]

    steps:
      - name: Validate GPU version and installed compiler
        run: |
          source /etc/profile.d/modules.sh
          module use /opt/modules
          module load cuda/${{ matrix.cuda_version }}
          echo "${PATH}" >> $GITHUB_PATH
          echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV
          nvcc --version
          nvidia-smi
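  # Builds lightning_qubit (the default backend) and lightning_tensor against cuTensorNet,
  # then runs the Python test suite for the tensor-network device on the GPU runner.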
  pythontestswithMPSTNCuda:
    if: ${{ !contains(fromJSON('["schedule", "workflow_dispatch"]'), github.event_name) }}
    needs: [builddeps]
    strategy:
      matrix:
        os: [ubuntu-22.04]
        pl_backend: ["lightning_tensor"]
        default_backend: ["lightning_qubit"]
        pl_tensor_method: ["mps"]
        pl_tensor_backend: ["cutensornet"]
        cuda_version: ["12"]

    name: Python Tests (${{ matrix.pl_backend }}, method-${{ matrix.pl_tensor_method }}, backend-${{ matrix.pl_tensor_backend }}, cuda-${{ matrix.cuda_version }})
    runs-on:
      - ${{ matrix.os }}
      - self-hosted
      - gpu

    steps:
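      # Loading the CUDA environment module and exporting PATH/LD_LIBRARY_PATH here makes the
      # toolchain visible to every subsequent step in this job.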
      - name: Validate GPU version and installed compiler
        run: |
          source /etc/profile.d/modules.sh
          module use /opt/modules
          module load cuda/${{ matrix.cuda_version }}
          echo "${PATH}" >> $GITHUB_PATH
          echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> $GITHUB_ENV
          nvcc --version
          nvidia-smi

      - name: Checkout PennyLane-Lightning-Tensor-MPS-TNCuda
        uses: actions/checkout@v4
        with:
          path: main
      - uses: actions/setup-python@v5
        name: Install Python
        # The step id is referenced below (steps.setup_python.outputs.python-version) to name the venv.
        id: setup_python
        with:
          python-version: '3.9'
      # Since the self-hosted runner can be re-used, it is best to set up all package
      # installations in a virtual environment that gets cleaned at the end of each workflow run.
      - name: Setup Python virtual environment
        id: setup_venv
        env:
          VENV_NAME: ${{ github.workspace }}/venv_${{ steps.setup_python.outputs.python-version }}_${{ github.sha }}
        run: |
          # Clear any pre-existing venvs
          rm -rf venv_*

          # Create new venv for this workflow_run
          python --version
          python -m venv ${{ env.VENV_NAME }}

          # Add the venv to PATH for subsequent steps
          echo ${{ env.VENV_NAME }}/bin >> $GITHUB_PATH

          # Add the venv name as an output for subsequent steps to reference if needed
          source ${{ env.VENV_NAME }}/bin/activate
          echo "venv_name=${{ env.VENV_NAME }}" >> $GITHUB_OUTPUT
          echo "Python_ROOT_DIR=${{ env.VENV_NAME }}" >> $GITHUB_ENV
          echo "Python3_ROOT_DIR=${{ env.VENV_NAME }}" >> $GITHUB_ENV

          # Add the venv site-packages dir as an output for subsequent steps to reference if needed
          echo "site_packages_dir=$(${{ env.VENV_NAME }}/bin/python -c 'import sysconfig; print(sysconfig.get_path("platlib"))')" >> $GITHUB_OUTPUT
      - name: Display Python-Path
        id: python_path
        run: |
          py_path=$(which python)
          echo "Python Interpreter Path => $py_path"
          echo "python=$py_path" >> $GITHUB_OUTPUT

          pip_path=$(which pip)
          echo "PIP Path => $pip_path"
          echo "pip=$pip_path" >> $GITHUB_OUTPUT
      - name: Install required packages
        run: |
          cd main
          python -m pip install -r requirements-dev.txt
          python -m pip install ninja cmake scipy custatevec-cu${{ matrix.cuda_version }} cutensornet-cu${{ matrix.cuda_version }} openfermionpyscf
      - name: Checkout PennyLane for release build
        if: inputs.pennylane-version == 'release'
        uses: actions/checkout@v4
        with:
          path: pennylane
          repository: PennyLaneAI/pennylane
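      # The checkout below selects the last v0.*rc* branch in sorted order, i.e. the most recent
      # PennyLane release candidate, matching the 'release' option documented in the workflow inputs.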
      - name: Switch to release build of PennyLane
        if: inputs.pennylane-version == 'release'
        run: |
          cd pennylane
          git fetch --all
          git checkout $(git branch -a --list "origin/v0.*rc*" | sort | tail -1)
          python -m pip uninstall -y pennylane && python -m pip install . -vv --no-deps

      - name: Install Stable PennyLane
        if: inputs.pennylane-version == 'stable'
        run: |
          cd main
          python -m pip uninstall -y pennylane && python -m pip install -U pennylane
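      # Two sequential builds: the default lightning_qubit backend is installed first, then the
      # lightning_tensor backend; `rm -rf build` clears stale CMake artifacts between the two.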
      - name: Build and install package
        run: |
          cd main
          rm -rf build
          CMAKE_ARGS="-DPL_BACKEND=${{ matrix.default_backend }} -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" \
          python -m pip install . -vv
          rm -rf build
          CMAKE_ARGS="-DPL_BACKEND=${{ matrix.pl_backend }} -DCMAKE_CXX_COMPILER=$(which g++-$GCC_VERSION)" \
          python -m pip install . -vv
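      # `sed "s/_/./g"` turns the backend name into the PennyLane device name
      # (lightning_tensor -> lightning.tensor), which the test suite reads from PL_DEVICE.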
      - name: Run PennyLane-Lightning-Tensor unit tests
        if: ${{ matrix.pl_backend != 'all' }}
        run: |
          cd main/
          DEVICENAME=`echo ${{ matrix.pl_backend }} | sed "s/_/./g"`
          PL_DEVICE=${DEVICENAME} python -m pytest tests $COVERAGE_FLAGS
          mv coverage.xml coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml

      - name: Upload code coverage results
        uses: actions/upload-artifact@v3
        with:
          name: ubuntu-codecov-results-python
          path: ./main/coverage-${{ github.job }}-${{ matrix.pl_backend }}.xml
          if-no-files-found: error
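  # Collects the coverage artifact produced above and uploads it to Codecov from a hosted runner.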
  upload-to-codecov-linux-python:
    needs: [pythontestswithMPSTNCuda]
    name: Upload coverage data to codecov
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Download coverage reports
        uses: actions/download-artifact@v3
        with:
          name: ubuntu-codecov-results-python

      - name: Upload to Codecov
        uses: codecov/codecov-action@v4
        with:
          fail_ci_if_error: true
          verbose: true
          token: ${{ secrets.CODECOV_TOKEN }}