From 2d2a249bc9409a2a5afb2bd188f1926256eb0d34 Mon Sep 17 00:00:00 2001 From: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> Date: Mon, 11 Nov 2024 13:40:01 +0100 Subject: [PATCH] Support for V2 primitives (#843) * Update README.md * Generalize the Einstein summation signature * Add reno * Update Copyright * Rename and add test * Update Copyright * Add docstring for `test_get_einsum_signature` * Correct spelling * Disable spellcheck for comments * Add `docstring` in pylint dict * Delete example in docstring * Add Einstein in pylint dict * Add full use case in einsum dict * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Remove for loop in einsum function and remove Literal arguments (1/2) * Remove for loop in einsum function and remove Literal arguments (1/2) * Remove for loop in einsum function and remove Literal arguments (2/2) * Update RuntimeError msg * Update RuntimeError msg - line too long * Trigger CI * Merge algos, globals.random to fix * Fixed `algorithms_globals` * Import /tests and run CI locally * Fix copyrights and some spellings * Ignore mypy in 8 instances * Merge spell dicts * Black reformatting * Black reformatting * Add reno * Lint sanitize * Pylint * Pylint * Pylint * Pylint * Fix relative imports in tutorials * Fix relative imports in tutorials * Remove algorithms from Jupyter magic methods * Temporarily disable "Run stable tutorials" tests * Change the docstrings with imports from qiskit_algorithms * Styling * Update qiskit_machine_learning/optimizers/gradient_descent.py Co-authored-by: Declan Millar * Update qiskit_machine_learning/optimizers/optimizer_utils/learning_rate.py Co-authored-by: Declan Millar * Add more tests for utils * Add more tests for optimizers: adam, bobyqa, gsls and imfil * Fix random seed for volatile optimizers * Fix random seed for volatile optimizers * Add more tests * Pylint dict * Activate scikit-quant-0.8.2 * 
Remove scikit-quant methods * Remove scikit-quant methods (2) * Edit the release notes and Qiskit version 1+ * Edit the release notes and Qiskit version 1+ * Add Qiskit 1.0 upgrade in reno * Add Qiskit 1.0 upgrade in reno * Add Qiskit 1.0 upgrade in reno * Apply line breaks * Restructure line breaks * Added support for SamplerV2 primitives (#49) * Migrating `qiskit_algorithms` (#817) * Update README.md * Generalize the Einstein summation signature * Add reno * Update Copyright * Rename and add test * Update Copyright * Add docstring for `test_get_einsum_signature` * Correct spelling * Disable spellcheck for comments * Add `docstring` in pylint dict * Delete example in docstring * Add Einstein in pylint dict * Add full use case in einsum dict * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Remove for loop in einsum function and remove Literal arguments (1/2) * Remove for loop in einsum function and remove Literal arguments (1/2) * Remove for loop in einsum function and remove Literal arguments (2/2) * Update RuntimeError msg * Update RuntimeError msg - line too long * Trigger CI * Merge algos, globals.random to fix * Fixed `algorithms_globals` * Import /tests and run CI locally * Fix copyrights and some spellings * Ignore mypy in 8 instances * Merge spell dicts * Black reformatting * Black reformatting * Add reno * Lint sanitize * Pylint * Pylint * Pylint * Pylint * Fix relative imports in tutorials * Fix relative imports in tutorials * Remove algorithms from Jupyter magic methods * Temporarily disable "Run stable tutorials" tests * Change the docstrings with imports from qiskit_algorithms * Styling * Update qiskit_machine_learning/optimizers/gradient_descent.py Co-authored-by: Declan Millar * Update qiskit_machine_learning/optimizers/optimizer_utils/learning_rate.py Co-authored-by: Declan Millar * Add more tests for utils * Add more tests for optimizers: adam, bobyqa, gsls and 
imfil * Fix random seed for volatile optimizers * Fix random seed for volatile optimizers * Add more tests * Pylint dict * Activate scikit-quant-0.8.2 * Remove scikit-quant methods * Remove scikit-quant methods (2) * Edit the release notes and Qiskit version 1+ * Edit the release notes and Qiskit version 1+ * Add Qiskit 1.0 upgrade in reno * Add Qiskit 1.0 upgrade in reno * Add Qiskit 1.0 upgrade in reno * Apply line breaks * Restructure line breaks --------- Co-authored-by: FrancescaSchiav Co-authored-by: M. Emre Sahin <40424147+OkuyanBoga@users.noreply.github.com> Co-authored-by: Declan Millar * Revamp readme pt2 (#822) * Restructure README.md --------- Co-authored-by: Steve Wood <40241007+woodsp-ibm@users.noreply.github.com> * V2 Primitive Support for SamplerQNN and Gradients * Update base_sampler_gradient.py * Update qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/gradients/spsa/spsa_sampler_gradient.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/gradients/spsa/spsa_sampler_gradient.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/sampler_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/sampler_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/sampler_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/sampler_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/sampler_qnn.py Co-authored-by: Edoardo Altamura 
<38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/sampler_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Fix lint errors due to Pylint 3.3.0 update in CI (#833) * disable=too-many-positional-arguments * Transfer pylint rc to toml * Transfer pylint rc to toml * Minor fixes * Remove Python 3.8 from CI (#824) (#826) * Remove Python 3.8 in CI (#824) * Correct `tmp` dirs (#818) * Correct unit py version (#818) * Add reno (#818) * Finalze removal of py38 (#818) * Spelling * Remove duplicate tmp folder * Updated the release note * Bump min pyversion in toml * Remove ipython constraints * Update reno * Updated test for test_sampler_qnn * Fix: output_shape * Adding optimisation level to TestSamplerQNN SamplerV2 option * Correcting the PUB prep for SamplerV2 by changing max iterator from n to len(job_param_values). Added a load of print statements to investigate behaviour when self._output_shape = (2, 3) - a tuple as this was failing tests due to a comparison in line 166. This has lead me to think that the way we are calculating QuasiDistribution is wrong as we need to know which real qubits the virtual qubits have been transpiled too to calculate the correct dist for SamplerV2. Following this up with IBM runtime. * Update sampler_qnn.py for correcting tuple output_shape when interpret is provided. 
* Adding ISA capabilities to gradients * Fix output shape and its default for V2 * Implement SamplerV2 for bayesian inference * Implement SamplerV2 for bayesian inference * Adding ISA capabilities to SamplerQNN and ParamShiftSamplerGradient * Removing unused backend * Removing failed merge conflicts * Removing residual merge conflicts * added SamplerV2 support for ComputeUncompute * Removing multiple tranpilations within same test * Formatting * Linting * Adding measure_all to setUp * removing default pm --------- Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> Co-authored-by: FrancescaSchiav Co-authored-by: Declan Millar Co-authored-by: Steve Wood <40241007+woodsp-ibm@users.noreply.github.com> Co-authored-by: oscar-wallis * Added support for EstimatorV2 primitives (#48) * Migrating `qiskit_algorithms` (#817) * Update README.md * Generalize the Einstein summation signature * Add reno * Update Copyright * Rename and add test * Update Copyright * Add docstring for `test_get_einsum_signature` * Correct spelling * Disable spellcheck for comments * Add `docstring` in pylint dict * Delete example in docstring * Add Einstein in pylint dict * Add full use case in einsum dict * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Remove for loop in einsum function and remove Literal arguments (1/2) * Remove for loop in einsum function and remove Literal arguments (1/2) * Remove for loop in einsum function and remove Literal arguments (2/2) * Update RuntimeError msg * Update RuntimeError msg - line too long * Trigger CI * Merge algos, globals.random to fix * Fixed `algorithms_globals` * Import /tests and run CI locally * Fix copyrights and some spellings * Ignore mypy in 8 instances * Merge spell dicts * Black reformatting * Black reformatting * Add reno * Lint sanitize * Pylint * Pylint * Pylint * Pylint * Fix relative imports in tutorials * Fix relative 
imports in tutorials * Remove algorithms from Jupyter magic methods * Temporarily disable "Run stable tutorials" tests * Change the docstrings with imports from qiskit_algorithms * Styling * Update qiskit_machine_learning/optimizers/gradient_descent.py Co-authored-by: Declan Millar * Update qiskit_machine_learning/optimizers/optimizer_utils/learning_rate.py Co-authored-by: Declan Millar * Add more tests for utils * Add more tests for optimizers: adam, bobyqa, gsls and imfil * Fix random seed for volatile optimizers * Fix random seed for volatile optimizers * Add more tests * Pylint dict * Activate scikit-quant-0.8.2 * Remove scikit-quant methods * Remove scikit-quant methods (2) * Edit the release notes and Qiskit version 1+ * Edit the release notes and Qiskit version 1+ * Add Qiskit 1.0 upgrade in reno * Add Qiskit 1.0 upgrade in reno * Add Qiskit 1.0 upgrade in reno * Apply line breaks * Restructure line breaks --------- Co-authored-by: FrancescaSchiav Co-authored-by: M. Emre Sahin <40424147+OkuyanBoga@users.noreply.github.com> Co-authored-by: Declan Millar * Revamp readme pt2 (#822) * Restructure README.md --------- Co-authored-by: Steve Wood <40241007+woodsp-ibm@users.noreply.github.com> * Added support for EstimatorV2 primitives * Update qiskit_machine_learning/neural_networks/estimator_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/estimator_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/estimator_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/estimator_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/gradients/param_shift/param_shift_estimator_gradient.py Co-authored-by: Edoardo Altamura 
<38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/gradients/spsa/spsa_estimator_gradient.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/gradients/param_shift/param_shift_estimator_gradient.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/estimator_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/neural_networks/estimator_qnn.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Update qiskit_machine_learning/gradients/param_shift/param_shift_estimator_gradient.py Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> * Fix lint errors due to Pylint 3.3.0 update in CI (#833) * disable=too-many-positional-arguments * Transfer pylint rc to toml * Transfer pylint rc to toml * Cleaner statements * Remove Python 3.8 from CI (#824) (#826) * Remove Python 3.8 in CI (#824) * Correct `tmp` dirs (#818) * Correct unit py version (#818) * Add reno (#818) * Finalze removal of py38 (#818) * Spelling * Remove duplicate tmp folder * Updated the release note * Bump min pyversion in toml * Remove ipython constraints * Update reno * Added unit tests for estimatorqnnV2 and minor fixes * Make black * Make lint and changes to V1/2 choice logics * Update requirements * Add default precision * Update estimator tests * Change num qubits in backend * Allow for num_qubits=None * Fix shape in parameter shift * Fix shape in parameter shift * Fix shape observables * Fix shape observables * Change default precision to match base estimator * Fix remaining shape issues * Estimator seed has no effect in local testing * fix argnames and supress error tolerance for test_estimator_qnn_v2 * Added pass manager the gradients. 
* quick bugfix for isa_circuits * Updating PUBs for estimatorqnn, updating test_estimator_qnn_v2 for ISA circs and relaxing tolerances * Lint and formatting * Tranpiling observables for isa g circs * fixing apply_layout --------- Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> Co-authored-by: FrancescaSchiav Co-authored-by: Declan Millar Co-authored-by: Steve Wood <40241007+woodsp-ibm@users.noreply.github.com> Co-authored-by: oscar-wallis * Pulled changes from main * Quick fix * bugfix for V1 * formatting * Prep-ing for 0.8 (#53) * Migrating `qiskit_algorithms` (#817) * Update README.md * Generalize the Einstein summation signature * Add reno * Update Copyright * Rename and add test * Update Copyright * Add docstring for `test_get_einsum_signature` * Correct spelling * Disable spellcheck for comments * Add `docstring` in pylint dict * Delete example in docstring * Add Einstein in pylint dict * Add full use case in einsum dict * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Spelling and type ignore * Remove for loop in einsum function and remove Literal arguments (1/2) * Remove for loop in einsum function and remove Literal arguments (1/2) * Remove for loop in einsum function and remove Literal arguments (2/2) * Update RuntimeError msg * Update RuntimeError msg - line too long * Trigger CI * Merge algos, globals.random to fix * Fixed `algorithms_globals` * Import /tests and run CI locally * Fix copyrights and some spellings * Ignore mypy in 8 instances * Merge spell dicts * Black reformatting * Black reformatting * Add reno * Lint sanitize * Pylint * Pylint * Pylint * Pylint * Fix relative imports in tutorials * Fix relative imports in tutorials * Remove algorithms from Jupyter magic methods * Temporarily disable "Run stable tutorials" tests * Change the docstrings with imports from qiskit_algorithms * Styling * Update qiskit_machine_learning/optimizers/gradient_descent.py 
Co-authored-by: Declan Millar * Update qiskit_machine_learning/optimizers/optimizer_utils/learning_rate.py Co-authored-by: Declan Millar * Add more tests for utils * Add more tests for optimizers: adam, bobyqa, gsls and imfil * Fix random seed for volatile optimizers * Fix random seed for volatile optimizers * Add more tests * Pylint dict * Activate scikit-quant-0.8.2 * Remove scikit-quant methods * Remove scikit-quant methods (2) * Edit the release notes and Qiskit version 1+ * Edit the release notes and Qiskit version 1+ * Add Qiskit 1.0 upgrade in reno * Add Qiskit 1.0 upgrade in reno * Add Qiskit 1.0 upgrade in reno * Apply line breaks * Restructure line breaks --------- Co-authored-by: FrancescaSchiav Co-authored-by: M. Emre Sahin <40424147+OkuyanBoga@users.noreply.github.com> Co-authored-by: Declan Millar * Revamp readme pt2 (#822) * Restructure README.md --------- Co-authored-by: Steve Wood <40241007+woodsp-ibm@users.noreply.github.com> * Fix lint errors due to Pylint 3.3.0 update in CI (#833) * disable=too-many-positional-arguments * Transfer pylint rc to toml * Transfer pylint rc to toml * Remove Python 3.8 from CI (#824) (#826) * Remove Python 3.8 in CI (#824) * Correct `tmp` dirs (#818) * Correct unit py version (#818) * Add reno (#818) * Finalze removal of py38 (#818) * Spelling * Remove duplicate tmp folder * Updated the release note * Bump min pyversion in toml * Remove ipython constraints * Update reno * Reestablish latest Pytorch and Numpy (#818) (#827) * Reestablish latest Pytorch and Numpy (#818) * Keep pinned Numpy * Keep pinned Numpy * Fix numpy min version * Fix RawFeatureVector for failing test case (#838) * Alter RawFeatureVector normalization * Alter RawFeatureVector normalization * Release nbconvert constraints (#842) * Remove redundant MacOS 14 image in CI (#841) * Remove redundant MacOS 14 image * Update with macos-latest-large * Revert "Update with macos-latest-large" This reverts commit 14f945e3f0c8eaf39195fed81bc6e55ce077735f. 
* Update with macos-latest-large * Update v2 (#54) * bugfix for V1 * formatting --------- Co-authored-by: oscar-wallis --------- Co-authored-by: Edoardo Altamura <38359901+edoaltamura@users.noreply.github.com> Co-authored-by: FrancescaSchiav Co-authored-by: M. Emre Sahin <40424147+OkuyanBoga@users.noreply.github.com> Co-authored-by: Declan Millar Co-authored-by: Steve Wood <40241007+woodsp-ibm@users.noreply.github.com> * Update test_qbayesian * Bugfixing the test_gradient * Fixing an Options error with sampler_gradient * Linting and formatting * Add reno * Fix dict typing definition * Fix mypy * Issue deprecation warnings * Update skip test message * Update deprecation warning for qbayesian.py * Update deprecation warning for qbayesian.py * Add headers in deprecation.py * Add headers in deprecation.py * Add headers in deprecation.py * Correct spelling * Add spelling `msg` --------- Co-authored-by: FrancescaSchiav Co-authored-by: M. Emre Sahin <40424147+OkuyanBoga@users.noreply.github.com> Co-authored-by: Declan Millar Co-authored-by: Steve Wood <40241007+woodsp-ibm@users.noreply.github.com> Co-authored-by: oscar-wallis Co-authored-by: Emre Co-authored-by: Oscar <108736468+oscar-wallis@users.noreply.github.com> --- .github/workflows/main.yml | 2 +- .pylintdict | 2 + .../algorithms/inference/qbayesian.py | 67 ++- .../gradients/base/base_estimator_gradient.py | 28 +- .../gradients/base/base_sampler_gradient.py | 22 +- .../lin_comb/lin_comb_estimator_gradient.py | 2 +- .../param_shift_estimator_gradient.py | 85 ++- .../param_shift_sampler_gradient.py | 53 +- .../gradients/spsa/spsa_estimator_gradient.py | 2 +- .../neural_networks/estimator_qnn.py | 113 +++- .../neural_networks/neural_network.py | 6 +- .../neural_networks/sampler_qnn.py | 138 +++-- .../state_fidelities/compute_uncompute.py | 109 +++- qiskit_machine_learning/utils/deprecation.py | 84 +++ ...v2-primitive-support-2cf30f1701c31d0f.yaml | 35 ++ requirements-dev.txt | 1 + setup.py | 38 +- ...imator_qnn.py => 
test_estimator_qnn_v1.py} | 6 +- test/neural_networks/test_estimator_qnn_v2.py | 569 ++++++++++++++++++ test/neural_networks/test_sampler_qnn.py | 44 +- .../test_compute_uncompute_v2.py | 343 +++++++++++ 21 files changed, 1581 insertions(+), 168 deletions(-) create mode 100644 qiskit_machine_learning/utils/deprecation.py create mode 100644 releasenotes/notes/v2-primitive-support-2cf30f1701c31d0f.yaml rename test/neural_networks/{test_estimator_qnn.py => test_estimator_qnn_v1.py} (99%) create mode 100644 test/neural_networks/test_estimator_qnn_v2.py create mode 100644 test/state_fidelities/test_compute_uncompute_v2.py diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 1e3eeb01f..b4cc1e962 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -312,4 +312,4 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: coveralls --service=github - shell: bash + shell: bash \ No newline at end of file diff --git a/.pylintdict b/.pylintdict index 93892d47d..70e2bb17f 100644 --- a/.pylintdict +++ b/.pylintdict @@ -312,6 +312,7 @@ monte mosca mpl mprev +msg multiclass multinomial multioutput @@ -501,6 +502,7 @@ sparsearray spedalieri spsa sqrt +stacklevel statefn statevector statevectors diff --git a/qiskit_machine_learning/algorithms/inference/qbayesian.py b/qiskit_machine_learning/algorithms/inference/qbayesian.py index 9621ba5e4..2d3736eac 100644 --- a/qiskit_machine_learning/algorithms/inference/qbayesian.py +++ b/qiskit_machine_learning/algorithms/inference/qbayesian.py @@ -15,11 +15,17 @@ import copy from typing import Tuple, Dict, Set, List + from qiskit import QuantumCircuit, ClassicalRegister from qiskit.quantum_info import Statevector -from qiskit.circuit.library import GroverOperator -from qiskit.primitives import BaseSampler, Sampler from qiskit.circuit import Qubit +from qiskit.circuit.library import GroverOperator +from qiskit.primitives import BaseSampler, Sampler, BaseSamplerV2, BaseSamplerV1 +from 
qiskit.transpiler.passmanager import BasePassManager +from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager +from qiskit.providers.fake_provider import GenericBackendV2 + +from ...utils.deprecation import issue_deprecation_msg class QBayesian: @@ -62,7 +68,8 @@ def __init__( *, limit: int = 10, threshold: float = 0.9, - sampler: BaseSampler | None = None, + sampler: BaseSampler | BaseSamplerV2 | None = None, + pass_manager: BasePassManager | None = None, ): """ Args: @@ -83,15 +90,30 @@ def __init__( # Test valid input for qrg in circuit.qregs: if qrg.size > 1: - raise ValueError("Every register needs to be mapped to exactly one unique qubit") + raise ValueError("Every register needs to be mapped to exactly one unique qubit.") + # Initialize parameter self._circ = circuit self._limit = limit self._threshold = threshold if sampler is None: sampler = Sampler() + + if isinstance(sampler, BaseSamplerV1): + issue_deprecation_msg( + msg="V1 Primitives are deprecated", + version="0.8.0", + remedy="Use V2 primitives for continued compatibility and support.", + period="4 months", + ) + self._sampler = sampler + if pass_manager is None: + _backend = GenericBackendV2(num_qubits=max(circuit.num_qubits, 2)) + pass_manager = generate_preset_pass_manager(optimization_level=1, backend=_backend) + self._pass_manager = pass_manager + # Label of register mapped to its qubit self._label2qubit = {qrg.name: qrg[0] for qrg in self._circ.qregs} # Label of register mapped to its qubit index bottom up in significance @@ -139,11 +161,34 @@ def _get_grover_op(self, evidence: Dict[str, int]) -> GroverOperator: def _run_circuit(self, circuit: QuantumCircuit) -> Dict[str, float]: """Run the quantum circuit with the sampler.""" - # Sample from circuit - job = self._sampler.run(circuit) - result = job.result() - # Get the counts of quantum state results - counts = result.quasi_dists[0].nearest_probability_distribution().binary_probabilities() + counts = {} + + if 
isinstance(self._sampler, BaseSampler): + # Sample from circuit + job = self._sampler.run(circuit) + result = job.result() + + # Get the counts of quantum state results + counts = result.quasi_dists[0].nearest_probability_distribution().binary_probabilities() + + elif isinstance(self._sampler, BaseSamplerV2): + + # Sample from circuit + circuit_isa = self._pass_manager.run(circuit) + job = self._sampler.run([circuit_isa]) + result = job.result() + + bit_array = list(result[0].data.values())[0] + bitstring_counts = bit_array.get_counts() + + # Normalize the counts to probabilities + total_shots = result[0].metadata["shots"] + counts = {k: v / total_shots for k, v in bitstring_counts.items()} + + # Convert to quasi-probabilities + # counts = QuasiDistribution(probabilities) + # counts = {k: v for k, v in counts.items()} + return counts def __power_grover( @@ -360,12 +405,12 @@ def limit(self, limit: int): self._limit = limit @property - def sampler(self) -> BaseSampler: + def sampler(self) -> BaseSampler | BaseSamplerV2: """Returns the sampler primitive used to compute the samples.""" return self._sampler @sampler.setter - def sampler(self, sampler: BaseSampler): + def sampler(self, sampler: BaseSampler | BaseSamplerV2): """Set the sampler primitive used to compute the samples.""" self._sampler = sampler diff --git a/qiskit_machine_learning/gradients/base/base_estimator_gradient.py b/qiskit_machine_learning/gradients/base/base_estimator_gradient.py index edfe80fd0..bb85cd179 100644 --- a/qiskit_machine_learning/gradients/base/base_estimator_gradient.py +++ b/qiskit_machine_learning/gradients/base/base_estimator_gradient.py @@ -23,11 +23,13 @@ import numpy as np from qiskit.circuit import Parameter, ParameterExpression, QuantumCircuit -from qiskit.primitives import BaseEstimator +from qiskit.primitives import BaseEstimator, BaseEstimatorV1 +from qiskit.primitives.base import BaseEstimatorV2 from qiskit.primitives.utils import _circuit_key from qiskit.providers import 
Options from qiskit.quantum_info.operators.base_operator import BaseOperator from qiskit.transpiler.passes import TranslateParameterizedGates +from qiskit.transpiler.passmanager import BasePassManager from .estimator_gradient_result import EstimatorGradientResult from ..utils import ( @@ -37,7 +39,7 @@ _make_gradient_parameters, _make_gradient_parameter_values, ) - +from ...utils.deprecation import issue_deprecation_msg from ...algorithm_job import AlgorithmJob @@ -46,13 +48,15 @@ class BaseEstimatorGradient(ABC): def __init__( self, - estimator: BaseEstimator, + estimator: BaseEstimator | BaseEstimatorV2, options: Options | None = None, derivative_type: DerivativeType = DerivativeType.REAL, + pass_manager: BasePassManager | None = None, ): r""" Args: estimator: The estimator used to compute the gradients. + pass_manager: pass manager for isa_circuit transpilation. options: Primitive backend runtime options used for circuit execution. The order of priority is: options in ``run`` method > gradient's default options > primitive's default setting. @@ -68,7 +72,15 @@ def __init__( gradient and this type is the only supported type for function-level schemes like finite difference. 
""" + if isinstance(estimator, BaseEstimatorV1): + issue_deprecation_msg( + msg="V1 Primitives are deprecated", + version="0.8.0", + remedy="Use V2 primitives for continued compatibility and support.", + period="4 months", + ) self._estimator: BaseEstimator = estimator + self._pass_manager = pass_manager self._default_options = Options() if options is not None: self._default_options.update_options(**options) @@ -92,7 +104,7 @@ def run( self, circuits: Sequence[QuantumCircuit], observables: Sequence[BaseOperator], - parameter_values: Sequence[Sequence[float]], + parameter_values: Sequence[Sequence[float]] | np.ndarray, parameters: Sequence[Sequence[Parameter] | None] | None = None, **options, ) -> AlgorithmJob: @@ -157,7 +169,7 @@ def _run( self, circuits: Sequence[QuantumCircuit], observables: Sequence[BaseOperator], - parameter_values: Sequence[Sequence[float]], + parameter_values: Sequence[Sequence[float]] | np.ndarray, parameters: Sequence[Sequence[Parameter]], **options, ) -> EstimatorGradientResult: @@ -167,7 +179,7 @@ def _run( def _preprocess( self, circuits: Sequence[QuantumCircuit], - parameter_values: Sequence[Sequence[float]], + parameter_values: Sequence[Sequence[float]] | np.ndarray, parameters: Sequence[Sequence[Parameter]], supported_gates: Sequence[str], ) -> tuple[Sequence[QuantumCircuit], Sequence[Sequence[float]], Sequence[Sequence[Parameter]]]: @@ -209,7 +221,7 @@ def _postprocess( self, results: EstimatorGradientResult, circuits: Sequence[QuantumCircuit], - parameter_values: Sequence[Sequence[float]], + parameter_values: Sequence[Sequence[float]] | np.ndarray, parameters: Sequence[Sequence[Parameter]], ) -> EstimatorGradientResult: """Postprocess the gradients. 
This method computes the gradient of the original circuits @@ -269,7 +281,7 @@ def _postprocess( def _validate_arguments( circuits: Sequence[QuantumCircuit], observables: Sequence[BaseOperator], - parameter_values: Sequence[Sequence[float]], + parameter_values: Sequence[Sequence[float]] | np.ndarray, parameters: Sequence[Sequence[Parameter]], ) -> None: """Validate the arguments of the ``run`` method. diff --git a/qiskit_machine_learning/gradients/base/base_sampler_gradient.py b/qiskit_machine_learning/gradients/base/base_sampler_gradient.py index 9e29b47ab..3db0c3e31 100644 --- a/qiskit_machine_learning/gradients/base/base_sampler_gradient.py +++ b/qiskit_machine_learning/gradients/base/base_sampler_gradient.py @@ -22,10 +22,11 @@ from copy import copy from qiskit.circuit import Parameter, ParameterExpression, QuantumCircuit -from qiskit.primitives import BaseSampler +from qiskit.primitives import BaseSampler, BaseSamplerV1 from qiskit.primitives.utils import _circuit_key from qiskit.providers import Options from qiskit.transpiler.passes import TranslateParameterizedGates +from qiskit.transpiler.passmanager import BasePassManager from .sampler_gradient_result import SamplerGradientResult from ..utils import ( @@ -34,14 +35,20 @@ _make_gradient_parameters, _make_gradient_parameter_values, ) - +from ...utils.deprecation import issue_deprecation_msg from ...algorithm_job import AlgorithmJob class BaseSamplerGradient(ABC): """Base class for a ``SamplerGradient`` to compute the gradients of the sampling probability.""" - def __init__(self, sampler: BaseSampler, options: Options | None = None): + def __init__( + self, + sampler: BaseSampler, + options: Options | None = None, + len_quasi_dist: int | None = None, + pass_manager: BasePassManager | None = None, + ): """ Args: sampler: The sampler used to compute the gradients. @@ -50,7 +57,16 @@ def __init__(self, sampler: BaseSampler, options: Options | None = None): default options > primitive's default setting. 
Higher priority setting overrides lower priority setting """ + if isinstance(sampler, BaseSamplerV1): + issue_deprecation_msg( + msg="V1 Primitives are deprecated", + version="0.8.0", + remedy="Use V2 primitives for continued compatibility and support.", + period="4 months", + ) self._sampler: BaseSampler = sampler + self._pass_manager = pass_manager + self._len_quasi_dist = len_quasi_dist self._default_options = Options() if options is not None: self._default_options.update_options(**options) diff --git a/qiskit_machine_learning/gradients/lin_comb/lin_comb_estimator_gradient.py b/qiskit_machine_learning/gradients/lin_comb/lin_comb_estimator_gradient.py index f7787f7e3..e70876a26 100644 --- a/qiskit_machine_learning/gradients/lin_comb/lin_comb_estimator_gradient.py +++ b/qiskit_machine_learning/gradients/lin_comb/lin_comb_estimator_gradient.py @@ -98,7 +98,7 @@ def _run( self, circuits: Sequence[QuantumCircuit], observables: Sequence[BaseOperator], - parameter_values: Sequence[Sequence[float]], + parameter_values: Sequence[Sequence[float]] | np.ndarray, parameters: Sequence[Sequence[Parameter]], **options, ) -> EstimatorGradientResult: diff --git a/qiskit_machine_learning/gradients/param_shift/param_shift_estimator_gradient.py b/qiskit_machine_learning/gradients/param_shift/param_shift_estimator_gradient.py index cde25a0fd..8bbe5f051 100644 --- a/qiskit_machine_learning/gradients/param_shift/param_shift_estimator_gradient.py +++ b/qiskit_machine_learning/gradients/param_shift/param_shift_estimator_gradient.py @@ -17,14 +17,18 @@ from collections.abc import Sequence +import numpy as np + from qiskit.circuit import Parameter, QuantumCircuit from qiskit.quantum_info.operators.base_operator import BaseOperator +from qiskit.primitives.base import BaseEstimatorV2 +from qiskit.primitives import BaseEstimatorV1 +from qiskit.providers.options import Options from ..base.base_estimator_gradient import BaseEstimatorGradient from ..base.estimator_gradient_result import 
EstimatorGradientResult from ..utils import _make_param_shift_parameter_values - -from ...exceptions import AlgorithmError +from ...exceptions import QiskitMachineLearningError class ParamShiftEstimatorGradient(BaseEstimatorGradient): @@ -58,7 +62,7 @@ def _run( self, circuits: Sequence[QuantumCircuit], observables: Sequence[BaseOperator], - parameter_values: Sequence[Sequence[float]], + parameter_values: Sequence[Sequence[float]] | np.ndarray, parameters: Sequence[Sequence[Parameter]], **options, ) -> EstimatorGradientResult: @@ -97,26 +101,59 @@ def _run_unique( job_param_values.extend(param_shift_parameter_values) all_n.append(n) - # Run the single job with all circuits. - job = self._estimator.run( - job_circuits, - job_observables, - job_param_values, - **options, - ) - try: + # Determine how to run the estimator based on its version + if isinstance(self._estimator, BaseEstimatorV1): + # Run the single job with all circuits. + job = self._estimator.run( + job_circuits, + job_observables, + job_param_values, + **options, + ) results = job.result() - except Exception as exc: - raise AlgorithmError("Estimator job failed.") from exc - - # Compute the gradients. - gradients = [] - partial_sum_n = 0 - for n in all_n: - result = results.values[partial_sum_n : partial_sum_n + n] - gradient_ = (result[: n // 2] - result[n // 2 :]) / 2 - gradients.append(gradient_) - partial_sum_n += n - - opt = self._get_local_options(options) + + # Compute the gradients. 
+ gradients = [] + partial_sum_n = 0 + for n in all_n: + result = results.values[partial_sum_n : partial_sum_n + n] + gradient_ = (result[: n // 2] - result[n // 2 :]) / 2 + gradients.append(gradient_) + partial_sum_n += n + + opt = self._get_local_options(options) + + elif isinstance(self._estimator, BaseEstimatorV2): + isa_g_circs = self._pass_manager.run(job_circuits) + isa_g_observables = [ + op.apply_layout(isa_g_circs[i].layout) for i, op in enumerate(job_observables) + ] + # Prepare circuit-observable-parameter tuples (PUBs) + circuit_observable_params = [] + for pub in zip(isa_g_circs, isa_g_observables, job_param_values): + circuit_observable_params.append(pub) + + # For BaseEstimatorV2, run the estimator using PUBs and specified precision + job = self._estimator.run(circuit_observable_params) + results = job.result() + results = np.array([float(r.data.evs) for r in results]) + + # Compute the gradients. + gradients = [] + partial_sum_n = 0 + for n in all_n: + result = results[partial_sum_n : partial_sum_n + n] + gradient_ = (result[: n // 2] - result[n // 2 :]) / 2 + gradients.append(gradient_) + partial_sum_n += n + + opt = Options(**options) + + else: + raise QiskitMachineLearningError( + "The accepted estimators are BaseEstimatorV1 and BaseEstimatorV2; got " + + f"{type(self._estimator)} instead. Note that BaseEstimatorV1 is deprecated in" + + "Qiskit and removed in Qiskit IBM Runtime." 
+ ) + return EstimatorGradientResult(gradients=gradients, metadata=metadata, options=opt) diff --git a/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py b/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py index 0d7f384a8..f327b6453 100644 --- a/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py +++ b/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py @@ -20,11 +20,14 @@ from qiskit.circuit import Parameter, QuantumCircuit +from qiskit.primitives import BaseSamplerV1 +from qiskit.primitives.base import BaseSamplerV2 +from qiskit.result import QuasiDistribution + from ..base.base_sampler_gradient import BaseSamplerGradient from ..base.sampler_gradient_result import SamplerGradientResult from ..utils import _make_param_shift_parameter_values - -from ...exceptions import AlgorithmError +from ...exceptions import AlgorithmError, QiskitMachineLearningError class ParamShiftSamplerGradient(BaseSamplerGradient): @@ -91,18 +94,57 @@ def _run_unique( all_n.append(n) # Run the single job with all circuits. - job = self._sampler.run(job_circuits, job_param_values, **options) + if isinstance(self._sampler, BaseSamplerV1): + job = self._sampler.run(job_circuits, job_param_values, **options) + elif isinstance(self._sampler, BaseSamplerV2): + if self._pass_manager is None: + raise QiskitMachineLearningError( + "To use ParameterShifSamplerGradient with SamplerV2 you " + + "must pass a gradient with a pass manager" + ) + isa_g_circs = self._pass_manager.run(job_circuits) + circ_params = [ + (isa_g_circs[i], job_param_values[i]) for i in range(len(job_param_values)) + ] + job = self._sampler.run(circ_params) + else: + raise AlgorithmError( + "The accepted estimators are BaseSamplerV1 (deprecated) and BaseSamplerV2; got " + + f"{type(self._sampler)} instead." 
+ ) + try: results = job.result() except Exception as exc: - raise AlgorithmError("Estimator job failed.") from exc + raise AlgorithmError("Sampler job failed.") from exc # Compute the gradients. gradients = [] partial_sum_n = 0 + opt = None # Required by PyLint: possibly-used-before-assignment for n in all_n: gradient = [] - result = results.quasi_dists[partial_sum_n : partial_sum_n + n] + + if isinstance(self._sampler, BaseSamplerV1): + result = results.quasi_dists[partial_sum_n : partial_sum_n + n] + opt = self._get_local_options(options) + + elif isinstance(self._sampler, BaseSamplerV2): + result = [] + for i in range(partial_sum_n, partial_sum_n + n): + bitstring_counts = results[i].data.meas.get_counts() + + # Normalize the counts to probabilities + total_shots = sum(bitstring_counts.values()) + probabilities = {k: v / total_shots for k, v in bitstring_counts.items()} + + # Convert to quasi-probabilities + counts = QuasiDistribution(probabilities) + result.append( + {k: v for k, v in counts.items() if int(k) < self._len_quasi_dist} + ) + opt = options + for dist_plus, dist_minus in zip(result[: n // 2], result[n // 2 :]): grad_dist: dict[int, float] = defaultdict(float) for key, val in dist_plus.items(): @@ -113,5 +155,4 @@ def _run_unique( gradients.append(gradient) partial_sum_n += n - opt = self._get_local_options(options) return SamplerGradientResult(gradients=gradients, metadata=metadata, options=opt) diff --git a/qiskit_machine_learning/gradients/spsa/spsa_estimator_gradient.py b/qiskit_machine_learning/gradients/spsa/spsa_estimator_gradient.py index c0387a201..8f524a0bf 100644 --- a/qiskit_machine_learning/gradients/spsa/spsa_estimator_gradient.py +++ b/qiskit_machine_learning/gradients/spsa/spsa_estimator_gradient.py @@ -75,7 +75,7 @@ def _run( self, circuits: Sequence[QuantumCircuit], observables: Sequence[BaseOperator], - parameter_values: Sequence[Sequence[float]], + parameter_values: Sequence[Sequence[float]] | np.ndarray, parameters: 
Sequence[Sequence[Parameter]], **options, ) -> EstimatorGradientResult: diff --git a/qiskit_machine_learning/neural_networks/estimator_qnn.py b/qiskit_machine_learning/neural_networks/estimator_qnn.py index f55d82224..36b3d92ce 100644 --- a/qiskit_machine_learning/neural_networks/estimator_qnn.py +++ b/qiskit_machine_learning/neural_networks/estimator_qnn.py @@ -15,14 +15,18 @@ from __future__ import annotations import logging +import warnings from copy import copy from typing import Sequence - import numpy as np + from qiskit.circuit import Parameter, QuantumCircuit -from qiskit.primitives import BaseEstimator, Estimator, EstimatorResult +from qiskit.primitives.base import BaseEstimatorV2 +from qiskit.primitives import BaseEstimator, BaseEstimatorV1, Estimator, EstimatorResult from qiskit.quantum_info import SparsePauliOp from qiskit.quantum_info.operators.base_operator import BaseOperator + + from ..gradients import ( BaseEstimatorGradient, EstimatorGradientResult, @@ -31,7 +35,7 @@ from ..circuit.library import QNNCircuit from ..exceptions import QiskitMachineLearningError - +from ..utils.deprecation import issue_deprecation_msg from .neural_network import NeuralNetwork logger = logging.getLogger(__name__) @@ -64,7 +68,7 @@ class EstimatorQNN(NeuralNetwork): num_qubits = 2 # Using the QNNCircuit: - # Create a parameterized 2 qubit circuit composed of the default ZZFeatureMap feature map + # Create a parametrized 2 qubit circuit composed of the default ZZFeatureMap feature map # and RealAmplitudes ansatz. 
qnn_qc = QNNCircuit(num_qubits) @@ -105,12 +109,14 @@ def __init__( self, *, circuit: QuantumCircuit, - estimator: BaseEstimator | None = None, + estimator: BaseEstimator | BaseEstimatorV2 | None = None, observables: Sequence[BaseOperator] | BaseOperator | None = None, input_params: Sequence[Parameter] | None = None, weight_params: Sequence[Parameter] | None = None, gradient: BaseEstimatorGradient | None = None, input_gradients: bool = False, + num_virtual_qubits: int | None = None, + default_precision: float = 0.015625, ): r""" Args: @@ -127,12 +133,12 @@ def __init__( input_params: The parameters that correspond to the input data of the network. If ``None``, the input data is not bound to any parameters. If a :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is provided the - `input_params` value here is ignored. Instead the value is taken from the + `input_params` value here is ignored. Instead, the value is taken from the :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` input_parameters. weight_params: The parameters that correspond to the trainable weights. If ``None``, the weights are not bound to any parameters. If a :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is provided the - `weight_params` value here is ignored. Instead the value is taken from the + `weight_params` value here is ignored. Instead, the value is taken from the :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` weight_parameters. gradient: The estimator gradient to be used for the backward pass. If None, a default instance of the estimator gradient, @@ -141,27 +147,64 @@ def __init__( Note that this parameter is ``False`` by default, and must be explicitly set to ``True`` for a proper gradient computation when using :class:`~qiskit_machine_learning.connectors.TorchConnector`. + num_virtual_qubits: Number of virtual qubits. + default_precision: The default precision for the estimator if not specified during run. 
Raises: QiskitMachineLearningError: Invalid parameter values. """ if estimator is None: estimator = Estimator() + + if isinstance(estimator, BaseEstimatorV1): + issue_deprecation_msg( + msg="V1 Primitives are deprecated", + version="0.8.0", + remedy="Use V2 primitives for continued compatibility and support.", + period="4 months", + ) self.estimator = estimator self._org_circuit = circuit + + if num_virtual_qubits is None: + self.num_virtual_qubits = circuit.num_qubits + warnings.warn( + f"No number of qubits was not specified ({num_virtual_qubits}) and was retrieved from " + + f"`circuit` ({self.num_virtual_qubits:d}). If `circuit` is transpiled, this may cause " + + "unstable behaviour.", + UserWarning, + stacklevel=2, + ) + else: + self.num_virtual_qubits = num_virtual_qubits + if observables is None: - observables = SparsePauliOp.from_list([("Z" * circuit.num_qubits, 1)]) + observables = SparsePauliOp.from_sparse_list( + [("Z" * self.num_virtual_qubits, range(self.num_virtual_qubits), 1)], + num_qubits=self.circuit.num_qubits, + ) + if isinstance(observables, BaseOperator): observables = (observables,) + self._observables = observables + if isinstance(circuit, QNNCircuit): self._input_params = list(circuit.input_parameters) self._weight_params = list(circuit.weight_parameters) else: self._input_params = list(input_params) if input_params is not None else [] self._weight_params = list(weight_params) if weight_params is not None else [] + if gradient is None: + if isinstance(self.estimator, BaseEstimatorV2): + raise QiskitMachineLearningError( + "Please provide a gradient with pass manager initialised." 
+ ) + gradient = ParamShiftEstimatorGradient(self.estimator) + + self._default_precision = default_precision self.gradient = gradient self._input_gradients = input_gradients @@ -198,7 +241,7 @@ def weight_params(self) -> Sequence[Parameter] | None: @property def input_gradients(self) -> bool: """Returns whether gradients with respect to input data are computed by this neural network - in the ``backward`` method or not. By default such gradients are not computed.""" + in the ``backward`` method or not. By default, such gradients are not computed.""" return self._input_gradients @input_gradients.setter @@ -206,25 +249,46 @@ def input_gradients(self, input_gradients: bool) -> None: """Turn on/off computation of gradients with respect to input data.""" self._input_gradients = input_gradients + @property + def default_precision(self) -> float: + """Return the default precision""" + return self._default_precision + def _forward_postprocess(self, num_samples: int, result: EstimatorResult) -> np.ndarray: """Post-processing during forward pass of the network.""" - return np.reshape(result.values, (-1, num_samples)).T + return np.reshape(result, (-1, num_samples)).T def _forward( self, input_data: np.ndarray | None, weights: np.ndarray | None ) -> np.ndarray | None: """Forward pass of the neural network.""" parameter_values_, num_samples = self._preprocess_forward(input_data, weights) - job = self.estimator.run( - [self._circuit] * num_samples * self.output_shape[0], - [op for op in self._observables for _ in range(num_samples)], - np.tile(parameter_values_, (self.output_shape[0], 1)), - ) - try: - results = job.result() - except Exception as exc: - raise QiskitMachineLearningError("Estimator job failed.") from exc + # Determine how to run the estimator based on its version + if isinstance(self.estimator, BaseEstimatorV1): + job = self.estimator.run( + [self._circuit] * num_samples * self.output_shape[0], + [op for op in self._observables for _ in range(num_samples)], + 
np.tile(parameter_values_, (self.output_shape[0], 1)), + ) + results = job.result().values + + elif isinstance(self.estimator, BaseEstimatorV2): + + # Prepare circuit-observable-parameter tuples (PUBs) + circuit_observable_params = [] + for observable in self._observables: + circuit_observable_params.append((self._circuit, observable, parameter_values_)) + + # For BaseEstimatorV2, run the estimator using PUBs and specified precision + job = self.estimator.run(circuit_observable_params, precision=self._default_precision) + results = [result.data.evs for result in job.result()] + else: + raise QiskitMachineLearningError( + "The accepted estimators are BaseEstimatorV1 and BaseEstimatorV2; got " + + f"{type(self.estimator)} instead. Note that BaseEstimatorV1 is deprecated in" + + "Qiskit and removed in Qiskit IBM Runtime." + ) return self._forward_postprocess(num_samples, results) def _backward_postprocess( @@ -269,19 +333,20 @@ def _backward( param_values = np.tile(parameter_values, (num_observables, 1)) job = None + if self._input_gradients: - job = self.gradient.run(circuits, observables, param_values) # type: ignore[arg-type] + + job = self.gradient.run(circuits, observables, param_values) + elif len(parameter_values[0]) > self._num_inputs: params = [self._circuit.parameters[self._num_inputs :]] * num_circuits - job = self.gradient.run( - circuits, observables, param_values, parameters=params # type: ignore[arg-type] - ) + job = self.gradient.run(circuits, observables, param_values, parameters=params) if job is not None: try: results = job.result() except Exception as exc: - raise QiskitMachineLearningError("Estimator job failed.") from exc + raise QiskitMachineLearningError(f"Estimator job failed. 
{exc}") from exc input_grad, weights_grad = self._backward_postprocess(num_samples, results) diff --git a/qiskit_machine_learning/neural_networks/neural_network.py b/qiskit_machine_learning/neural_networks/neural_network.py index e75858d38..3f0e14c9c 100644 --- a/qiskit_machine_learning/neural_networks/neural_network.py +++ b/qiskit_machine_learning/neural_networks/neural_network.py @@ -293,9 +293,9 @@ def _reparameterize_circuit( if len(parameters) != (self.num_inputs + self.num_weights): raise ValueError( - f"Number of circuit parameters {len(parameters)}" - f" mismatch with sum of num inputs and weights" - f" {self.num_inputs + self.num_weights}" + f"Number of circuit parameters ({len(parameters)})" + f" does not match the sum of number of inputs and weights" + f" ({self.num_inputs + self.num_weights})." ) new_input_params = ParameterVector("inputs", self.num_inputs) diff --git a/qiskit_machine_learning/neural_networks/sampler_qnn.py b/qiskit_machine_learning/neural_networks/sampler_qnn.py index 6982d2e87..bb5ca4023 100644 --- a/qiskit_machine_learning/neural_networks/sampler_qnn.py +++ b/qiskit_machine_learning/neural_networks/sampler_qnn.py @@ -14,17 +14,19 @@ from __future__ import annotations import logging - from numbers import Integral from typing import Callable, cast, Iterable, Sequence - import numpy as np +from qiskit.primitives import BaseSamplerV1 +from qiskit.primitives.base import BaseSamplerV2 + from qiskit.circuit import Parameter, QuantumCircuit from qiskit.primitives import BaseSampler, SamplerResult, Sampler +from qiskit.result import QuasiDistribution import qiskit_machine_learning.optionals as _optionals -from .neural_network import NeuralNetwork + from ..gradients import ( BaseSamplerGradient, ParamShiftSamplerGradient, @@ -32,6 +34,8 @@ ) from ..circuit.library import QNNCircuit from ..exceptions import QiskitMachineLearningError +from ..utils.deprecation import issue_deprecation_msg +from .neural_network import NeuralNetwork if 
_optionals.HAS_SPARSE: @@ -128,6 +132,7 @@ def __init__( self, *, circuit: QuantumCircuit, + num_virtual_qubits: int | None = None, sampler: BaseSampler | None = None, input_params: Sequence[Parameter] | None = None, weight_params: Sequence[Parameter] | None = None, @@ -138,50 +143,53 @@ def __init__( input_gradients: bool = False, ): """ - Args: - sampler: The sampler primitive used to compute the neural network's results. - If ``None`` is given, a default instance of the reference sampler defined - by :class:`~qiskit.primitives.Sampler` will be used. - circuit: The parametrized quantum circuit that generates the samples of this network. - If a :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is passed, the - `input_params` and `weight_params` do not have to be provided, because these two - properties are taken from the - :class:`~qiskit_machine_learning.circuit.library.QNNCircuit`. - input_params: The parameters of the circuit corresponding to the input. If a - :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is provided the - `input_params` value here is ignored. Instead the value is taken from the - :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` input_parameters. - weight_params: The parameters of the circuit corresponding to the trainable weights. If - a :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is provided the - `weight_params` value here is ignored. Instead the value is taken from the - :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` weight_parameters. - sparse: Returns whether the output is sparse or not. - interpret: A callable that maps the measured integer to another unsigned integer or - tuple of unsigned integers. These are used as new indices for the (potentially - sparse) output array. If no interpret function is - passed, then an identity function will be used by this neural network. - output_shape: The output shape of the custom interpretation. 
It is ignored if no custom - interpret method is provided where the shape is taken to be - ``2^circuit.num_qubits``. - gradient: An optional sampler gradient to be used for the backward pass. - If ``None`` is given, a default instance of - :class:`~qiskit_machine_learning.gradients.ParamShiftSamplerGradient` will be used. - input_gradients: Determines whether to compute gradients with respect to input data. - Note that this parameter is ``False`` by default, and must be explicitly set to - ``True`` for a proper gradient computation when using - :class:`~qiskit_machine_learning.connectors.TorchConnector`. - Raises: - QiskitMachineLearningError: Invalid parameter values. + Args: sampler: The sampler primitive used to compute the neural network's results. If + ``None`` is given, a default instance of the reference sampler defined by + :class:`~qiskit.primitives.Sampler` will be used. circuit: The parametrized quantum + circuit that generates the samples of this network. If a + :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is passed, + the `input_params` and `weight_params` do not have to be provided, because these two + properties are taken from the :class:`~qiskit_machine_learning.circuit.library.QNNCircuit + `. input_params: The parameters of the circuit corresponding to the input. If a + :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is provided the + `input_params` value here is ignored. Instead, the value is taken from the + :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` input_parameters. + weight_params: The parameters of the circuit corresponding to the trainable weights. If a + :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is provided the + `weight_params` value here is ignored. Instead, the value is taken from the + :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` weight_parameters. sparse: + Returns whether the output is sparse or not. 
interpret: A callable that maps the measured + integer to another unsigned integer or tuple of unsigned integers. These are used as new + indices for the (potentially sparse) output array. If no interpret function is passed, + then an identity function will be used by this neural network. output_shape: The output + shape of the custom interpretation. For SamplerV1, it is ignored if no custom interpret + method is provided where the shape is taken to be ``2^circuit.num_qubits``. gradient: An + optional sampler gradient to be used for the backward pass. If ``None`` is given, + a default instance of + :class:`~qiskit_machine_learning.gradients.ParamShiftSamplerGradient` will be used. + input_gradients: Determines whether to compute gradients with respect to input data. Note + that this parameter is ``False`` by default, and must be explicitly set to ``True`` for a + proper gradient computation when using + :class:`~qiskit_machine_learning.connectors.TorchConnector`. Raises: + QiskitMachineLearningError: Invalid parameter values. 
""" # set primitive, provide default if sampler is None: sampler = Sampler() + + if isinstance(sampler, BaseSamplerV1): + issue_deprecation_msg( + msg="V1 Primitives are deprecated", + version="0.8.0", + remedy="Use V2 primitives for continued compatibility and support.", + period="4 months", + ) self.sampler = sampler - # set gradient - if gradient is None: - gradient = ParamShiftSamplerGradient(self.sampler) - self.gradient = gradient + if num_virtual_qubits is None: + # print statement + num_virtual_qubits = circuit.num_qubits + self.num_virtual_qubits = num_virtual_qubits self._org_circuit = circuit @@ -196,6 +204,12 @@ def __init__( _optionals.HAS_SPARSE.require_now("DOK") self.set_interpret(interpret, output_shape) + + # set gradient + if gradient is None: + gradient = ParamShiftSamplerGradient(sampler=self.sampler) + self.gradient = gradient + self._input_gradients = input_gradients super().__init__( @@ -276,10 +290,9 @@ def _compute_output_shape( # Warn user that output_shape parameter will be ignored logger.warning( "No interpret function given, output_shape will be automatically " - "determined as 2^num_qubits." + "determined as 2^num_virtual_qubits." 
) - output_shape_ = (2**self.circuit.num_qubits,) - + output_shape_ = (2**self.num_virtual_qubits,) return output_shape_ def _postprocess(self, num_samples: int, result: SamplerResult) -> np.ndarray | SparseArray: @@ -296,8 +309,24 @@ def _postprocess(self, num_samples: int, result: SamplerResult) -> np.ndarray | prob = np.zeros((num_samples, *self._output_shape)) for i in range(num_samples): - counts = result.quasi_dists[i] + if isinstance(self.sampler, BaseSamplerV1): + counts = result.quasi_dists[i] + + elif isinstance(self.sampler, BaseSamplerV2): + bitstring_counts = result[i].data.meas.get_counts() + + # Normalize the counts to probabilities + total_shots = sum(bitstring_counts.values()) + probabilities = {k: v / total_shots for k, v in bitstring_counts.items()} + # Convert to quasi-probabilities + counts = QuasiDistribution(probabilities) + counts = {k: v for k, v in counts.items() if int(k) < 2**self.num_virtual_qubits} + else: + raise QiskitMachineLearningError( + "The accepted estimators are BaseSamplerV1 (deprecated) and BaseSamplerV2; " + + f"got {type(self.sampler)} instead." + ) # evaluate probabilities for b, v in counts.items(): key = self._interpret(b) @@ -387,14 +416,22 @@ def _forward( """ parameter_values, num_samples = self._preprocess_forward(input_data, weights) - # sampler allows batching - job = self.sampler.run([self._circuit] * num_samples, parameter_values) + if isinstance(self.sampler, BaseSamplerV1): + job = self.sampler.run([self._circuit] * num_samples, parameter_values) + elif isinstance(self.sampler, BaseSamplerV2): + job = self.sampler.run( + [(self._circuit, parameter_values[i]) for i in range(num_samples)] + ) + else: + raise QiskitMachineLearningError( + "The accepted estimators are BaseSamplerV1 (deprecated) and BaseSamplerV2; " + + f"got {type(self.sampler)} instead." 
+ ) try: results = job.result() except Exception as exc: - raise QiskitMachineLearningError("Sampler job failed.") from exc + raise QiskitMachineLearningError(f"Sampler job failed: {exc}") from exc result = self._postprocess(num_samples, results) - return result def _backward( @@ -410,7 +447,6 @@ def _backward( if np.prod(parameter_values.shape) > 0: circuits = [self._circuit] * num_samples - job = None if self._input_gradients: job = self.gradient.run(circuits, parameter_values) # type: ignore[arg-type] @@ -424,7 +460,7 @@ def _backward( try: results = job.result() except Exception as exc: - raise QiskitMachineLearningError("Sampler job failed.") from exc + raise QiskitMachineLearningError(f"Sampler job failed: {exc}") from exc input_grad, weights_grad = self._postprocess_gradient(num_samples, results) diff --git a/qiskit_machine_learning/state_fidelities/compute_uncompute.py b/qiskit_machine_learning/state_fidelities/compute_uncompute.py index 3453b2081..03a9d7354 100644 --- a/qiskit_machine_learning/state_fidelities/compute_uncompute.py +++ b/qiskit_machine_learning/state_fidelities/compute_uncompute.py @@ -18,11 +18,15 @@ from copy import copy from qiskit import QuantumCircuit -from qiskit.primitives import BaseSampler +from qiskit.primitives import BaseSampler, BaseSamplerV1, SamplerResult, StatevectorSampler +from qiskit.primitives.base import BaseSamplerV2 +from qiskit.transpiler.passmanager import PassManager +from qiskit.result import QuasiDistribution from qiskit.primitives.primitive_job import PrimitiveJob from qiskit.providers import Options -from ..exceptions import AlgorithmError +from ..exceptions import AlgorithmError, QiskitMachineLearningError +from ..utils.deprecation import issue_deprecation_msg from .base_state_fidelity import BaseStateFidelity from .state_fidelity_result import StateFidelityResult from ..algorithm_job import AlgorithmJob @@ -53,7 +57,10 @@ class ComputeUncompute(BaseStateFidelity): def __init__( self, - sampler: BaseSampler, + 
sampler: BaseSampler | BaseSamplerV2, + *, + num_virtual_qubits: int | None = None, + pass_manager: PassManager | None = None, options: Options | None = None, local: bool = False, ) -> None: @@ -79,11 +86,31 @@ def __init__( Raises: ValueError: If the sampler is not an instance of ``BaseSampler``. """ - if not isinstance(sampler, BaseSampler): + if (not isinstance(sampler, BaseSampler)) and (not isinstance(sampler, BaseSamplerV2)): raise ValueError( - f"The sampler should be an instance of BaseSampler, " f"but got {type(sampler)}" + f"The sampler should be an instance of BaseSampler or BaseSamplerV2, " + f"but got {type(sampler)}" + ) + if ( + isinstance(sampler, BaseSamplerV2) + and (pass_manager is None) + and not isinstance(sampler, StatevectorSampler) + ): + raise ValueError(f"A pass_manager should be provided for {type(sampler)}.") + if (pass_manager is not None) and (num_virtual_qubits is None): + raise ValueError( + f"Number of virtual qubits should be provided for {type(pass_manager)}." 
+ ) + if isinstance(sampler, BaseSamplerV1): + issue_deprecation_msg( + msg="V1 Primitives are deprecated", + version="0.8.0", + remedy="Use V2 primitives for continued compatibility and support.", + period="4 months", ) self._sampler: BaseSampler = sampler + self.num_virtual_qubits = num_virtual_qubits + self.pass_manager = pass_manager self._local = local self._default_options = Options() if options is not None: @@ -111,6 +138,8 @@ def create_fidelity_circuit( circuit = circuit_1.compose(circuit_2.inverse()) circuit.measure_all() + if self.pass_manager is not None: + circuit = self.pass_manager.run(circuit) return circuit def _run( @@ -157,28 +186,67 @@ def _run( opts = copy(self._default_options) opts.update_options(**options) - sampler_job = self._sampler.run(circuits=circuits, parameter_values=values, **opts.__dict__) - - local_opts = self._get_local_options(opts.__dict__) - return AlgorithmJob(ComputeUncompute._call, sampler_job, circuits, self._local, local_opts) + if isinstance(self._sampler, BaseSamplerV1): + sampler_job = self._sampler.run( + circuits=circuits, parameter_values=values, **opts.__dict__ + ) + local_opts = self._get_local_options(opts.__dict__) + elif isinstance(self._sampler, BaseSamplerV2): + sampler_job = self._sampler.run( + [(circuits[i], values[i]) for i in range(len(circuits))], **opts.__dict__ + ) + local_opts = opts.__dict__ + else: + raise QiskitMachineLearningError( + "The accepted estimators are BaseSamplerV1 (deprecated) and BaseSamplerV2; got" + + f" {type(self._sampler)} instead." 
+ ) + return AlgorithmJob( + ComputeUncompute._call, + sampler_job, + circuits, + self._local, + local_opts, + self._sampler, + self._post_process_v2, + self.num_virtual_qubits, + ) @staticmethod def _call( - job: PrimitiveJob, circuits: Sequence[QuantumCircuit], local: bool, local_opts: Options + job: PrimitiveJob, + circuits: Sequence[QuantumCircuit], + local: bool, + local_opts: Options = None, + _sampler=None, + _post_process_v2=None, + num_virtual_qubits=None, ) -> StateFidelityResult: try: result = job.result() except Exception as exc: raise AlgorithmError("Sampler job failed!") from exc + if isinstance(_sampler, BaseSamplerV1): + quasi_dists = result.quasi_dists + elif isinstance(_sampler, BaseSamplerV2): + quasi_dists = _post_process_v2(result) + if local: raw_fidelities = [ - ComputeUncompute._get_local_fidelity(prob_dist, circuit.num_qubits) - for prob_dist, circuit in zip(result.quasi_dists, circuits) + ComputeUncompute._get_local_fidelity( + prob_dist, + ( + num_virtual_qubits + if isinstance(_sampler, BaseSamplerV2) + else circuit.num_qubits + ), + ) + for prob_dist, circuit in zip(quasi_dists, circuits) ] else: raw_fidelities = [ - ComputeUncompute._get_global_fidelity(prob_dist) for prob_dist in result.quasi_dists + ComputeUncompute._get_global_fidelity(prob_dist) for prob_dist in quasi_dists ] fidelities = ComputeUncompute._truncate_fidelities(raw_fidelities) @@ -225,6 +293,21 @@ def _get_local_options(self, options: Options) -> Options: opts.update_options(**options) return opts + def _post_process_v2(self, result: SamplerResult): + quasis = [] + for i in range(len(result)): + bitstring_counts = result[i].data.meas.get_counts() + + # Normalize the counts to probabilities + total_shots = sum(bitstring_counts.values()) + probabilities = {k: v / total_shots for k, v in bitstring_counts.items()} + + # Convert to quasi-probabilities + counts = QuasiDistribution(probabilities) + quasi_probs = {k: v for k, v in counts.items() if int(k) < 
2**self.num_virtual_qubits} + quasis.append(quasi_probs) + return quasis + @staticmethod def _get_global_fidelity(probability_distribution: dict[int, float]) -> float: """Process the probability distribution of a measurement to determine the diff --git a/qiskit_machine_learning/utils/deprecation.py b/qiskit_machine_learning/utils/deprecation.py new file mode 100644 index 000000000..14fb89ecb --- /dev/null +++ b/qiskit_machine_learning/utils/deprecation.py @@ -0,0 +1,84 @@ +# This code is part of a Qiskit project. +# +# (C) Copyright IBM 2024, 2024. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. +"""Deprecation utilities""" + +from typing import Callable, Any +import functools +import warnings + + +def deprecate_function(deprecated: str, version: str, remedy: str, stacklevel: int = 2) -> Callable: + """Emit a warning prior to calling decorated function. + Args: + deprecated: Function being deprecated. + version: First release the function is deprecated. + remedy: User action to take. + stacklevel: The warning stack-level to use. + + Returns: + The decorated, deprecated callable. 
+ """ + + def decorator(func: Callable) -> Callable: + """Emit a deprecation warning.""" + + @functools.wraps(func) + def wrapper(*args: Any, **kwargs: Any) -> Callable: + """Emit a deprecation warning.""" + issue_deprecation_msg( + f"The {deprecated} method is deprecated", + version, + remedy, + stacklevel + 1, + ) + return func(*args, **kwargs) + + return wrapper + + return decorator + + +def deprecate_arguments(deprecated: str, version: str, remedy: str, stacklevel: int = 2) -> None: + """Emit a warning about deprecated keyword arguments. + + Args: + deprecated: Keyword arguments being deprecated. + version: First release the function is deprecated. + remedy: User action to take. + stacklevel: The warning stack-level to use. + """ + issue_deprecation_msg( + f"The '{deprecated}' keyword arguments are deprecated", + version, + remedy, + stacklevel + 1, + ) + + +def issue_deprecation_msg( + msg: str, version: str, remedy: str, stacklevel: int = 2, period: str = "3 months" +) -> None: + """Emit a deprecation warning. + + Args: + msg: Deprecation message. + version: First release the function is deprecated. + remedy: User action to take. + stacklevel: The warning stack-level to use. + period: Deprecation period. + """ + warnings.warn( + f"{msg} as of qiskit-machine-learning {version} " + f"and will be removed no sooner than {period} after the release date. {remedy}", + DeprecationWarning, + stacklevel=stacklevel + 1, # Increment to account for this function. + ) diff --git a/releasenotes/notes/v2-primitive-support-2cf30f1701c31d0f.yaml b/releasenotes/notes/v2-primitive-support-2cf30f1701c31d0f.yaml new file mode 100644 index 000000000..17d00b5aa --- /dev/null +++ b/releasenotes/notes/v2-primitive-support-2cf30f1701c31d0f.yaml @@ -0,0 +1,35 @@ +--- +features: + - | + **Support for V2 Primitives**: + The `EstimatorQNN` and `SamplerQNN` classes now support `V2` primitives + (`EstimatorV2` and `SamplerV2`), allowing direct execution on IBM Quantum backends. 
+ This enhancement ensures compatibility with Qiskit IBM Runtime’s Primitive Unified + Block (PUB) requirements and instruction set architecture (ISA) constraints for + circuits and observables. Users can switch between `V1` primitives + and `V2` primitives from version `0.8`. From version `0.9`, V1 primitives will be + removed. + +upgrade: + - | + Users working with real backends are advised to migrate to `V2` primitives + (`EstimatorV2` and `SamplerV2`) to ensure compatibility with Qiskit IBM Runtime + hardware requirements. These `V2` primitives will become the standard in + the `0.8` release going forward, while `V1` primitives are deprecated. + +deprecations: + - | + **Deprecated V1 Primitives**: + The `V1` primitives (e.g., `EstimatorV1` and `SamplerV1`) are no longer compatible + with real quantum backends via Qiskit IBM Runtime. This update provides initial + transitional support, but `V1` primitives may be fully deprecated and removed in + version `0.9`. Users should adopt `V2` primitives for both local and hardware + executions to ensure long-term compatibility. + +known_issues: + - | + **Optimizer compatibility may be unstable**: + Current implementations of `EstimatorQNN` and `SamplerQNN` using `V2` primitives + may require further testing with optimizers, especially those depending on gradient + calculations. Users are advised to use optimizers with caution and report any + issues related to optimizer compatibility in Qiskit Machine Learning’s issue tracker. 
diff --git a/requirements-dev.txt b/requirements-dev.txt index bdfa45cba..6a56691fc 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -17,3 +17,4 @@ mypy>=0.981 mypy-extensions>=0.4.3 nbsphinx qiskit_sphinx_theme~=1.16.0 +qiskit-ibm-runtime>=0.21 diff --git a/setup.py b/setup.py index d7caaa4be..ea5e885e4 100644 --- a/setup.py +++ b/setup.py @@ -16,12 +16,16 @@ import os import re -with open('requirements.txt') as f: +with open("requirements.txt") as f: REQUIREMENTS = f.read().splitlines() -if not hasattr(setuptools, 'find_namespace_packages') or not inspect.ismethod(setuptools.find_namespace_packages): - print("Your setuptools version:'{}' does not support PEP 420 (find_namespace_packages). " - "Upgrade it to version >='40.1.0' and repeat install.".format(setuptools.__version__)) +if not hasattr(setuptools, "find_namespace_packages") or not inspect.ismethod( + setuptools.find_namespace_packages +): + print( + "Your setuptools version:'{}' does not support PEP 420 (find_namespace_packages). 
" + "Upgrade it to version >='40.1.0' and repeat install.".format(setuptools.__version__) + ) sys.exit(1) VERSION_PATH = os.path.join(os.path.dirname(__file__), "qiskit_machine_learning", "VERSION.txt") @@ -39,15 +43,15 @@ ) setuptools.setup( - name='qiskit-machine-learning', + name="qiskit-machine-learning", version=VERSION, - description='Qiskit Machine Learning: A library of quantum computing machine learning experiments', + description="Qiskit Machine Learning: A library of quantum computing machine learning experiments", long_description=README, long_description_content_type="text/markdown", - url='https://github.com/qiskit-community/qiskit-machine-learning', - author='Qiskit Machine Learning Development Team', - author_email='qiskit@us.ibm.com', - license='Apache-2.0', + url="https://github.com/qiskit-community/qiskit-machine-learning", + author="Qiskit Machine Learning Development Team", + author_email="qiskit@us.ibm.com", + license="Apache-2.0", classifiers=[ "Environment :: Console", "License :: OSI Approved :: Apache Software License", @@ -61,21 +65,23 @@ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", - "Topic :: Scientific/Engineering" + "Topic :: Scientific/Engineering", ], - keywords='qiskit sdk quantum machine learning ml', - packages=setuptools.find_packages(include=['qiskit_machine_learning','qiskit_machine_learning.*']), + keywords="qiskit sdk quantum machine learning ml", + packages=setuptools.find_packages( + include=["qiskit_machine_learning", "qiskit_machine_learning.*"] + ), install_requires=REQUIREMENTS, include_package_data=True, python_requires=">=3.9", extras_require={ - 'torch': ["torch"], - 'sparse': ["sparse"], + "torch": ["torch"], + "sparse": ["sparse"], }, project_urls={ "Bug Tracker": "https://github.com/qiskit-community/qiskit-machine-learning/issues", "Documentation": "https://qiskit-community.github.io/qiskit-machine-learning/", "Source Code": 
"https://github.com/qiskit-community/qiskit-machine-learning", }, - zip_safe=False + zip_safe=False, ) diff --git a/test/neural_networks/test_estimator_qnn.py b/test/neural_networks/test_estimator_qnn_v1.py similarity index 99% rename from test/neural_networks/test_estimator_qnn.py rename to test/neural_networks/test_estimator_qnn_v1.py index 566329f27..483eaf0c1 100644 --- a/test/neural_networks/test_estimator_qnn.py +++ b/test/neural_networks/test_estimator_qnn_v1.py @@ -20,9 +20,10 @@ from qiskit.circuit import Parameter, QuantumCircuit from qiskit.circuit.library import ZZFeatureMap, RealAmplitudes, ZFeatureMap from qiskit.quantum_info import SparsePauliOp -from qiskit_machine_learning.circuit.library import QNNCircuit +from qiskit_machine_learning.circuit.library import QNNCircuit from qiskit_machine_learning.neural_networks.estimator_qnn import EstimatorQNN +from qiskit_machine_learning.utils import algorithm_globals CASE_DATA = { "shape_1_1": { @@ -178,6 +179,7 @@ def _test_network_passes( estimator_qnn, case_data, ): + algorithm_globals.random_seed = 52 test_data = case_data["test_data"] weights = case_data["weights"] correct_forwards = case_data["correct_forwards"] @@ -407,7 +409,7 @@ def test_setters_getters(self): estimator_qnn.input_gradients = True self.assertTrue(estimator_qnn.input_gradients) - def test_qnn_qc_circui_construction(self): + def test_qnn_qc_circuit_construction(self): """Test Estimator QNN properties and forward/backward pass for QNNCircuit construction""" num_qubits = 2 feature_map = ZZFeatureMap(feature_dimension=num_qubits) diff --git a/test/neural_networks/test_estimator_qnn_v2.py b/test/neural_networks/test_estimator_qnn_v2.py new file mode 100644 index 000000000..b8fad6557 --- /dev/null +++ b/test/neural_networks/test_estimator_qnn_v2.py @@ -0,0 +1,569 @@ +# This code is part of a Qiskit project. +# +# (C) Copyright IBM 2022, 2024. +# +# This code is licensed under the Apache License, Version 2.0. 
You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. + +""" Test EstimatorQNN """ + +import unittest + +from test import QiskitMachineLearningTestCase + +import numpy as np + +from qiskit.circuit import Parameter, QuantumCircuit +from qiskit.circuit.library import ZZFeatureMap, RealAmplitudes, ZFeatureMap +from qiskit.quantum_info import SparsePauliOp +from qiskit.providers.fake_provider import GenericBackendV2 +from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager +from qiskit_ibm_runtime import Session, EstimatorV2 + +from qiskit_machine_learning.circuit.library import QNNCircuit +from qiskit_machine_learning.neural_networks.estimator_qnn import EstimatorQNN +from qiskit_machine_learning.utils import algorithm_globals + +from qiskit_machine_learning.gradients import ParamShiftEstimatorGradient + +algorithm_globals.random_seed = 52 + +CASE_DATA = { + "shape_1_1": { + "test_data": [1, [1], [[1], [2]], [[[1], [2]], [[3], [4]]]], + "weights": [1], + "correct_forwards": [ + [[0.08565359]], + [[0.08565359]], + [[0.08565359], [-0.90744233]], + [[[0.08565359], [-0.90744233]], [[-1.06623996], [-0.24474149]]], + ], + "correct_weight_backwards": [ + [[[0.70807342]]], + [[[0.70807342]]], + [[[0.70807342]], [[0.7651474]]], + [[[[0.70807342]], [[0.7651474]]], [[[0.11874839]], [[-0.63682734]]]], + ], + "correct_input_backwards": [ + [[[-1.13339757]]], + [[[-1.13339757]]], + [[[-1.13339757]], [[-0.68445233]]], + [[[[-1.13339757]], [[-0.68445233]]], [[[0.39377522]], [[1.10996765]]]], + ], + }, + "shape_2_1": { + "test_data": [[1, 2], [[1, 2]], [[1, 2], [3, 4]]], + "weights": [1, 2], + "correct_forwards": [ + [[0.41256026]], + [[0.41256026]], + 
[[0.41256026], [0.72848859]], + ], + "correct_weight_backwards": [ + [[[0.12262287, -0.17203964]]], + [[[0.12262287, -0.17203964]]], + [[[0.12262287, -0.17203964]], [[0.03230095, -0.04531817]]], + ], + "correct_input_backwards": [ + [[[-0.81570272, -0.39688474]]], + [[[-0.81570272, -0.39688474]]], + [[[-0.81570272, -0.39688474]], [[0.25229775, 0.67111573]]], + ], + }, + "shape_1_2": { + "test_data": [ + [1], + [[1], [2]], + [[[1], [2]], [[3], [4]]], + ], + "weights": [1], + "correct_forwards": [ + [[0.08565359, 0.17130718]], + [[0.08565359, 0.17130718], [-0.90744233, -1.81488467]], + [ + [[0.08565359, 0.17130718], [-0.90744233, -1.81488467]], + [[-1.06623996, -2.13247992], [-0.24474149, -0.48948298]], + ], + ], + "correct_weight_backwards": [ + [[[0.70807342], [1.41614684]]], + [[[0.70807342], [1.41614684]], [[0.7651474], [1.5302948]]], + [ + [[[0.70807342], [1.41614684]], [[0.7651474], [1.5302948]]], + [[[0.11874839], [0.23749678]], [[-0.63682734], [-1.27365468]]], + ], + ], + "correct_input_backwards": [ + [[[-1.13339757], [-2.26679513]]], + [[[-1.13339757], [-2.26679513]], [[-0.68445233], [-1.36890466]]], + [ + [[[-1.13339757], [-2.26679513]], [[-0.68445233], [-1.36890466]]], + [[[0.39377522], [0.78755044]], [[1.10996765], [2.2199353]]], + ], + ], + }, + "shape_2_2": { + "test_data": [[1, 2], [[1, 2], [3, 4]]], + "weights": [1, 2], + "correct_forwards": [ + [[-0.07873524, 0.4912955]], + [[-0.07873524, 0.4912955], [-0.0207402, 0.74922879]], + ], + "correct_weight_backwards": [ + [[[0.12262287, -0.17203964], [0, 0]]], + [[[0.12262287, -0.17203964], [0, 0]], [[0.03230095, -0.04531817], [0, 0]]], + ], + "correct_input_backwards": [ + [[[-0.05055532, -0.17203964], [-0.7651474, -0.2248451]]], + [ + [[-0.05055532, -0.17203964], [-0.7651474, -0.2248451]], + [[0.14549777, 0.02401345], [0.10679997, 0.64710228]], + ], + ], + }, + "no_input_parameters": { + "test_data": [None], + "weights": [1, 1], + "correct_forwards": [[[0.08565359]]], + "correct_weight_backwards": 
[[[[-1.13339757, 0.70807342]]]], + "correct_input_backwards": [None], + }, + "no_weight_parameters": { + "test_data": [[1, 1]], + "weights": None, + "correct_forwards": [[[0.08565359]]], + "correct_weight_backwards": [None], + "correct_input_backwards": [[[[-1.13339757, 0.70807342]]]], + }, + "no_parameters": { + "test_data": [None], + "weights": None, + "correct_forwards": [[[1]]], + "correct_weight_backwards": [None], + "correct_input_backwards": [None], + }, + "default_observables": { + "test_data": [[[1], [2]]], + "weights": [1], + "correct_forwards": [[[-0.45464871], [-0.4912955]]], + "correct_weight_backwards": [[[[0.70807342]], [[0.7651474]]]], + "correct_input_backwards": [[[[-0.29192658]], [[0.2248451]]]], + }, + "single_observable": { + "test_data": [1, [1], [[1], [2]], [[[1], [2]], [[3], [4]]]], + "weights": [1], + "correct_forwards": [ + [[0.08565359]], + [[0.08565359]], + [[0.08565359], [-0.90744233]], + [[[0.08565359], [-0.90744233]], [[-1.06623996], [-0.24474149]]], + ], + "correct_weight_backwards": [ + [[[0.70807342]]], + [[[0.70807342]]], + [[[0.70807342]], [[0.7651474]]], + [[[[0.70807342]], [[0.7651474]]], [[[0.11874839]], [[-0.63682734]]]], + ], + "correct_input_backwards": [ + [[[-1.13339757]]], + [[[-1.13339757]]], + [[[-1.13339757]], [[-0.68445233]]], + [[[[-1.13339757]], [[-0.68445233]]], [[[0.39377522]], [[1.10996765]]]], + ], + }, +} + + +class TestEstimatorQNNV2(QiskitMachineLearningTestCase): + """EstimatorQNN Tests for estimator_v2. 
The correct references are obtained from EstimatorQNN""" + + tolerance: dict[str, float] = dict(atol=3 * 1.0e-1, rtol=3 * 1.0e-1) + backend = GenericBackendV2(num_qubits=2, seed=123) + session = Session(backend=backend) + + def __init__( + self, + TestCase, + ): + self.estimator = EstimatorV2(mode=self.session, options={"default_shots": 1e3}) + self.pm = generate_preset_pass_manager(backend=self.backend, optimization_level=0) + self.gradient = ParamShiftEstimatorGradient(estimator=self.estimator, pass_manager=self.pm) + super().__init__(TestCase) + + def _test_network_passes( + self, + estimator_qnn, + case_data, + ): + test_data = case_data["test_data"] + weights = case_data["weights"] + correct_forwards = case_data["correct_forwards"] + correct_weight_backwards = case_data["correct_weight_backwards"] + correct_input_backwards = case_data["correct_input_backwards"] + + # test forward pass + with self.subTest("forward pass"): + for i, inputs in enumerate(test_data): + forward = estimator_qnn.forward(inputs, weights) + np.testing.assert_allclose(forward, correct_forwards[i], **self.tolerance) + # test backward pass without input_gradients + with self.subTest("backward pass without input gradients"): + for i, inputs in enumerate(test_data): + input_backward, weight_backward = estimator_qnn.backward(inputs, weights) + if correct_weight_backwards[i] is None: + self.assertIsNone(weight_backward) + else: + np.testing.assert_allclose( + weight_backward, correct_weight_backwards[i], **self.tolerance + ) + self.assertIsNone(input_backward) + # test backward pass with input_gradients + with self.subTest("backward pass with input gradients"): + estimator_qnn.input_gradients = True + for i, inputs in enumerate(test_data): + input_backward, weight_backward = estimator_qnn.backward(inputs, weights) + if correct_weight_backwards[i] is None: + self.assertIsNone(weight_backward) + else: + np.testing.assert_allclose( + weight_backward, correct_weight_backwards[i], **self.tolerance +
) + if correct_input_backwards[i] is None: + self.assertIsNone(input_backward) + else: + np.testing.assert_allclose( + input_backward, correct_input_backwards[i], **self.tolerance + ) + + def test_estimator_qnn_1_1(self): + """Test Estimator QNN with input/output dimension 1/1.""" + params = [Parameter("input1"), Parameter("weight1")] + qc = QuantumCircuit(1) + qc.h(0) + qc.ry(params[0], 0) + qc.rx(params[1], 0) + isa_qc = self.pm.run(qc) + op = SparsePauliOp.from_list([("Z", 1), ("X", 1)]) + isa_ob = op.apply_layout(isa_qc.layout) + estimator_qnn = EstimatorQNN( + circuit=isa_qc, + observables=[isa_ob], + input_params=[params[0]], + weight_params=[params[1]], + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + + self._test_network_passes(estimator_qnn, CASE_DATA["shape_1_1"]) + + def test_estimator_qnn_2_1(self): + """Test Estimator QNN with input/output dimension 2/1.""" + params = [ + Parameter("input1"), + Parameter("input2"), + Parameter("weight1"), + Parameter("weight2"), + ] + qc = QuantumCircuit(2) + qc.h(0) + qc.ry(params[0], 0) + qc.ry(params[1], 1) + qc.rx(params[2], 0) + qc.rx(params[3], 1) + isa_qc = self.pm.run(qc) + op = SparsePauliOp.from_list([("ZZ", 1), ("XX", 1)]) + op = op.apply_layout(isa_qc.layout) + estimator_qnn = EstimatorQNN( + circuit=isa_qc, + observables=[op], + input_params=params[:2], + weight_params=params[2:], + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + + self._test_network_passes(estimator_qnn, CASE_DATA["shape_2_1"]) + + def test_estimator_qnn_1_2(self): + """Test Estimator QNN with input/output dimension 1/2.""" + params = [Parameter("input1"), Parameter("weight1")] + qc = QuantumCircuit(1) + qc.h(0) + qc.ry(params[0], 0) + qc.rx(params[1], 0) + + isa_qc = self.pm.run(qc) + op1 = SparsePauliOp.from_list([("Z", 1), ("X", 1)]) + op1 = op1.apply_layout(isa_qc.layout) + op2 = SparsePauliOp.from_list([("Z", 2), ("X", 2)]) + op2 = 
op2.apply_layout(isa_qc.layout) + + # construct QNN + estimator_qnn = EstimatorQNN( + circuit=isa_qc, + observables=[op1, op2], + input_params=[params[0]], + weight_params=[params[1]], + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + + self._test_network_passes(estimator_qnn, CASE_DATA["shape_1_2"]) + + def test_estimator_qnn_2_2(self): + """Test Estimator QNN with input/output dimension 2/2.""" + params = [ + Parameter("input1"), + Parameter("input2"), + Parameter("weight1"), + Parameter("weight2"), + ] + qc = QuantumCircuit(2) + qc.h(0) + qc.ry(params[0], 0) + qc.ry(params[1], 1) + qc.rx(params[2], 0) + qc.rx(params[3], 1) + isa_qc = self.pm.run(qc) + op1 = SparsePauliOp.from_list([("ZZ", 1)]) + op1 = op1.apply_layout(isa_qc.layout) + op2 = SparsePauliOp.from_list([("XX", 1)]) + op2 = op2.apply_layout(isa_qc.layout) + + estimator_qnn = EstimatorQNN( + circuit=isa_qc, + observables=[op1, op2], + input_params=params[:2], + weight_params=params[2:], + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + + self._test_network_passes(estimator_qnn, CASE_DATA["shape_2_2"]) + + def test_no_input_parameters(self): + """Test Estimator QNN with no input parameters.""" + params = [Parameter("weight0"), Parameter("weight1")] + qc = QuantumCircuit(1) + qc.h(0) + qc.ry(params[0], 0) + qc.rx(params[1], 0) + isa_qc = self.pm.run(qc) + op = SparsePauliOp.from_list([("Z", 1), ("X", 1)]) + op = op.apply_layout(isa_qc.layout) + estimator_qnn = EstimatorQNN( + circuit=isa_qc, + observables=[op], + input_params=None, + weight_params=params, + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + self._test_network_passes(estimator_qnn, CASE_DATA["no_input_parameters"]) + + def test_no_weight_parameters(self): + """Test Estimator QNN with no weight parameters.""" + params = [Parameter("input0"), Parameter("input1")] + qc = QuantumCircuit(1) + qc.h(0) + 
qc.ry(params[0], 0) + qc.rx(params[1], 0) + isa_qc = self.pm.run(qc) + op = SparsePauliOp.from_list([("Z", 1), ("X", 1)]) + op = op.apply_layout(isa_qc.layout) + estimator_qnn = EstimatorQNN( + circuit=isa_qc, + observables=[op], + input_params=params, + weight_params=None, + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + self._test_network_passes(estimator_qnn, CASE_DATA["no_weight_parameters"]) + + def test_no_parameters(self): + """Test Estimator QNN with no parameters.""" + qc = QuantumCircuit(1) + qc.h(0) + isa_qc = self.pm.run(qc) + op = SparsePauliOp.from_list([("Z", 1), ("X", 1)]) + op = op.apply_layout(isa_qc.layout) + estimator_qnn = EstimatorQNN( + circuit=isa_qc, + observables=[op], + input_params=None, + weight_params=None, + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + self._test_network_passes(estimator_qnn, CASE_DATA["no_parameters"]) + + def test_default_observables(self): + """Test Estimator QNN with default observables.""" + params = [Parameter("input1"), Parameter("weight1")] + qc = QuantumCircuit(1) + qc.h(0) + qc.ry(params[0], 0) + qc.rx(params[1], 0) + isa_qc = self.pm.run(qc) + estimator_qnn = EstimatorQNN( + circuit=isa_qc, + input_params=[params[0]], + weight_params=[params[1]], + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + self._test_network_passes(estimator_qnn, CASE_DATA["default_observables"]) + + def test_single_observable(self): + """Test Estimator QNN with single observable.""" + params = [Parameter("input1"), Parameter("weight1")] + qc = QuantumCircuit(1) + qc.h(0) + qc.ry(params[0], 0) + qc.rx(params[1], 0) + isa_qc = self.pm.run(qc) + op = SparsePauliOp.from_list([("Z", 1), ("X", 1)]) + op = op.apply_layout(isa_qc.layout) + estimator_qnn = EstimatorQNN( + circuit=isa_qc, + observables=op, + input_params=[params[0]], + weight_params=[params[1]], + estimator=self.estimator, + 
gradient=self.gradient, + num_virtual_qubits=isa_qc.num_qubits, + ) + self._test_network_passes(estimator_qnn, CASE_DATA["single_observable"]) + + def test_setters_getters(self): + """Test Estimator QNN properties.""" + params = [Parameter("input1"), Parameter("weight1")] + qc = QuantumCircuit(1) + qc.h(0) + qc.ry(params[0], 0) + qc.rx(params[1], 0) + isa_qc = self.pm.run(qc) + op = SparsePauliOp.from_list([("Z", 1), ("X", 1)]) + op = op.apply_layout(isa_qc.layout) + estimator_qnn = EstimatorQNN( + circuit=isa_qc, + observables=[op], + input_params=[params[0]], + weight_params=[params[1]], + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + with self.subTest("Test circuit getter."): + self.assertEqual(estimator_qnn.circuit, isa_qc) + with self.subTest("Test observables getter."): + self.assertEqual(estimator_qnn.observables, [op]) + with self.subTest("Test input_params getter."): + self.assertEqual(estimator_qnn.input_params, [params[0]]) + with self.subTest("Test weight_params getter."): + self.assertEqual(estimator_qnn.weight_params, [params[1]]) + with self.subTest("Test input_gradients setter and getter."): + self.assertFalse(estimator_qnn.input_gradients) + estimator_qnn.input_gradients = True + self.assertTrue(estimator_qnn.input_gradients) + + @unittest.skip("Test unstable, to be checked.") + def test_qnn_qc_circuit_construction(self): + """Test Estimator QNN properties and forward/backward pass for QNNCircuit construction""" + num_qubits = 2 + feature_map = ZZFeatureMap(feature_dimension=num_qubits) + ansatz = RealAmplitudes(num_qubits=num_qubits, reps=1) + + qc = QuantumCircuit(num_qubits) + qc.compose(feature_map, inplace=True) + qc.compose(ansatz, inplace=True) + isa_qc = self.pm.run(qc) + + estimator_qc = EstimatorQNN( + circuit=isa_qc, + input_params=feature_map.parameters, + weight_params=ansatz.parameters, + input_gradients=True, + estimator=self.estimator, + gradient=self.gradient, + 
num_virtual_qubits=qc.num_qubits, + ) + + qnn_qc = QNNCircuit(num_qubits=num_qubits, feature_map=feature_map, ansatz=ansatz) + isa_qnn_qc = self.pm.run(qnn_qc) + estimator_qnn_qc = EstimatorQNN( + circuit=isa_qnn_qc, + input_params=qnn_qc.feature_map.parameters, + weight_params=qnn_qc.ansatz.parameters, + input_gradients=True, + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + + input_data = [1, 2] + weights = [1, 2, 3, 4] + + with self.subTest("Test if Estimator QNN properties are equal."): + self.assertEqual(estimator_qnn_qc.input_params, estimator_qc.input_params) + self.assertEqual(estimator_qnn_qc.weight_params, estimator_qc.weight_params) + self.assertEqual(estimator_qnn_qc.observables, estimator_qc.observables) + + with self.subTest("Test if forward pass yields equal results."): + forward_qc = estimator_qc.forward(input_data=input_data, weights=weights) + forward_qnn_qc = estimator_qnn_qc.forward(input_data=input_data, weights=weights) + np.testing.assert_allclose(forward_qc, forward_qnn_qc, **self.tolerance) + + with self.subTest("Test if backward pass yields equal results."): + backward_qc = estimator_qc.backward(input_data=input_data, weights=weights) + backward_qnn_qc = estimator_qnn_qc.backward(input_data=input_data, weights=weights) + + # Test if input grad is close (difference due to shots) + np.testing.assert_allclose(backward_qc[0], backward_qnn_qc[0], **self.tolerance) + # Test if weights grad is close (difference due to shots) + np.testing.assert_allclose(backward_qc[1], backward_qnn_qc[1], **self.tolerance) + + def test_binding_order(self): + """Test parameter binding order gives result as expected""" + qc = ZFeatureMap(feature_dimension=2, reps=1) + input_params = qc.parameters + weight = Parameter("weight") + for i in range(qc.num_qubits): + qc.rx(weight, i) + isa_qc = self.pm.run(qc) + op = SparsePauliOp.from_list([("Z" * isa_qc.num_qubits, 1)]) + op = op.apply_layout(isa_qc.layout) + estimator_qnn 
= EstimatorQNN( + circuit=isa_qc, + observables=op, + input_params=input_params, + weight_params=[weight], + estimator=self.estimator, + gradient=self.gradient, + num_virtual_qubits=qc.num_qubits, + ) + + estimator_qnn_weights = [3] + estimator_qnn_input = [2, 33] + res = estimator_qnn.forward(estimator_qnn_input, estimator_qnn_weights) + # When parameters were used in circuit order, before being assigned correctly, so inputs + # went to input params, weights to weight params, this gave 0.00613403 + self.assertAlmostEqual(res[0][0], 0.00040017, delta=0.05) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/neural_networks/test_sampler_qnn.py b/test/neural_networks/test_sampler_qnn.py index 07283b59c..9651a93d4 100644 --- a/test/neural_networks/test_sampler_qnn.py +++ b/test/neural_networks/test_sampler_qnn.py @@ -23,11 +23,18 @@ from qiskit.circuit import Parameter, QuantumCircuit from qiskit.primitives import Sampler +from qiskit.providers.fake_provider import GenericBackendV2 +from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap -from qiskit_machine_learning.utils import algorithm_globals +from qiskit_ibm_runtime import Session, SamplerV2 + +from qiskit_machine_learning.utils import algorithm_globals from qiskit_machine_learning.circuit.library import QNNCircuit from qiskit_machine_learning.neural_networks.sampler_qnn import SamplerQNN +from qiskit_machine_learning.gradients.param_shift.param_shift_sampler_gradient import ( + ParamShiftSamplerGradient, +) import qiskit_machine_learning.optionals as _optionals if _optionals.HAS_SPARSE: @@ -45,8 +52,9 @@ class SparseArray: # type: ignore DEFAULT = "default" SHOTS = "shots" +V2 = "v2" SPARSE = [True, False] -SAMPLERS = [DEFAULT, SHOTS] +SAMPLERS = [DEFAULT, SHOTS, V2] INTERPRET_TYPES = [0, 1, 2] BATCH_SIZES = [2] INPUT_GRADS = [True, False] @@ -69,6 +77,8 @@ def setUp(self): self.qc = QuantumCircuit(num_qubits) 
self.qc.append(feature_map, range(2)) self.qc.append(var_form, range(2)) + self.qc.measure_all() + self.num_virtual_qubits = num_qubits # store params self.input_params = list(feature_map.parameters) @@ -93,7 +103,10 @@ def interpret_2d(x): # define sampler primitives self.sampler = Sampler() self.sampler_shots = Sampler(options={"shots": 100, "seed": 42}) - + self.backend = GenericBackendV2(num_qubits=8) + self.session = Session(backend=self.backend) + self.sampler_v2 = SamplerV2(mode=self.session) + self.pm = None self.array_type = {True: SparseArray, False: np.ndarray} # pylint: disable=too-many-positional-arguments @@ -101,12 +114,33 @@ def _get_qnn( self, sparse, sampler_type, interpret_id, input_params, weight_params, input_grads ): """Construct QNN from configuration.""" + # get interpret setting + interpret = None + output_shape = None + if interpret_id == 1: + interpret = self.interpret_1d + output_shape = self.output_shape_1d + elif interpret_id == 2: + interpret = self.interpret_2d + output_shape = self.output_shape_2d # get quantum instance + gradient = None if sampler_type == SHOTS: sampler = self.sampler_shots elif sampler_type == DEFAULT: sampler = self.sampler + elif sampler_type == V2: + sampler = self.sampler_v2 + + if self.qc.layout is None: + self.pm = generate_preset_pass_manager(optimization_level=1, backend=self.backend) + self.qc = self.pm.run(self.qc) + gradient = ParamShiftSamplerGradient( + sampler=self.sampler, + len_quasi_dist=2**self.num_virtual_qubits, + pass_manager=self.pm, + ) else: sampler = None @@ -124,11 +158,13 @@ def _get_qnn( qnn = SamplerQNN( sampler=sampler, circuit=self.qc, + num_virtual_qubits=self.num_virtual_qubits, input_params=input_params, weight_params=weight_params, sparse=sparse, interpret=interpret, output_shape=output_shape, + gradient=gradient, input_gradients=input_grads, ) return qnn @@ -345,7 +381,7 @@ def test_no_parameters(self): sampler_qnn.input_gradients = True self._verify_qnn(sampler_qnn, 1, 
input_data=None, weights=None) - def test_qnn_qc_circui_construction(self): + def test_qnn_qc_circuit_construction(self): """Test Sampler QNN properties and forward/backward pass for QNNCircuit construction""" num_qubits = 2 feature_map = ZZFeatureMap(feature_dimension=num_qubits) diff --git a/test/state_fidelities/test_compute_uncompute_v2.py b/test/state_fidelities/test_compute_uncompute_v2.py new file mode 100644 index 000000000..819b206fc --- /dev/null +++ b/test/state_fidelities/test_compute_uncompute_v2.py @@ -0,0 +1,343 @@ +# This code is part of a Qiskit project. +# +# (C) Copyright IBM 2022, 2024. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
"""Tests for the Compute-Uncompute state fidelity with V2 primitives."""

import unittest
from test import QiskitMachineLearningTestCase

import numpy as np

from qiskit.circuit import QuantumCircuit, ParameterVector
from qiskit.circuit.library import RealAmplitudes
from qiskit.primitives import Sampler
from qiskit.providers.fake_provider import GenericBackendV2
from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager

from qiskit_ibm_runtime import Session, SamplerV2

from qiskit_machine_learning.state_fidelities import ComputeUncompute


class TestComputeUncompute(QiskitMachineLearningTestCase):
    """Test Compute-Uncompute Fidelity class with a ``SamplerV2`` primitive.

    Each test builds a :class:`ComputeUncompute` fidelity from the V2 sampler
    created in ``setUp`` (backed by a noise-free ``GenericBackendV2``) and a
    preset pass manager, then checks the fidelities returned by ``run``.
    """

    def setUp(self):
        super().setUp()
        parameters = ParameterVector("x", 2)

        # Five 2-qubit reference circuits used throughout the tests:
        # parameterized RX / RY layers, |++>, |00>, and a mixed RX/H circuit.
        rx_rotations = QuantumCircuit(2)
        rx_rotations.rx(parameters[0], 0)
        rx_rotations.rx(parameters[1], 1)

        ry_rotations = QuantumCircuit(2)
        ry_rotations.ry(parameters[0], 0)
        ry_rotations.ry(parameters[1], 1)

        plus = QuantumCircuit(2)
        plus.h([0, 1])

        zero = QuantumCircuit(2)

        rx_rotation = QuantumCircuit(2)
        rx_rotation.rx(parameters[0], 0)
        rx_rotation.h(1)

        self._circuit = [rx_rotations, ry_rotations, plus, zero, rx_rotation]

        # Noise-free, seeded fake backend so sampled fidelities are stable
        # within the (loose) tolerances asserted below.
        self.backend = GenericBackendV2(
            num_qubits=4,
            calibrate_instructions=None,
            pulse_channels=False,
            noise_info=False,
            seed=123,
        )
        self.session = Session(backend=self.backend)
        self._sampler = SamplerV2(mode=self.session)
        self.pm = generate_preset_pass_manager(optimization_level=0, backend=self.backend)

        self._left_params = np.array([[0, 0], [np.pi / 2, 0], [0, np.pi / 2], [np.pi, np.pi]])
        self._right_params = np.array([[0, 0], [0, 0], [np.pi / 2, 0], [0, 0]])

    def test_1param_pair(self):
        """test for fidelity with one pair of parameters"""
        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits
        )
        job = fidelity.run(
            self._circuit[0], self._circuit[1], self._left_params[0], self._right_params[0]
        )
        result = job.result()
        np.testing.assert_allclose(result.fidelities, np.array([1.0]))

    def test_1param_pair_local(self):
        """test for local fidelity with one pair of parameters"""
        fidelity = ComputeUncompute(
            self._sampler,
            local=True,
            pass_manager=self.pm,
            num_virtual_qubits=self._circuit[0].num_qubits,
        )
        job = fidelity.run(
            self._circuit[0], self._circuit[1], self._left_params[0], self._right_params[0]
        )
        result = job.result()
        np.testing.assert_allclose(result.fidelities, np.array([1.0]))

    def test_local(self):
        """test difference between local and global fidelity"""
        fidelity_global = ComputeUncompute(
            self._sampler,
            local=False,
            pass_manager=self.pm,
            num_virtual_qubits=self._circuit[2].num_qubits,
        )
        fidelity_local = ComputeUncompute(
            self._sampler,
            local=True,
            pass_manager=self.pm,
            num_virtual_qubits=self._circuit[2].num_qubits,
        )
        fidelities = []
        for fidelity in [fidelity_global, fidelity_local]:
            job = fidelity.run(self._circuit[2], self._circuit[3])
            result = job.result()
            fidelities.append(result.fidelities[0])
        np.testing.assert_allclose(fidelities, np.array([0.25, 0.5]), atol=1e-1, rtol=1e-1)

    def test_4param_pairs(self):
        """test for fidelity with four pairs of parameters"""
        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits
        )
        n = len(self._left_params)
        job = fidelity.run(
            [self._circuit[0]] * n, [self._circuit[1]] * n, self._left_params, self._right_params
        )
        results = job.result()
        np.testing.assert_allclose(
            results.fidelities, np.array([1.0, 0.5, 0.25, 0.0]), atol=1e-1, rtol=1e-1
        )

    def test_symmetry(self):
        """test for fidelity with the same circuit"""
        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits
        )
        n = len(self._left_params)
        job_1 = fidelity.run(
            [self._circuit[0]] * n, [self._circuit[0]] * n, self._left_params, self._right_params
        )
        # Same circuit with swapped parameter values must give the same fidelities.
        job_2 = fidelity.run(
            [self._circuit[0]] * n, [self._circuit[0]] * n, self._right_params, self._left_params
        )
        results_1 = job_1.result()
        results_2 = job_2.result()
        np.testing.assert_allclose(results_1.fidelities, results_2.fidelities, atol=1e-1, rtol=1e-1)

    def test_no_params(self):
        """test for fidelity without parameters"""
        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[2].num_qubits
        )
        job = fidelity.run([self._circuit[2]], [self._circuit[3]])
        results = job.result()
        np.testing.assert_allclose(results.fidelities, np.array([0.25]), atol=1e-1, rtol=1e-1)

        # Passing explicit empty value lists must behave the same as omitting them.
        job = fidelity.run([self._circuit[2]], [self._circuit[3]], [], [])
        results = job.result()
        np.testing.assert_allclose(results.fidelities, np.array([0.25]), atol=1e-1, rtol=1e-1)

    def test_left_param(self):
        """test for fidelity with only left parameters"""
        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[1].num_qubits
        )
        n = len(self._left_params)
        job = fidelity.run(
            [self._circuit[1]] * n, [self._circuit[3]] * n, values_1=self._left_params
        )
        results = job.result()
        np.testing.assert_allclose(
            results.fidelities, np.array([1.0, 0.5, 0.5, 0.0]), atol=1e-1, rtol=1e-1
        )

    def test_right_param(self):
        """test for fidelity with only right parameters"""
        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[1].num_qubits
        )
        n = len(self._left_params)
        job = fidelity.run(
            [self._circuit[3]] * n, [self._circuit[1]] * n, values_2=self._left_params
        )
        results = job.result()
        np.testing.assert_allclose(
            results.fidelities, np.array([1.0, 0.5, 0.5, 0.0]), atol=1e-1, rtol=1e-1
        )

    def test_not_set_circuits(self):
        """test for fidelity with no circuits."""
        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits
        )
        with self.assertRaises(TypeError):
            job = fidelity.run(
                circuits_1=None,
                circuits_2=None,
                values_1=self._left_params,
                values_2=self._right_params,
            )
            job.result()

    def test_circuit_mismatch(self):
        """test for fidelity with different number of left/right circuits."""
        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits
        )
        n = len(self._left_params)
        with self.assertRaises(ValueError):
            job = fidelity.run(
                [self._circuit[0]] * n,
                [self._circuit[1]] * (n + 1),
                self._left_params,
                self._right_params,
            )
            job.result()

    def test_asymmetric_params(self):
        """test for fidelity when the 2 circuits have different number of
        left/right parameters."""

        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits
        )
        n = len(self._left_params)
        right_params = [[p] for p in self._right_params[:, 0]]
        job = fidelity.run(
            [self._circuit[0]] * n, [self._circuit[4]] * n, self._left_params, right_params
        )
        result = job.result()
        np.testing.assert_allclose(
            result.fidelities, np.array([0.5, 0.25, 0.25, 0.0]), atol=1e-1, rtol=1e-1
        )

    def test_input_format(self):
        """test for different input format variations"""

        circuit = RealAmplitudes(2)
        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=circuit.num_qubits
        )
        values = np.random.random(circuit.num_parameters)
        shift = np.ones_like(values) * 0.01

        # lists of circuits, lists of numpy arrays
        job = fidelity.run([circuit], [circuit], [values], [values + shift])
        result_1 = job.result()

        # lists of circuits, lists of lists
        shift_val = values + shift
        job = fidelity.run([circuit], [circuit], [values.tolist()], [shift_val.tolist()])
        result_2 = job.result()

        # circuits, lists
        shift_val = values + shift
        job = fidelity.run(circuit, circuit, values.tolist(), shift_val.tolist())
        result_3 = job.result()

        # circuits, np.arrays
        job = fidelity.run(circuit, circuit, values, values + shift)
        result_4 = job.result()

        np.testing.assert_allclose(result_1.fidelities, result_2.fidelities, atol=1e-1, rtol=1e-1)
        np.testing.assert_allclose(result_1.fidelities, result_3.fidelities, atol=1e-1, rtol=1e-1)
        np.testing.assert_allclose(result_1.fidelities, result_4.fidelities, atol=1e-1, rtol=1e-1)

    def test_input_measurements(self):
        """test for fidelity with measurements on input circuits"""
        fidelity = ComputeUncompute(
            self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits
        )
        circuit_1 = self._circuit[0]
        circuit_1.measure_all()
        circuit_2 = self._circuit[1]
        circuit_2.measure_all()

        job = fidelity.run(circuit_1, circuit_2, self._left_params[0], self._right_params[0])
        result = job.result()
        np.testing.assert_allclose(result.fidelities, np.array([1.0]))

    def test_options(self):
        """Test fidelity's run options"""
        sampler_shots = Sampler(options={"shots": 1024})

        with self.subTest("sampler"):
            # Only options in sampler
            fidelity = ComputeUncompute(
                sampler_shots, pass_manager=self.pm, num_virtual_qubits=self._circuit[2].num_qubits
            )
            options = fidelity.options
            job = fidelity.run(self._circuit[2], self._circuit[3])
            result = job.result()
            self.assertEqual(options.__dict__, {"shots": 1024})
            self.assertEqual(result.options.__dict__, {"shots": 1024})

        with self.subTest("fidelity init"):
            # Fidelity default options override sampler
            # options and add new fields
            fidelity = ComputeUncompute(
                sampler_shots,
                options={"shots": 2048, "dummy": 100},
                pass_manager=self.pm,
                num_virtual_qubits=self._circuit[2].num_qubits,
            )
            options = fidelity.options
            job = fidelity.run(self._circuit[2], self._circuit[3])
            result = job.result()
            self.assertEqual(options.__dict__, {"shots": 2048, "dummy": 100})
            self.assertEqual(result.options.__dict__, {"shots": 2048, "dummy": 100})

        with self.subTest("fidelity update"):
            # Update fidelity options
            fidelity = ComputeUncompute(
                sampler_shots,
                options={"shots": 2048, "dummy": 100},
                pass_manager=self.pm,
                num_virtual_qubits=self._circuit[2].num_qubits,
            )
            fidelity.update_default_options(shots=100)
            options = fidelity.options
            job = fidelity.run(self._circuit[2], self._circuit[3])
            result = job.result()
            self.assertEqual(options.__dict__, {"shots": 100, "dummy": 100})
            self.assertEqual(result.options.__dict__, {"shots": 100, "dummy": 100})

        with self.subTest("fidelity run"):
            # Run options override fidelity options
            fidelity = ComputeUncompute(
                sampler_shots,
                options={"shots": 2048, "dummy": 100},
                pass_manager=self.pm,
                num_virtual_qubits=self._circuit[2].num_qubits,
            )
            job = fidelity.run(self._circuit[2], self._circuit[3], shots=50, dummy=None)
            options = fidelity.options
            result = job.result()
            # Only default + sampler options. Not run.
            self.assertEqual(options.__dict__, {"shots": 2048, "dummy": 100})
            self.assertEqual(result.options.__dict__, {"shots": 50, "dummy": None})


if __name__ == "__main__":
    unittest.main()