Merge branch 'master' into ad/deprecate-legacy-devices
Shiro-Raven authored Jul 23, 2024
2 parents 1043ed6 + 46fa7ff commit dad606b
Showing 4 changed files with 147 additions and 42 deletions.
9 changes: 7 additions & 2 deletions doc/releases/changelog-dev.md
@@ -22,11 +22,15 @@

* `SProd.terms` now flattens out the terms if the base is a multi-term observable.
[(#5885)](https://github.com/PennyLaneAI/pennylane/pull/5885)
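  For example, a minimal sketch of the flattening (the exact operator `repr` may vary by version):

  >>> op = qml.s_prod(2.0, qml.sum(qml.PauliX(0), qml.PauliZ(1)))
  >>> op.terms()
  ([2.0, 2.0], [X(0), Z(1)])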

* A new method `to_mat` has been added to the `FermiWord` and `FermiSentence` classes, which allows
computing the matrix representation of these Fermi operators.
[(#5920)](https://github.com/PennyLaneAI/pennylane/pull/5920)
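  A quick sketch of the new method (the operator and the resulting shape are illustrative
  assumptions, not taken from the PR):

  >>> from pennylane.fermi import FermiWord
  >>> w = FermiWord({(0, 0): "+", (1, 1): "-"})  # represents a+(0) a(1)
  >>> w.to_mat().shape
  (4, 4)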

* New functionality has been added to natively support exponential extrapolation when using
`mitigate_with_zne`. This gives users more control over the error mitigation protocol without
needing to add further dependencies.
[(#5972)](https://github.com/PennyLaneAI/pennylane/pull/5972)
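  A minimal usage sketch (`noisy_qnode` is a hypothetical QNode on a noisy device; the keyword
  names mirror the tests added below):

  >>> mitigated_qnode = qml.transforms.mitigate_with_zne(
  ...     noisy_qnode,
  ...     scale_factors=[1.0, 2.0, 3.0],
  ...     folding=qml.transforms.fold_global,
  ...     extrapolate=qml.transforms.exponential_extrapolate,
  ... )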

<h3>Improvements 🛠</h3>

* `StateMP.process_state` defines rules in `cast_to_complex` for complex casting, avoiding a superfluous state vector copy in Lightning simulations
@@ -179,4 +183,5 @@ Christina Lee,
William Maxwell,
Vincent Michaud-Rioux,
Mudit Pandey,
Erik Schultheis.
Erik Schultheis,
nate stemen.
9 changes: 8 additions & 1 deletion pennylane/transforms/__init__.py
@@ -93,6 +93,7 @@
~transforms.fold_global
~transforms.poly_extrapolate
~transforms.richardson_extrapolate
~transforms.exponential_extrapolate
Other transforms
~~~~~~~~~~~~~~~~
@@ -322,7 +323,13 @@ def circuit(params):
from .split_to_single_terms import split_to_single_terms
from .insert_ops import insert

from .mitigate import mitigate_with_zne, fold_global, poly_extrapolate, richardson_extrapolate
from .mitigate import (
    mitigate_with_zne,
    fold_global,
    poly_extrapolate,
    richardson_extrapolate,
    exponential_extrapolate,
)
from .optimization import (
    cancel_inverses,
    commute_controlled,
42 changes: 42 additions & 0 deletions pennylane/transforms/mitigate.py
@@ -318,6 +318,48 @@ def richardson_extrapolate(x, y):
    return poly_extrapolate(x, y, len(x) - 1)


def exponential_extrapolate(x, y, asymptote=None, eps=1.0e-6):
    r"""Extrapolate to the zero-noise limit using an exponential model (:math:`Ae^{Bx} + C`). This
    is done by linearizing the data with a logarithm and then performing a linear fit. Once
    the model parameters are found, they are transformed back to exponential parameters.

    Args:
        x (Array): Data on the x axis.
        y (Array): Data on the y axis such that :math:`y = f(x)`.
        asymptote (float): Infinite-noise limit expected for your circuit of interest (:math:`C`
            in the equation above). Defaults to 0 if an asymptote is not supplied.
        eps (float): Epsilon to regularize :math:`\log(y - C)` when the argument is too close to
            zero or negative.

    Returns:
        float: Extrapolated value at :math:`f(0)`.

    .. seealso:: :func:`~.pennylane.transforms.richardson_extrapolate`, :func:`~.pennylane.transforms.mitigate_with_zne`.

    **Example:**

    >>> np.random.seed(0)
    >>> x = np.linspace(1, 10, 5)
    >>> y = np.exp(-x) + np.random.normal(scale=0.1, size=len(x))
    >>> qml.transforms.exponential_extrapolate(x, y)
    0.23365009000522544
    """
    y = qml.math.stack(y)
    # Infer the sign of the exponential amplitude A from a rough linear fit.
    slope, y_intercept = _polyfit(x, y, 1)
    if asymptote is None:
        sign = qml.math.sign(-slope)
        asymptote = 0.0
    else:
        sign = qml.math.sign(-(asymptote - y_intercept))

    # Shift by the asymptote and clamp at eps so the logarithm stays well defined.
    y_shifted = sign * (y - asymptote)
    y_shifted = qml.math.where(y_shifted < eps, eps, y_shifted)
    y_scaled = qml.math.log(y_shifted)

    # Fit a line in log space, then map the zero-noise intercept back to the exponential model.
    zne_unscaled = poly_extrapolate(x, y_scaled, 1)
    return sign * qml.math.exp(zne_unscaled) + asymptote
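
# Why the linearization in ``exponential_extrapolate`` works: with s = sign(A), the model
#     y = A e^{Bx} + C  =>  log(s * (y - C)) = log|A| + B * x,
# so the degree-1 fit in log space recovers B (slope) and log|A| (intercept), and the
# zero-noise estimate evaluates to s * exp(intercept) + C = A + C at x = 0.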


# pylint: disable=too-many-arguments, protected-access
@transform
def mitigate_with_zne(
129 changes: 90 additions & 39 deletions tests/transforms/test_mitigate.py
@@ -23,7 +23,12 @@
import pennylane as qml
from pennylane import numpy as np
from pennylane.tape import QuantumScript
from pennylane.transforms import fold_global, mitigate_with_zne, richardson_extrapolate
from pennylane.transforms import (
    exponential_extrapolate,
    fold_global,
    mitigate_with_zne,
    richardson_extrapolate,
)

with qml.queuing.AnnotatedQueue() as q_tape:
    qml.BasisState([1], wires=0)
@@ -109,7 +114,8 @@ def test_extrapolate_call(self, mocker):
for t in tapes:
same_tape(t, tape)

def test_multi_returns(self):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_multi_returns(self, extrapolate):
"""Tests if the expected shape is returned when mitigating a circuit with two returns"""
noise_strength = 0.05

@@ -126,7 +132,7 @@ def test_multi_returns(self):
qml.transforms.mitigate_with_zne,
scale_factors=[1, 2, 3],
folding=fold_global,
extrapolate=richardson_extrapolate,
extrapolate=extrapolate,
)
@qml.qnode(dev)
def mitigated_circuit(w1, w2):
@@ -507,17 +513,67 @@ def test_polyfit(self):
coeffs = qml.transforms.mitigate._polyfit(x, y, 2)
assert qml.math.allclose(qml.math.squeeze(coeffs), [3, 2, 1])

@pytest.mark.parametrize("exp_params", [[0.5, -2, 2], [-9, -4, 0]])
def test_exponential_extrapolation_accuracy(self, exp_params):
"""Testing the exponential extrapolation works as expected for known exponential models."""
A, B, asymptote = exp_params
x = np.linspace(1, 4, 4)
y = A * np.exp(B * x) + asymptote
zne_val = qml.transforms.exponential_extrapolate(x, y, asymptote=asymptote)
assert qml.math.allclose(zne_val, A + asymptote, atol=1e-3)

@pytest.mark.autograd
def test_exponential_extrapolation_autograd(self):
"""Test exponential extrapolation works with expvals stored as a numpy array."""
scale_factors = [1, 3, 5]
noise_scaled_expvals = np.array([0.9, 0.8, 0.7])
zne_val = qml.transforms.exponential_extrapolate(scale_factors, noise_scaled_expvals)
assert isinstance(zne_val, np.ndarray)
assert zne_val.ndim == 0

@pytest.mark.tf
def test_exponential_extrapolation_tf(self):
"""Test exponential extrapolation works with expvals stored as a tensorflow tensor."""
import tensorflow as tf

scale_factors = [1, 3, 5]
noise_scaled_expvals = tf.constant([0.9, 0.8, 0.7], dtype=tf.float32)
zne_val = qml.transforms.exponential_extrapolate(scale_factors, noise_scaled_expvals)
assert tf.is_tensor(zne_val)
assert zne_val.shape.ndims == 0

@pytest.mark.torch
def test_exponential_extrapolation_torch(self):
"""Test exponential extrapolation works with expvals stored as a torch tensor."""
import torch

scale_factors = [1, 3, 5]
noise_scaled_expvals = torch.tensor([0.9, 0.8, 0.7])
zne_val = qml.transforms.exponential_extrapolate(scale_factors, noise_scaled_expvals)
assert torch.is_tensor(zne_val)
assert zne_val.ndimension() == 0

@pytest.mark.jax
def test_exponential_extrapolation_jax(self):
"""Test exponential extrapolation works with expvals stored as a jax array."""
import jax.numpy as jnp

scale_factors = [1, 3, 5]
noise_scaled_expvals = jnp.array([0.9, 0.8, 0.7])
zne_val = qml.transforms.exponential_extrapolate(scale_factors, noise_scaled_expvals)
assert isinstance(zne_val, jnp.ndarray)
assert zne_val.ndim == 0

@pytest.mark.autograd
def test_diffability_autograd(self):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_diffability_autograd(self, extrapolate):
"""Testing that the mitigated qnode can be differentiated and returns the correct gradient in autograd"""
qnode_noisy = qml.QNode(qfunc, dev_noisy)
qnode_ideal = qml.QNode(qfunc, dev_ideal)

scale_factors = [1.0, 2.0, 3.0]

mitigated_qnode = mitigate_with_zne(
qnode_noisy, scale_factors, fold_global, richardson_extrapolate
)
mitigated_qnode = mitigate_with_zne(qnode_noisy, scale_factors, fold_global, extrapolate)

theta = np.array([np.pi / 4, np.pi / 4], requires_grad=True)

@@ -531,7 +587,8 @@ def test_diffability_autograd(self):

@pytest.mark.jax
@pytest.mark.parametrize("interface", ["auto", "jax"])
def test_diffability_jax(self, interface):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_diffability_jax(self, interface, extrapolate):
"""Testing that the mitigated qnode can be differentiated and returns the correct gradient in jax"""
import jax
import jax.numpy as jnp
@@ -541,9 +598,7 @@

scale_factors = [1.0, 2.0, 3.0]

mitigated_qnode = mitigate_with_zne(
qnode_noisy, scale_factors, fold_global, richardson_extrapolate
)
mitigated_qnode = mitigate_with_zne(qnode_noisy, scale_factors, fold_global, extrapolate)

theta = jnp.array(
[np.pi / 4, np.pi / 4],
@@ -559,7 +614,8 @@

@pytest.mark.jax
@pytest.mark.parametrize("interface", ["auto", "jax", "jax-jit"])
def test_diffability_jaxjit(self, interface):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_diffability_jaxjit(self, interface, extrapolate):
"""Testing that the mitigated qnode can be differentiated and returns the correct gradient in jax-jit"""
import jax
import jax.numpy as jnp
@@ -570,7 +626,7 @@ def test_diffability_jaxjit(self, interface):
scale_factors = [1.0, 2.0, 3.0]

mitigated_qnode = jax.jit(
mitigate_with_zne(qnode_noisy, scale_factors, fold_global, richardson_extrapolate)
mitigate_with_zne(qnode_noisy, scale_factors, fold_global, extrapolate)
)

theta = jnp.array(
@@ -587,7 +643,8 @@

@pytest.mark.torch
@pytest.mark.parametrize("interface", ["auto", "torch"])
def test_diffability_torch(self, interface):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_diffability_torch(self, interface, extrapolate):
"""Testing that the mitigated qnode can be differentiated and returns the correct gradient in torch"""
import torch

@@ -596,9 +653,7 @@

scale_factors = [1.0, 2.0, 3.0]

mitigated_qnode = mitigate_with_zne(
qnode_noisy, scale_factors, fold_global, richardson_extrapolate
)
mitigated_qnode = mitigate_with_zne(qnode_noisy, scale_factors, fold_global, extrapolate)

theta = torch.tensor([np.pi / 4, np.pi / 4], requires_grad=True)

@@ -616,7 +671,8 @@

@pytest.mark.tf
@pytest.mark.parametrize("interface", ["auto", "tf"])
def test_diffability_tf(self, interface):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_diffability_tf(self, interface, extrapolate):
"""Testing that the mitigated qnode can be differentiated and returns the correct gradient in tf"""
import tensorflow as tf

@@ -625,9 +681,7 @@

scale_factors = [1.0, 2.0, 3.0]

mitigated_qnode = mitigate_with_zne(
qnode_noisy, scale_factors, fold_global, richardson_extrapolate
)
mitigated_qnode = mitigate_with_zne(qnode_noisy, scale_factors, fold_global, extrapolate)

theta = tf.Variable([np.pi / 4, np.pi / 4])

@@ -645,17 +699,16 @@ def test_diffability_tf(self, interface):
assert qml.math.allclose(grad, grad_ideal, atol=1e-2)

@pytest.mark.autograd
def test_diffability_autograd_multi(self):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_diffability_autograd_multi(self, extrapolate):
"""Testing that the mitigated qnode can be differentiated and returns
the correct gradient in autograd for multiple measurements"""
qnode_noisy = qml.QNode(qfunc_multi, dev_noisy)
qnode_ideal = qml.QNode(qfunc_multi, dev_ideal)

scale_factors = [1.0, 2.0, 3.0]

mitigated_qnode = mitigate_with_zne(
qnode_noisy, scale_factors, fold_global, richardson_extrapolate
)
mitigated_qnode = mitigate_with_zne(qnode_noisy, scale_factors, fold_global, extrapolate)

theta = np.array([np.pi / 4, np.pi / 6], requires_grad=True)

@@ -669,7 +722,8 @@ def test_diffability_autograd_multi(self):

@pytest.mark.jax
@pytest.mark.parametrize("interface", ["auto", "jax"])
def test_diffability_jax_multi(self, interface):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_diffability_jax_multi(self, interface, extrapolate):
"""Testing that the mitigated qnode can be differentiated and returns
the correct gradient in jax for multiple measurements"""
import jax
@@ -680,9 +734,7 @@ def test_diffability_jax_multi(self, interface):

scale_factors = [1.0, 2.0, 3.0]

mitigated_qnode = mitigate_with_zne(
qnode_noisy, scale_factors, fold_global, richardson_extrapolate
)
mitigated_qnode = mitigate_with_zne(qnode_noisy, scale_factors, fold_global, extrapolate)

theta = jnp.array(
[np.pi / 4, np.pi / 6],
@@ -698,7 +750,8 @@ def test_diffability_jax_multi(self, interface):

@pytest.mark.jax
@pytest.mark.parametrize("interface", ["auto", "jax", "jax-jit"])
def test_diffability_jaxjit_multi(self, interface):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_diffability_jaxjit_multi(self, interface, extrapolate):
"""Testing that the mitigated qnode can be differentiated and
returns the correct gradient in jax-jit for multiple measurements"""
import jax
@@ -710,7 +763,7 @@ def test_diffability_jaxjit_multi(self, interface):
scale_factors = [1.0, 2.0, 3.0]

mitigated_qnode = jax.jit(
mitigate_with_zne(qnode_noisy, scale_factors, fold_global, richardson_extrapolate)
mitigate_with_zne(qnode_noisy, scale_factors, fold_global, extrapolate)
)

theta = jnp.array(
@@ -727,7 +780,8 @@ def test_diffability_jaxjit_multi(self, interface):

@pytest.mark.torch
@pytest.mark.parametrize("interface", ["auto", "torch"])
def test_diffability_torch_multi(self, interface):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_diffability_torch_multi(self, interface, extrapolate):
"""Testing that the mitigated qnode can be differentiated and returns
the correct gradient in torch for multiple measurements"""
import torch
@@ -737,9 +791,7 @@ def test_diffability_torch_multi(self, interface):

scale_factors = [1.0, 2.0, 3.0]

mitigated_qnode = mitigate_with_zne(
qnode_noisy, scale_factors, fold_global, richardson_extrapolate
)
mitigated_qnode = mitigate_with_zne(qnode_noisy, scale_factors, fold_global, extrapolate)

theta = torch.tensor([np.pi / 4, np.pi / 6], requires_grad=True)

@@ -756,7 +808,8 @@ def test_diffability_torch_multi(self, interface):
assert qml.math.allclose(grad, grad_ideal, atol=1e-2)

@pytest.mark.tf
def test_diffability_tf_multi(self):
@pytest.mark.parametrize("extrapolate", [richardson_extrapolate, exponential_extrapolate])
def test_diffability_tf_multi(self, extrapolate):
"""Testing that the mitigated qnode can be differentiated and returns
the correct gradient in tf for multiple measurements"""
import tensorflow as tf
@@ -766,9 +819,7 @@ def test_diffability_tf_multi(self):

scale_factors = [1.0, 2.0, 3.0]

mitigated_qnode = mitigate_with_zne(
qnode_noisy, scale_factors, fold_global, richardson_extrapolate
)
mitigated_qnode = mitigate_with_zne(qnode_noisy, scale_factors, fold_global, extrapolate)

theta = tf.Variable([np.pi / 4, np.pi / 6])

