From 2b11e342973e5020b239146e65d8425e0aaec483 Mon Sep 17 00:00:00 2001 From: Ella Carlander Date: Mon, 12 Aug 2024 21:30:32 -0400 Subject: [PATCH 1/6] added roadmap link --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 1cc741d79d..b358296794 100644 --- a/README.md +++ b/README.md @@ -21,6 +21,7 @@ compiling quantum programs in clever ways. Want to know more? - Check out our [documentation](https://mitiq.readthedocs.io/en/stable/guide/guide.html). +- To see what's in store for Mitiq this year, look at the 2024 Roadmap in the [wiki](https://github.com/unitaryfund/mitiq/wiki). - For code, repo, or theory questions, especially those requiring more detailed responses, submit a [Discussion](https://github.com/unitaryfund/mitiq/discussions). - For casual or time sensitive questions, chat with us on [Discord](http://discord.unitary.fund). - Join our weekly community call on [Discord](http://discord.unitary.fund) ([public agenda](https://docs.google.com/document/d/1lZfct4AOCS7fdyWkudcGyER0n0nsCxSFKSicUEeJgtA/)). From f096b084a89a380dc657cfceb247e94c0fabccd3 Mon Sep 17 00:00:00 2001 From: Ella Carlander Date: Thu, 22 Aug 2024 10:10:38 -0700 Subject: [PATCH 2/6] removed mention of specific year --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b358296794..dc6871bad1 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ compiling quantum programs in clever ways. Want to know more? - Check out our [documentation](https://mitiq.readthedocs.io/en/stable/guide/guide.html). -- To see what's in store for Mitiq this year, look at the 2024 Roadmap in the [wiki](https://github.com/unitaryfund/mitiq/wiki). +- To see what's in store for Mitiq, look at our roadmap in the [wiki](https://github.com/unitaryfund/mitiq/wiki). - For code, repo, or theory questions, especially those requiring more detailed responses, submit a [Discussion](https://github.com/unitaryfund/mitiq/discussions). 
- For casual or time sensitive questions, chat with us on [Discord](http://discord.unitary.fund). - Join our weekly community call on [Discord](http://discord.unitary.fund) ([public agenda](https://docs.google.com/document/d/1lZfct4AOCS7fdyWkudcGyER0n0nsCxSFKSicUEeJgtA/)). From 9a288ee53c15fb97f5399fe81b2688127c4fc37e Mon Sep 17 00:00:00 2001 From: Ella Carlander Date: Wed, 30 Oct 2024 13:54:29 -0700 Subject: [PATCH 3/6] adding work --- .../ddd_experiments/ddd_experiment.json | 3 ++ .../ddd_experiments/ddd_experiment_xyxy.json | 3 ++ .../ddd_experiments/ddd_experiment_yy.json | 3 ++ mitiq/schema/technique_schema/ddd_schema.json | 26 +++++++++++++++ mitiq/schema/technique_schema/zne_schema.json | 33 +++++++++++++++++++ .../zne_experiments/zne_experiment.json | 6 ++++ .../zne_experiments/zne_experiment_exp.json | 5 +++ .../zne_experiments/zne_experiment_lin.json | 5 +++ .../zne_experiments/zne_experiment_poly.json | 5 +++ 9 files changed, 89 insertions(+) create mode 100644 mitiq/schema/ddd_experiments/ddd_experiment.json create mode 100644 mitiq/schema/ddd_experiments/ddd_experiment_xyxy.json create mode 100644 mitiq/schema/ddd_experiments/ddd_experiment_yy.json create mode 100644 mitiq/schema/technique_schema/ddd_schema.json create mode 100644 mitiq/schema/technique_schema/zne_schema.json create mode 100644 mitiq/schema/zne_experiments/zne_experiment.json create mode 100644 mitiq/schema/zne_experiments/zne_experiment_exp.json create mode 100644 mitiq/schema/zne_experiments/zne_experiment_lin.json create mode 100644 mitiq/schema/zne_experiments/zne_experiment_poly.json diff --git a/mitiq/schema/ddd_experiments/ddd_experiment.json b/mitiq/schema/ddd_experiments/ddd_experiment.json new file mode 100644 index 0000000000..860258f4c5 --- /dev/null +++ b/mitiq/schema/ddd_experiments/ddd_experiment.json @@ -0,0 +1,3 @@ +{ + "rule": "xx" +} \ No newline at end of file diff --git a/mitiq/schema/ddd_experiments/ddd_experiment_xyxy.json 
b/mitiq/schema/ddd_experiments/ddd_experiment_xyxy.json new file mode 100644 index 0000000000..c31c7dd2cb --- /dev/null +++ b/mitiq/schema/ddd_experiments/ddd_experiment_xyxy.json @@ -0,0 +1,3 @@ +{ + "rule": "xyxy" +} \ No newline at end of file diff --git a/mitiq/schema/ddd_experiments/ddd_experiment_yy.json b/mitiq/schema/ddd_experiments/ddd_experiment_yy.json new file mode 100644 index 0000000000..316070bf8d --- /dev/null +++ b/mitiq/schema/ddd_experiments/ddd_experiment_yy.json @@ -0,0 +1,3 @@ +{ + "rule": "yy" +} \ No newline at end of file diff --git a/mitiq/schema/technique_schema/ddd_schema.json b/mitiq/schema/technique_schema/ddd_schema.json new file mode 100644 index 0000000000..1fbbf36081 --- /dev/null +++ b/mitiq/schema/technique_schema/ddd_schema.json @@ -0,0 +1,26 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "DDD Schema", + "description": "Schema for defining a DDD implementation, to potentially be used as a sub-schema in larger QEM-schema", + "type": "object", + "required": ["rule"], + "properties": { + "rule": { + "description": "Method used to make the cirucit more sensitive to noise, takes circuit and scale factors as inputm, returns new circuit that is theoretically equivalent but practically noisier", + "type": "string", + "enum": ["xx", "yy", "xyxy", "general", "repeated", "custom"] + }, + + "num_trials": { + "description": "Number of independent experiments to average over. 
A number larger than 1 can be useful to average over multiple applications of a rule returning non-deterministic DDD sequences", + "type": "integer" + + }, + + "full_output":{ + "description": "Whether to return the full output of the DDD rule, or just the mitigated expectation value", + "type": "boolean" + } + }, + "additionalProperties": false +} diff --git a/mitiq/schema/technique_schema/zne_schema.json b/mitiq/schema/technique_schema/zne_schema.json new file mode 100644 index 0000000000..5b80c6ee8f --- /dev/null +++ b/mitiq/schema/technique_schema/zne_schema.json @@ -0,0 +1,33 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ZNE Schema", + "description": "Schema for defining a ZNE implementation, to potentially be used as a sub-schema in larger QEM-schema", + "type": "object", + "properties": { + "noise_scaling_factors": { + "description": "Real scale factors used in the Factory for extrapolating to zero noise case, one for each point that is to be extrapolated", + "type": "array", + "items": { + "type": "number" + } + }, + + "noise_scaling_method": { + "description": "Method used to make the circuit more sensitive to noise, takes circuit and scale factors as input, returns new circuit that is theoretically equivalent but practically noisier", + "type": "string", + "enum": ["global", "local_random", "local_all", "layer", "identity_scaling"] + }, + + "scale_factor": { + "description": "Indicates how many times to fold the input circuit according to the noise_scaling_method, folded circuit will have number of gates approximately equal to scale_factor * len(circuit)", + "type": "number" + }, + + "extrapolation": { + "description": "Method used to extrapolate the noise scaling factors to points not in the original set", + "type": "string", + "enum": ["linear", "richardson", "polynomial", "exponential", "poly-exp", "adaptive-exp"] + } + }, + "additionalProperties": false +} diff --git a/mitiq/schema/zne_experiments/zne_experiment.json 
b/mitiq/schema/zne_experiments/zne_experiment.json new file mode 100644 index 0000000000..2faaf85937 --- /dev/null +++ b/mitiq/schema/zne_experiments/zne_experiment.json @@ -0,0 +1,6 @@ +{ + "noise_scaling_factors": [2, 4, 6, 8], + "noise_scaling_method": "global", + "extrapolation": "richardson", + "scale_factor": 2 + } \ No newline at end of file diff --git a/mitiq/schema/zne_experiments/zne_experiment_exp.json b/mitiq/schema/zne_experiments/zne_experiment_exp.json new file mode 100644 index 0000000000..c4efd4f2be --- /dev/null +++ b/mitiq/schema/zne_experiments/zne_experiment_exp.json @@ -0,0 +1,5 @@ +{ + "noise_scaling_factors": [2, 4, 6, 8], + "noise_scaling_method": "global", + "extrapolation": "exponential" + } \ No newline at end of file diff --git a/mitiq/schema/zne_experiments/zne_experiment_lin.json b/mitiq/schema/zne_experiments/zne_experiment_lin.json new file mode 100644 index 0000000000..fafc23b6e6 --- /dev/null +++ b/mitiq/schema/zne_experiments/zne_experiment_lin.json @@ -0,0 +1,5 @@ +{ + "noise_scaling_factors": [2, 4, 6, 8], + "noise_scaling_method": "global", + "extrapolation": "linear" + } \ No newline at end of file diff --git a/mitiq/schema/zne_experiments/zne_experiment_poly.json b/mitiq/schema/zne_experiments/zne_experiment_poly.json new file mode 100644 index 0000000000..92a4cd7378 --- /dev/null +++ b/mitiq/schema/zne_experiments/zne_experiment_poly.json @@ -0,0 +1,5 @@ +{ + "noise_scaling_factors": [2, 4, 6, 8], + "noise_scaling_method": "global", + "extrapolation": "polynomial" + } \ No newline at end of file From 4ccb838031a89d4654ee2f228de69e383b890414 Mon Sep 17 00:00:00 2001 From: Ella Carlander Date: Fri, 6 Dec 2024 15:37:21 -0800 Subject: [PATCH 4/6] copied stuff from qem_schema over --- mitiq/schema/arg_function_maps.py | 109 +++++++++++++++ .../ddd_experiments/ddd_experiment.json | 0 .../ddd_experiment_with_technique.json | 4 + .../ddd_experiments/ddd_experiment_xyxy.json | 0 .../ddd_experiments/ddd_experiment_yy.json | 0 
.../pec_experiments/pec_experiment.json | 3 + .../rem_experiments/rem_experiment.json | 3 + .../zne_experiments/zne_experiment.json | 0 .../zne_experiments/zne_experiment_exp.json | 0 .../zne_experiments/zne_experiment_lin.json | 0 .../zne_experiments/zne_experiment_poly.json | 0 .../zne_experiment_with_technique.json | 7 + mitiq/schema/functions_to_use.py | 130 ++++++++++++++++++ mitiq/schema/schema/composition_schema.json | 23 ++++ .../schema/composition_schema_ordered.json | 22 +++ .../ddd_schema.json | 17 +++ mitiq/schema/schema/pec_schema.json | 52 +++++++ mitiq/schema/schema/rem_schema.json | 27 ++++ .../zne_schema.json | 6 + mitiq/schema/schema_executors.py | 104 ++++++++++++++ 20 files changed, 507 insertions(+) create mode 100644 mitiq/schema/arg_function_maps.py rename mitiq/schema/{ => experiments}/ddd_experiments/ddd_experiment.json (100%) create mode 100644 mitiq/schema/experiments/ddd_experiments/ddd_experiment_with_technique.json rename mitiq/schema/{ => experiments}/ddd_experiments/ddd_experiment_xyxy.json (100%) rename mitiq/schema/{ => experiments}/ddd_experiments/ddd_experiment_yy.json (100%) create mode 100644 mitiq/schema/experiments/pec_experiments/pec_experiment.json create mode 100644 mitiq/schema/experiments/rem_experiments/rem_experiment.json rename mitiq/schema/{ => experiments}/zne_experiments/zne_experiment.json (100%) rename mitiq/schema/{ => experiments}/zne_experiments/zne_experiment_exp.json (100%) rename mitiq/schema/{ => experiments}/zne_experiments/zne_experiment_lin.json (100%) rename mitiq/schema/{ => experiments}/zne_experiments/zne_experiment_poly.json (100%) create mode 100644 mitiq/schema/experiments/zne_experiments/zne_experiment_with_technique.json create mode 100644 mitiq/schema/functions_to_use.py create mode 100644 mitiq/schema/schema/composition_schema.json create mode 100644 mitiq/schema/schema/composition_schema_ordered.json rename mitiq/schema/{technique_schema => schema}/ddd_schema.json (65%) create mode 100644 
mitiq/schema/schema/pec_schema.json create mode 100644 mitiq/schema/schema/rem_schema.json rename mitiq/schema/{technique_schema => schema}/zne_schema.json (89%) create mode 100644 mitiq/schema/schema_executors.py diff --git a/mitiq/schema/arg_function_maps.py b/mitiq/schema/arg_function_maps.py new file mode 100644 index 0000000000..d318955bd8 --- /dev/null +++ b/mitiq/schema/arg_function_maps.py @@ -0,0 +1,109 @@ +from mitiq import zne, ddd, pec, rem +from mitiq.zne.scaling.folding import fold_global, fold_gates_at_random, fold_all +from mitiq.zne.scaling.layer_scaling import get_layer_folding +from mitiq.zne.scaling.identity_insertion import insert_id_layers + +def ddd_schema_to_params(experiment): + ddd_rule_map = { + "xx": ddd.rules.xx, + "yy": ddd.rules.yy, + "xyxy": ddd.rules.xyxy, + "general": ddd.rules.general_rule, # need to adjust ddd_schema here to better allow for this + "repeated": ddd.rules.repeated_rule, # .. and this + "custom": None, # ... and this + } + + rule = ddd_rule_map[experiment['rule']] + + params = { + "rule": rule + } + + return params + + + +def zne_schema_to_params(experiment): + + zne_noise_scaling_method_map = { + "global": fold_global, + "local_random": fold_gates_at_random, + "local_all": fold_all, + "layer": get_layer_folding, + "identity_scaling": insert_id_layers + } + + zne_extrapolation_map = { + "linear": zne.inference.LinearFactory(scale_factors=experiment['noise_scaling_factors']), + "richardson": zne.inference.RichardsonFactory(scale_factors=experiment['noise_scaling_factors']), + "polynomial": zne.inference.PolyFactory(scale_factors=experiment['noise_scaling_factors'], order=0), # need to add order as an option in the metashcema here ... + "exponential": zne.inference.ExpFactory(scale_factors=experiment['noise_scaling_factors']), + "poly-exp": zne.inference.PolyExpFactory(scale_factors=experiment['noise_scaling_factors'], order=0), # .. 
and here + "adaptive-exp": zne.inference.AdaExpFactory(scale_factor=experiment['noise_scaling_factors'][0], steps=4, asymptote=None), # need to adjust metaschema here for steps + } + + extrapolation = zne_extrapolation_map[experiment['extrapolation']] + noise_scaling = zne_noise_scaling_method_map[experiment['noise_scaling_method']] + + params = { + "factory": extrapolation, + "scale_noise": noise_scaling + } + return params + + + +def pec_schema_to_params(experiment): + + params = { + "representations": experiment['operation_representations'], + "observable": experiment.get('observable', None), + "precision": experiment.get('precision', 0.03), + "num_samples" : experiment.get('num_samples', None), + "force_run_all": experiment.get('force_to_run_all', False), + "random_state": experiment.get('random_state', None), + "full_output": experiment.get('full_ouput', False) + + } + return params + + +def schema_to_params(experiment): + map_dict = { + "zne": zne_schema_to_params, + "ddd": ddd_schema_to_params, + "pec": pec_schema_to_params + } + + params = map_dict[experiment['technique']](experiment) + return params + + +def run_composed_experiment(composed_experiment, circuit, executor): + + experiments = composed_experiment['experiments'] + + mitigate_exectuor_map = { + "zne": zne.mitigate_executor, + "ddd": ddd.mitigate_executor, + "pec": pec.mitigate_executor, + "rem": rem.mitigate_executor + } + + execute_with_map = { + "zne": zne.execute_with_zne, + "ddd": ddd.execute_with_ddd, + "pec": pec.execute_with_pec, + "rem": rem.execute_with_rem + } + + for i, experiment in enumerate(experiments): + technique = experiment['technique'] + params = schema_to_params(experiment) + if i < len(experiments) - 1: + new_executor = mitigate_exectuor_map[technique](executor=executor, **params) + executor = new_executor + else: + result = execute_with_map[technique](executor=executor, circuit=circuit, **params) + + return result diff --git 
a/mitiq/schema/ddd_experiments/ddd_experiment.json b/mitiq/schema/experiments/ddd_experiments/ddd_experiment.json similarity index 100% rename from mitiq/schema/ddd_experiments/ddd_experiment.json rename to mitiq/schema/experiments/ddd_experiments/ddd_experiment.json diff --git a/mitiq/schema/experiments/ddd_experiments/ddd_experiment_with_technique.json b/mitiq/schema/experiments/ddd_experiments/ddd_experiment_with_technique.json new file mode 100644 index 0000000000..29baefa685 --- /dev/null +++ b/mitiq/schema/experiments/ddd_experiments/ddd_experiment_with_technique.json @@ -0,0 +1,4 @@ +{ + "technique": "ddd", + "rule": "xx" +} \ No newline at end of file diff --git a/mitiq/schema/ddd_experiments/ddd_experiment_xyxy.json b/mitiq/schema/experiments/ddd_experiments/ddd_experiment_xyxy.json similarity index 100% rename from mitiq/schema/ddd_experiments/ddd_experiment_xyxy.json rename to mitiq/schema/experiments/ddd_experiments/ddd_experiment_xyxy.json diff --git a/mitiq/schema/ddd_experiments/ddd_experiment_yy.json b/mitiq/schema/experiments/ddd_experiments/ddd_experiment_yy.json similarity index 100% rename from mitiq/schema/ddd_experiments/ddd_experiment_yy.json rename to mitiq/schema/experiments/ddd_experiments/ddd_experiment_yy.json diff --git a/mitiq/schema/experiments/pec_experiments/pec_experiment.json b/mitiq/schema/experiments/pec_experiments/pec_experiment.json new file mode 100644 index 0000000000..860258f4c5 --- /dev/null +++ b/mitiq/schema/experiments/pec_experiments/pec_experiment.json @@ -0,0 +1,3 @@ +{ + "rule": "xx" +} \ No newline at end of file diff --git a/mitiq/schema/experiments/rem_experiments/rem_experiment.json b/mitiq/schema/experiments/rem_experiments/rem_experiment.json new file mode 100644 index 0000000000..860258f4c5 --- /dev/null +++ b/mitiq/schema/experiments/rem_experiments/rem_experiment.json @@ -0,0 +1,3 @@ +{ + "rule": "xx" +} \ No newline at end of file diff --git a/mitiq/schema/zne_experiments/zne_experiment.json 
b/mitiq/schema/experiments/zne_experiments/zne_experiment.json similarity index 100% rename from mitiq/schema/zne_experiments/zne_experiment.json rename to mitiq/schema/experiments/zne_experiments/zne_experiment.json diff --git a/mitiq/schema/zne_experiments/zne_experiment_exp.json b/mitiq/schema/experiments/zne_experiments/zne_experiment_exp.json similarity index 100% rename from mitiq/schema/zne_experiments/zne_experiment_exp.json rename to mitiq/schema/experiments/zne_experiments/zne_experiment_exp.json diff --git a/mitiq/schema/zne_experiments/zne_experiment_lin.json b/mitiq/schema/experiments/zne_experiments/zne_experiment_lin.json similarity index 100% rename from mitiq/schema/zne_experiments/zne_experiment_lin.json rename to mitiq/schema/experiments/zne_experiments/zne_experiment_lin.json diff --git a/mitiq/schema/zne_experiments/zne_experiment_poly.json b/mitiq/schema/experiments/zne_experiments/zne_experiment_poly.json similarity index 100% rename from mitiq/schema/zne_experiments/zne_experiment_poly.json rename to mitiq/schema/experiments/zne_experiments/zne_experiment_poly.json diff --git a/mitiq/schema/experiments/zne_experiments/zne_experiment_with_technique.json b/mitiq/schema/experiments/zne_experiments/zne_experiment_with_technique.json new file mode 100644 index 0000000000..92f30ba11e --- /dev/null +++ b/mitiq/schema/experiments/zne_experiments/zne_experiment_with_technique.json @@ -0,0 +1,7 @@ +{ + "technique": "zne", + "noise_scaling_factors": [2, 4, 6, 8], + "noise_scaling_method": "global", + "extrapolation": "richardson", + "scale_factor": 2 + } \ No newline at end of file diff --git a/mitiq/schema/functions_to_use.py b/mitiq/schema/functions_to_use.py new file mode 100644 index 0000000000..f13bbd07b8 --- /dev/null +++ b/mitiq/schema/functions_to_use.py @@ -0,0 +1,130 @@ + +import random +import json +import jsonschema +from jsonschema import validate +from qiskit_aer.noise import NoiseModel, ReadoutError, depolarizing_error +from qiskit_aer 
import AerSimulator +from qiskit import transpile + + +def load(schema_path): + with open(schema_path, 'r') as file: + return json.load(file) + +def add_random_pauli_identities(circuit, k): + """Adds random Pauli gate pairs that act as identity on each qubit in the circuit. This is because zne was throwing an error when the circuit was very short for zne to work. + + Parameters: + circuit: qiskit circuit to modify. + k (int): number of Pauli identity operations to add (must be even) + + Returns: + qiskit circuit with added identities + """ + + circuit = circuit.copy() + if k % 2 != 0: + raise ValueError("k must be an even integer for the sequence to be equivalent to identity.") + + num_qubits = circuit.num_qubits + pauli_pairs = [('x', 'x'), ('y', 'y'), ('z', 'z')] # Possible Pauli identity pairs + + for qubit in range(num_qubits): + for _ in range(k // 2): + pauli_1, pauli_2 = random.choice(pauli_pairs) + getattr(circuit, pauli_1)(qubit) + getattr(circuit, pauli_2)(qubit) + + return circuit + + +def validate_experiment(experiment, schema): + try: + validate(instance=experiment, schema=schema) + print("validation passed") + except jsonschema.exceptions.ValidationError as e: + print("validation failed") + return None + + + +def validate_composed_experiment(composed_experiment, schema): + """This function validates a composed experiment against multiple schemas, ensuring the techniques used are compatible, accounting for their order as well + + Parameters: + experiment: dictionary containing the key "techniques" whose value is an array of experiments the user wishes to compose. 
+ schema: this is the schema to validate the individa + + """ + try: + validate(instance=composed_experiment, schema=schema) + print("individual experiment validation passed") + + except jsonschema.exceptions.ValidationError as e: + print(f"individual experiment validation failed") + + experiments = composed_experiment["experiments"] + techniques = [experiment["technique"] for experiment in experiments] + + # REM has to go first in the list of techniques + if "rem" in techniques and techniques[0] != "rem": + raise ValueError("REM is only compatible if applied as the first error mitigation technique.") + + + + # so now we have the ordered list of techniques the user is trying to use, so now we will check compatibility: + # zne, pec, and ddd are the three we have right now: + # -------> zne+pec: if zne is first, would want to apply pec to each of the noise scaled s=circuits, so would not be using execute_with_zne + # if pec is first, would want to apply zne to each of the sampled circuits + # -------> zne+ddd: if zne is first, would want to apply ddd to each of the noise scaled circuits + # if ddd is first,would noise scale the circuit with the windows filled (not sure this makes sense to do?) 
+ # -------> pec+ddd: if pec is first, would want to apply ddd to each of the sampled circuits + # if ddd is first, would want to apply pec to each circuits with slack windows filled + # -------> zne+pec+ddd: if zne is first, would want to apply pec to each of the noise scaled circuits, then apply ddd to each of the pec circuits + # REM needs and executor that returns raw measurment results + + + + return None + + +def basic_noise(n_qubits, prob=0.005): + + noise_model = NoiseModel() + + for i in range(n_qubits): + readout_err = ReadoutError([[0.99, 0.01], [0.2, 0.8]]) + noise_model.add_readout_error(readout_err, [i]) + + depolarizing_err1 = depolarizing_error(prob, num_qubits=1) + depolarizing_err2 = depolarizing_error(prob, num_qubits=2) + noise_model.add_all_qubit_quantum_error(depolarizing_err1, ['h', 'x', 'y', 'z']) + noise_model.add_all_qubit_quantum_error(depolarizing_err2, ['cx']) + + return noise_model + + + +# simple exector that returns the probability of measuring either |00...0> +# eventually, will want an exector that does not actually run the circuits at all and instead computes overhead just from the mitigation experiment info + +def execute_0s_ideal(circuit, noise_model=True): + circ = circuit.copy() + circ.measure_all() + num_qubits = circ.num_qubits + + if noise_model: + noise_model = basic_noise(num_qubits) + + backend = AerSimulator(noise_model=noise_model) + transpiled_circuit = transpile(circ, optimization_level=0, backend=backend) + + results = backend.run(transpiled_circuit, optimization_level=0, shots=1000).result() + counts = results.get_counts(transpiled_circuit) + + total_shots = sum(counts.values()) + probabilities = {state: count / total_shots for state, count in counts.items()} + + expectation_value = probabilities['0'*num_qubits] + return expectation_value \ No newline at end of file diff --git a/mitiq/schema/schema/composition_schema.json b/mitiq/schema/schema/composition_schema.json new file mode 100644 index 
0000000000..0872071958 --- /dev/null +++ b/mitiq/schema/schema/composition_schema.json @@ -0,0 +1,23 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "file:////Users/ellac/Documents/Unitary_Fund/qem_schema/schema/composition_schema.json", + "type": "object", + "properties": { + "experiments": { + "type": "array", + "description": "Array of individual experiments the user wishes to check compatibility for", + "items": { + "oneOf": [ + { "$ref": "./zne_schema.json" }, + { "$ref": "./ddd_schema.json" }, + { "$ref": "./pec_schema.json" }, + { "$ref": "./rem_schema.json" } + ] + }, + "minItems": 1, + "uniqueItems": true + } + }, + "required": ["experiments"], + "additionalProperties": false + } \ No newline at end of file diff --git a/mitiq/schema/schema/composition_schema_ordered.json b/mitiq/schema/schema/composition_schema_ordered.json new file mode 100644 index 0000000000..75ba1124b4 --- /dev/null +++ b/mitiq/schema/schema/composition_schema_ordered.json @@ -0,0 +1,22 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "file:////Users/ellac/Documents/Unitary_Fund/qem_schema/schema/composition_schema.json", + "type": "object", + "properties": { + "techniques": { + "type": "array", + "description": "Array of techniques to validate", + "items": { + "oneOf": [ + { "$ref": "./zne_schema.json" }, + { "$ref": "./ddd_schema.json" }, + { "$ref": "./pec_schema.json" } + ] + }, + "minItems": 1, + "uniqueItems": true + } + }, + "required": ["techniques"], + "additionalProperties": false + } \ No newline at end of file diff --git a/mitiq/schema/technique_schema/ddd_schema.json b/mitiq/schema/schema/ddd_schema.json similarity index 65% rename from mitiq/schema/technique_schema/ddd_schema.json rename to mitiq/schema/schema/ddd_schema.json index 1fbbf36081..947e8edd2a 100644 --- a/mitiq/schema/technique_schema/ddd_schema.json +++ b/mitiq/schema/schema/ddd_schema.json @@ -5,12 +5,29 @@ "type": "object", "required": ["rule"], "properties": { + 
"technique": { + "description": "Technique name, this is required when composing techniques", + "type": "string", + "enum": ["ddd"] + }, + + "observable": { + "description": "Observable to compute the expectation value of. If None, the executor must return an expectation value", + "type": "object" + }, + + "rule": { "description": "Method used to make the cirucit more sensitive to noise, takes circuit and scale factors as inputm, returns new circuit that is theoretically equivalent but practically noisier", "type": "string", "enum": ["xx", "yy", "xyxy", "general", "repeated", "custom"] }, + "rule_args": { + "description": "An optional dictionary of keyword arguments for rule (if using rule not in ['xx', 'yy', 'xyxy'])", + "type": "object" + }, + "num_trials": { "description": "Number of independent experiments to average over. A number larger than 1 can be useful to average over multiple applications of a rule returning non-deterministic DDD sequences", "type": "integer" diff --git a/mitiq/schema/schema/pec_schema.json b/mitiq/schema/schema/pec_schema.json new file mode 100644 index 0000000000..3de23a7b08 --- /dev/null +++ b/mitiq/schema/schema/pec_schema.json @@ -0,0 +1,52 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "PEC Schema", + "description": "Schema for defining a PEC implementation, to potentially be used as a sub-schema in larger QEM-schema", + "type": "object", + "properties": { + "technique": { + "description": "Technique name, this is required when composing techniques", + "type": "string", + "enum": ["pec"] + }, + + "operation_representations": { + "description": "Representations of the ideal gates in the circuit, expressed as a linear combination of noisy gates", + "type": "array", + "items": {} + }, + + "observable": { + "description": "Observable to compute the expectation value of. 
If None, the executor must return an expectation value" + }, + + "precision": { + "description": "The desired estimation precision (assuming the observable is bounded by 1), ignored if num_samples is given", + "type": "number", + "minimum": 0 + }, + + "num_samples": { + "description": "The number of noisy circuits to be sampled for PEC. If not given, deduced from the precision", + "type": "integer", + "minimum": 1 + }, + + "force_run_all": { + "description": "If True, all sampled circuits are executed regardless of uniqueness, else a minimal unique set is executed", + "type": "boolean" + }, + + "random_state": { + "description": "Seed for sampling circuits", + "type": "integer" + }, + + "full_output": { + "description": "If False only the average PEC value is returned. If True a dictionary containing all PEC data is returned too", + "type": "boolean" + } + + }, + "additionalProperties": false +} diff --git a/mitiq/schema/schema/rem_schema.json b/mitiq/schema/schema/rem_schema.json new file mode 100644 index 0000000000..0754ae5cae --- /dev/null +++ b/mitiq/schema/schema/rem_schema.json @@ -0,0 +1,27 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "REM Schema", + "description": "Schema for defining an REM implementation", + "type": "object", + "properties": { + "technique": { + "description": "Technique name, this is required when composing techniques", + "type": "string", + "enum": ["rem"] + }, + + "observable": { + "description": "Observable to compute the expectation value of", + "type": "object" + }, + + "inverse_confusion_matrix": { + "description": "The inverse confusion matrix to apply to the probability vector estimated with noisy measurement results.", + "type": "array", + "items": { + "type": "number" + } + } + }, + "additionalProperties": false +} diff --git a/mitiq/schema/technique_schema/zne_schema.json b/mitiq/schema/schema/zne_schema.json similarity index 89% rename from mitiq/schema/technique_schema/zne_schema.json rename to 
mitiq/schema/schema/zne_schema.json index 5b80c6ee8f..0e30e2dc8a 100644 --- a/mitiq/schema/technique_schema/zne_schema.json +++ b/mitiq/schema/schema/zne_schema.json @@ -4,6 +4,12 @@ "description": "Schema for defining a ZNE implementation, to potentially be used as a sub-schema in larger QEM-schema", "type": "object", "properties": { + "technique": { + "description": "Technique name, this is reuqired when composing techniques", + "type": "string", + "enum": ["zne"] + }, + "noise_scaling_factors": { "description": "Real scale factors used in the Factory for extrapolating to zero noise case, one for each point that is to be extrapolated", "type": "array", diff --git a/mitiq/schema/schema_executors.py b/mitiq/schema/schema_executors.py new file mode 100644 index 0000000000..923f75611a --- /dev/null +++ b/mitiq/schema/schema_executors.py @@ -0,0 +1,104 @@ +import json +import jsonschema +import random +import itertools +import numpy as np +from jsonschema import validate +from qiskit import transpile +from qiskit_aer import AerSimulator +from qiskit_aer.noise import NoiseModel, ReadoutError, depolarizing_error +from qiskit.circuit.random import random_circuit +from mitiq import zne, ddd +from mitiq.zne.scaling.folding import fold_global, fold_gates_at_random, fold_all +from mitiq.zne.scaling.layer_scaling import get_layer_folding +from mitiq.zne.scaling.identity_insertion import insert_id_layers +from mitiq.benchmarks.ghz_circuits import generate_ghz_circuit +from qiskit.circuit import Gate +from collections import defaultdict +import functools +import tomlkit +import toml + +class OverheadExecutors(): + + def __init__(self): + self.count_circs = [] + self.count_1qbit_gates = [] + self.count_2qbit_gates = [] + self.depths = [] + + # count_circs = [] + # count_1qbit_gates = [] + # count_2qbit_gates = [] + # depths = [] + + def execute_w_metadata(self, circuit, shots, backend): + + global count_circs, count_1qbit_gates, count_2qbit_gates, depths + + qc = circuit.copy() 
+ qc.measure_all() + num_qubits = qc.num_qubits + + depth = qc.depth() + + gate_counts = {1: 0, 2: 0} + for inst in circuit.data: + if isinstance(inst.operation, Gate): + qubit_count = len(inst.qubits) + gate_counts[qubit_count] = gate_counts.get(qubit_count, 0) + 1 + + self.count_circs.append(1) + self.count_1qbit_gates.append(gate_counts[1]) + self.count_2qbit_gates.append(gate_counts[2]) + self.depths.append(depth) + + + transpiled_qc = transpile(qc, backend=backend, optimization_level=0) + result = backend.run(transpiled_qc, shots=shots, optimization_level=0).result() + counts = result.get_counts(transpiled_qc) + + total_shots = sum(counts.values()) + probabilities = {state: count / total_shots for state, count in counts.items()} + + expectation_value = probabilities['0'*num_qubits]+probabilities['1'*num_qubits] + + return expectation_value + + + + def write_metadata(self, circuit, shots, backend): + # def write_metadata(self, circuit, shots, backend, metadata_path): + + #doc = tomlkit.document() + + metadata_dict = {} + + metadata_dict = {x: set() for x in ['mit_circs', 'mit_1qbit_gates', 'mit_2qbit_gates', 'mit_depth', 'mit_exp_val']} + + new_exec = functools.partial(self.execute_w_metadata, shots=shots, backend=backend) + + mit_exp_val = zne.execute_with_zne(circuit, new_exec) + + metadata_dict["mit_circs"] = sum(self.count_circs) + metadata_dict["mit_1qbit_gates"] = sum(self.count_1qbit_gates) + metadata_dict["mit_2qbit_gates"] = sum(self.count_2qbit_gates) + metadata_dict["mit_depth"] = sum(self.depths) + metadata_dict["mit_exp_val"] = mit_exp_val + + #doc.update(metadata_dict) + #doc.add(tomlkit.nl()) + + #with open(metadata_path, 'w') as f: + # f.write(tomlkit.dumps(doc)) + + self.count_circs = [] + self.count_1qbit_gates = [] + self.count_2qbit_gates = [] + self.depths = [] + + return metadata_dict + + + + From 093c171d514a4b53aeff2b6d2e4f42a04ce0ba46 Mon Sep 17 00:00:00 2001 From: Ella Carlander Date: Fri, 6 Dec 2024 15:53:56 -0800 Subject: [PATCH 5/6] 
adding testing notebooks --- mitiq/schema/composing_testing.ipynb | 228 +++++++++ mitiq/schema/overhead_testing.ipynb | 431 ++++++++++++++++ mitiq/schema/schema_testing.ipynb | 713 +++++++++++++++++++++++++++ mitiq/schema/sweep_testing.ipynb | 484 ++++++++++++++++++ 4 files changed, 1856 insertions(+) create mode 100644 mitiq/schema/composing_testing.ipynb create mode 100644 mitiq/schema/overhead_testing.ipynb create mode 100644 mitiq/schema/schema_testing.ipynb create mode 100644 mitiq/schema/sweep_testing.ipynb diff --git a/mitiq/schema/composing_testing.ipynb b/mitiq/schema/composing_testing.ipynb new file mode 100644 index 0000000000..10d5ece614 --- /dev/null +++ b/mitiq/schema/composing_testing.ipynb @@ -0,0 +1,228 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "from mitiq.benchmarks import generate_rb_circuits\n", + "from mitiq import zne, ddd, pec, rem\n", + "\n", + "from qiskit_ibm_runtime.fake_provider import FakeJakartaV2 \n", + "from functions_to_use import load, validate_experiment, validate_composed_experiment, execute_0s_ideal\n", + "from arg_function_maps import schema_to_params, run_composed_experiment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### So let's imagine someone wants to use both DDD and ZNE on their cirucit" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "zne_experiment: \n", + " {\n", + " \"technique\": \"zne\",\n", + " \"noise_scaling_factors\": [\n", + " 2,\n", + " 4,\n", + " 6,\n", + " 8\n", + " ],\n", + " \"noise_scaling_method\": \"global\",\n", + " \"extrapolation\": \"richardson\",\n", + " \"scale_factor\": 2\n", + "} \n", + "\n", + "ddd_experiment: \n", + " {\n", + " \"technique\": \"ddd\",\n", + " \"rule\": \"xx\"\n", + "}\n" + ] + } + ], + "source": [ + "zne_experiment = 
load('./experiments/zne_experiments/zne_experiment_with_technique.json')\n", + "ddd_experiment = load('./experiments/ddd_experiments/ddd_experiment_with_technique.json')\n", + "print(\"zne_experiment: \\n\", json.dumps(zne_experiment, indent=4), \"\\n\")\n", + "print(\"ddd_experiment: \\n\", json.dumps(ddd_experiment, indent=4))" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [], + "source": [ + "zne_params = schema_to_params(zne_experiment)\n", + "ddd_params = schema_to_params(ddd_experiment)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "composed_experiment = {\"experiments\": [ddd_experiment, zne_experiment]}\n", + "composition_schema = load('./schema/composition_schema.json')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*** something to think about: right now the $id for the composition_schema is the complete filepath. This is necessary because we are referncing other schema in it (the technique-specific schema)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "individual experiment validation passed\n" + ] + } + ], + "source": [ + "validate_composed_experiment(composed_experiment, composition_schema)" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [], + "source": [ + "n_qubits = 2\n", + "depth_circuit = 2\n", + "shots = 10 ** 4\n", + "\n", + "circuit = generate_rb_circuits(n_qubits, depth_circuit, trials=1, return_type=\"qiskit\")[0]\n", + "execute = execute_0s_ideal" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.9169999999999968\n" + ] + } + ], + "source": [ + "composed_result = run_composed_experiment(composed_experiment, circuit, execute)\n", + 
"print(composed_result)" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ideal = \t \t 1\n", + "unmitigated = \t \t 0.90900\n", + "zne mitigated = \t 1.21000\n", + "ddd mitigated = \t 0.92500\n", + "composed result = \t 0.91700\n" + ] + } + ], + "source": [ + "zne_mitigated = zne.execute_with_zne(circuit, execute, **zne_params)\n", + "ddd_mitigated = ddd.execute_with_ddd(circuit, execute, **ddd_params)\n", + "unmitigated = execute_0s_ideal(circuit)\n", + "ideal = 1 #property of RB circuits\n", + "\n", + "print(\"ideal = \\t \\t\",ideal)\n", + "print(\"unmitigated = \\t \\t\", \"{:.5f}\".format(unmitigated))\n", + "print(\"zne mitigated = \\t\", \"{:.5f}\".format(zne_mitigated))\n", + "print(\"ddd mitigated = \\t\", \"{:.5f}\".format(ddd_mitigated))\n", + "print(\"composed result = \\t\", \"{:.5f}\".format(composed_result))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "
\n", + "
\n", + "
\n", + "
\n", + "
\n", + "
" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "# ddd_executor = ddd.mitigate_executor(executor=execute, rule=ddd_params['rule'])\n", + "# combined_executor_ddd1st = zne.mitigate_executor(executor=ddd_executor, factory=zne_params['factory'], scale_noise=zne_params['scale_noise'])\n", + "# combined_result_ddd1st = combined_executor_ddd1st(circuit)\n", + "\n", + "# zne_executor = zne.mitigate_executor(executor=execute, factory=zne_params['factory'], scale_noise=zne_params['scale_noise'])\n", + "# combined_executor_zne1st = ddd.mitigate_executor(executor=zne_executor, rule=ddd_params['rule'])\n", + "# combined_result_zne1st = combined_executor_zne1st(circuit)\n", + "\n", + "# print(\"Mitigated value obtained with DDD then ZNE:\", \"{:.5f}\".format(combined_result_ddd1st.real))\n", + "# print(\"Mitigated value obtained with ZNE then DDD:\", \"{:.5f}\".format(combined_result_zne1st.real))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "mitiq_env", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/mitiq/schema/overhead_testing.ipynb b/mitiq/schema/overhead_testing.ipynb new file mode 100644 index 0000000000..7ec2153fd7 --- /dev/null +++ b/mitiq/schema/overhead_testing.ipynb @@ -0,0 +1,431 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import jsonschema\n", + "import random\n", + "import itertools\n", + "import numpy as np\n", + "from jsonschema import validate\n", + "from qiskit 
import transpile\n", + "from qiskit_aer import AerSimulator\n", + "from qiskit_aer.noise import NoiseModel, ReadoutError, depolarizing_error\n", + "from qiskit.circuit.random import random_circuit\n", + "from mitiq import zne, ddd\n", + "from mitiq.zne.scaling.folding import fold_global, fold_gates_at_random, fold_all\n", + "from mitiq.zne.scaling.layer_scaling import get_layer_folding\n", + "from mitiq.zne.scaling.identity_insertion import insert_id_layers\n", + "from mitiq.benchmarks.ghz_circuits import generate_ghz_circuit\n", + "from qiskit.circuit import Gate\n", + "from collections import defaultdict\n", + "import functools\n", + "import tomlkit\n", + "import toml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ZNE:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "# Load Schema:\n", + "\n", + "def load(schema_path):\n", + " with open(schema_path, 'r') as file:\n", + " return json.load(file)\n", + " \n", + "zne_schema =load('./schema/zne_schema.json')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# Create a test \"batch\" experiment:\n", + "\n", + "zne_batch_test = {\"noise_scaling_factors\": [[1, 1.25, 1.5], [1,2,3], [2,4,6]], # Noise scaling values\n", + " \"noise_scaling_method\": [\"global\"], # Folding method\n", + " \"extrapolation\": [\"polynomial\", \"linear\"], # Extrapolation method\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Deconstructing a Batch Dictionary:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# Create single experiments from batch object:\n", + "\n", + "# Define a function to make all combinations of experiments from a \"batch dictionary\" which should be formatted like the test case above\n", + "def make_experiment_list(batch_dict):\n", + "\n", + " # Initialize empty 
list where we will append each new experiment\n", + " exp_list = []\n", + "\n", + " # Make list with all combinations of key values from our \"batch dictionary\"\n", + " combo_list = list(itertools.product(batch_dict['noise_scaling_factors'], \n", + " batch_dict['noise_scaling_method'], \n", + " batch_dict['extrapolation']))\n", + " # Iterate over the list\n", + " for k in range(len(combo_list)):\n", + "\n", + " # Initialize single experiment dictionary with keys\n", + " exp = {x: set() for x in ['noise_scaling_factors', 'noise_scaling_method', 'extrapolation']}\n", + "\n", + " # Map each key to its unique value from our list of combinations\n", + " exp['noise_scaling_factors'] = combo_list[k][0]\n", + " exp['noise_scaling_method'] = combo_list[k][1]\n", + " exp['extrapolation'] = combo_list[k][2]\n", + "\n", + " # Pull out each experiment\n", + " exp_list.append(exp)\n", + "\n", + " # Returns a list of experiments\n", + " return exp_list" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'noise_scaling_factors': [1, 1.25, 1.5],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'polynomial'},\n", + " {'noise_scaling_factors': [1, 1.25, 1.5],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'linear'},\n", + " {'noise_scaling_factors': [1, 2, 3],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'polynomial'},\n", + " {'noise_scaling_factors': [1, 2, 3],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'linear'},\n", + " {'noise_scaling_factors': [2, 4, 6],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'polynomial'},\n", + " {'noise_scaling_factors': [2, 4, 6],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'linear'}]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Implementing our function using the test case 
defined above:\n", + "\n", + "make_experiment_list(zne_batch_test)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Validating a batch of experiments:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "# Validate each experiment in our batch object:\n", + "\n", + "# Create function that takes in a batch dictionary and schema to validate against\n", + "def batch_validate(batch_dict, schema):\n", + "\n", + " # Initialize empty list for validation results\n", + " validation_results = []\n", + "\n", + " # Create list of experiments\n", + " formatted_batch = make_experiment_list(batch_dict)\n", + "\n", + " # Iterate over list of experiments and individually validate\n", + " for k in formatted_batch:\n", + " try:\n", + " validate(instance=k, schema=schema)\n", + " result = \"validation passed\"\n", + " except jsonschema.exceptions.ValidationError as e:\n", + " result = \"validation failed\"\n", + "\n", + " # Pull out validation results\n", + " validation_results.append(result)\n", + " \n", + " return validation_results" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['validation passed',\n", + " 'validation passed',\n", + " 'validation passed',\n", + " 'validation passed',\n", + " 'validation passed',\n", + " 'validation passed']" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Implementing our batch validate function using the same test case:\n", + "\n", + "batch_validate(zne_batch_test, zne_schema)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# Ella's basic noise model:\n", + "\n", + "def basic_noise(n_qubits, prob=0.005):\n", + "\n", + " noise_model = NoiseModel()\n", + "\n", + " for i in range(n_qubits):\n", + " readout_err = ReadoutError([[0.99, 0.01], [0.2, 
0.8]])\n", + " noise_model.add_readout_error(readout_err, [i])\n", + " \n", + " depolarizing_err1 = depolarizing_error(prob, num_qubits=1)\n", + " depolarizing_err2 = depolarizing_error(prob, num_qubits=2)\n", + " noise_model.add_all_qubit_quantum_error(depolarizing_err1, ['h', 'x', 'y', 'z'])\n", + " noise_model.add_all_qubit_quantum_error(depolarizing_err2, ['cx'])\n", + "\n", + " return noise_model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Overhead Related Functions:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "# Single experiment executor (very slightly modified from Ella's):\n", + "# Likely to be unused, but just in case:\n", + "\n", + "def execute_no_overhead(circuit, backend, shots): # Set noise_model to thermal, basic, noiseless\n", + " circ = circuit.copy()\n", + " circ.measure_all()\n", + " num_qubits = circ.num_qubits\n", + " \n", + " transpiled_circuit = transpile(circ, optimization_level=0, backend=backend)\n", + "\n", + " results = backend.run(transpiled_circuit, optimization_level=0, shots=shots).result()\n", + " counts = results.get_counts(transpiled_circuit)\n", + "\n", + " total_shots = sum(counts.values())\n", + " probabilities = {state: count / total_shots for state, count in counts.items()}\n", + "\n", + " expectation_value = probabilities['0'*num_qubits]+probabilities['1'*num_qubits]\n", + " return expectation_value" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def get_original_circ_counts(circuit, backend, shots):\n", + "\n", + " circ = circuit.copy()\n", + " circ.measure_all()\n", + " num_qubits = circ.num_qubits\n", + "\n", + " og_params = {x: set() for x in ['depth', '1qbit_gates', '2qbit_gates', 'exp_val']}\n", + "\n", + " transpiled_circuit = transpile(circ, optimization_level=0, backend=backend)\n", + "\n", + " og_params[\"depth\"] = transpiled_circuit.depth()\n", + 
" counts = defaultdict(int)\n", + "\n", + " for inst in transpiled_circuit.data:\n", + " if isinstance(inst.operation, Gate):\n", + " counts[len(inst.qubits)] += 1\n", + " \n", + " o = {k: v for k, v in counts.items()}\n", + " \n", + " single_qubit_gates = (o.get(1)) if o.get(1) is not None else 0\n", + " two_qubit_gates = (o.get(2)) if o.get(2) is not None else 0\n", + " \n", + " og_params[\"1qbit_gates\"] = single_qubit_gates\n", + " og_params[\"2qbit_gates\"] = two_qubit_gates \n", + "\n", + " results = backend.run(transpiled_circuit, optimization_level=0, shots=shots).result()\n", + " counts = results.get_counts(transpiled_circuit)\n", + "\n", + " total_shots = sum(counts.values())\n", + " probabilities = {state: count / total_shots for state, count in counts.items()}\n", + "\n", + " expectation_value = probabilities['0'*num_qubits]+probabilities['1'*num_qubits]\n", + " og_params[\"exp_val\"] = expectation_value\n", + " \n", + " return og_params" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "def scaled_overhead(circuit, backend, shots):\n", + "\n", + " og_params = get_original_circ_counts(circuit, backend, shots)\n", + "\n", + " inst = schema_executors.overhead_executors()\n", + " \n", + " mit_params = inst.write_metadata(circuit=circuit, shots=shots, backend=backend)\n", + "\n", + " scaled_params = {x: set() for x in ['add_circs', 'add_depth', 'add_1qbit_gates', 'add_2qbit_gates', 'exp_val_improvement']}\n", + "\n", + " scaled_params[\"add_circs\"] = mit_params[\"mit_circs\"]\n", + " scaled_params[\"add_depth\"] = mit_params[\"mit_depth\"] - og_params[\"depth\"]\n", + " scaled_params[\"add_1qbit_gates\"] = mit_params[\"mit_1qbit_gates\"] - og_params[\"1qbit_gates\"]\n", + " scaled_params[\"add_2qbit_gates\"] = mit_params[\"mit_2qbit_gates\"] - og_params[\"2qbit_gates\"]\n", + " scaled_params[\"add_1qbit_gates\"] = mit_params[\"mit_1qbit_gates\"] - og_params[\"1qbit_gates\"]\n", + " 
scaled_params[\"exp_val_improvement\"] = og_params[\"exp_val\"]/mit_params[\"mit_exp_val\"]\n", + "\n", + " return scaled_params" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Testing:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " ┌───┐ ┌───┐┌───┐ \n", + "q_0: ┤ H ├──■──┤ X ├┤ X ├───────────────\n", + " └───┘┌─┴─┐└───┘├───┤┌───┐┌───┐┌───┐\n", + "q_1: ─────┤ X ├──■──┤ X ├┤ X ├┤ Y ├┤ Y ├\n", + " └───┘┌─┴─┐├───┤├───┤└───┘└───┘\n", + "q_2: ──────────┤ X ├┤ Y ├┤ Y ├──────────\n", + " └───┘└───┘└───┘ \n" + ] + } + ], + "source": [ + "# Making a 3-qubit test GHZ circuit:\n", + "\n", + "n = 3\n", + "ghz_circ = generate_ghz_circuit(n, return_type='qiskit')\n", + "\n", + "ghz_circ.x([0,1])\n", + "ghz_circ.x([0,1])\n", + "ghz_circ.y([1,2])\n", + "ghz_circ.y([1,2])\n", + "\n", + "print(ghz_circ)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'mit_circs': 3, 'mit_1qbit_gates': 53, 'mit_2qbit_gates': 12, 'mit_depth': 44, 'mit_exp_val': 0.5899999999999986}\n" + ] + } + ], + "source": [ + "# import schema_executors\n", + "from schema_executors import OverheadExecutors\n", + "\n", + "inst = OverheadExecutors()\n", + "\n", + "test_backend = AerSimulator(noise_model=basic_noise(ghz_circ.num_qubits))\n", + "\n", + "get_original_circ_counts(ghz_circ, test_backend, 1000)\n", + "\n", + "test_path = '.test.toml'\n", + "\n", + "metadata = inst.write_metadata(circuit=ghz_circ, shots=1000, backend=test_backend)\n", + "print(metadata)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + 
"nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/mitiq/schema/schema_testing.ipynb b/mitiq/schema/schema_testing.ipynb new file mode 100644 index 0000000000..1f28b93ce3 --- /dev/null +++ b/mitiq/schema/schema_testing.ipynb @@ -0,0 +1,713 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import jsonschema\n", + "import numpy as np\n", + "from jsonschema import validate\n", + "from qiskit import transpile\n", + "from qiskit_aer import AerSimulator\n", + "from mitiq import zne, ddd, pec\n", + "from mitiq.zne.scaling.folding import fold_global, fold_gates_at_random, fold_all\n", + "from mitiq.zne.scaling.layer_scaling import get_layer_folding\n", + "from mitiq.zne.scaling.identity_insertion import insert_id_layers\n", + "from mitiq.benchmarks import generate_rb_circuits\n", + "from arg_function_maps import schema_to_params\n", + "from functions_to_use import basic_noise, execute_0s_ideal" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ZNE" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "

\n", + "Load the defined ZNE schema and an example of a ZNE experiment (experiment) using the schema-defined structure
\n", + "     zne_schema: defines the shape and requirements of JSON data, in our case the parameters of a zne experiment
\n", + "     zne_experiment: an example of the type of input data we will use the schema to validate, defines a zne experiment\n", + "

" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "def load(schema_path):\n", + " with open(schema_path, 'r') as file:\n", + " return json.load(file)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "zne_schema =load('./schema/zne_schema.json')\n", + "zne_experiment = load('./experiments/zne_experiments/zne_experiment.json')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Both of these objects behave a lot like python dictionaries with several nested dictionaries, and we can examine their contents as such:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "top-level keys: dict_keys(['$schema', 'title', 'description', 'type', 'properties', 'additionalProperties']) \n", + " properties keys: dict_keys(['technique', 'noise_scaling_factors', 'noise_scaling_method', 'scale_factor', 'extrapolation']) \n", + " \n", + " noise_scaling_factors keys and values: \n", + " {\n", + " \"description\": \"Real scale factors used in the Factory for extrapolating to zero noise case, one for each point that is to be extrapolated\",\n", + " \"type\": \"array\",\n", + " \"items\": {\n", + " \"type\": \"number\"\n", + " }\n", + "} \n", + " extrapolation keys and values: \n", + " {\n", + " \"description\": \"Method used to extrapolate the noise scaling factors to points not in the original set\",\n", + " \"type\": \"string\",\n", + " \"enum\": [\n", + " \"linear\",\n", + " \"richardson\",\n", + " \"polynomial\",\n", + " \"exponential\",\n", + " \"poly-exp\",\n", + " \"adaptive-exp\"\n", + " ]\n", + "}\n" + ] + } + ], + "source": [ + "print(\"top-level keys:\", zne_schema.keys(), '\\n properties keys:',\n", + " zne_schema['properties'].keys(), '\\n \\n noise_scaling_factors keys and values: \\n',\n", + " 
json.dumps(zne_schema['properties']['noise_scaling_factors'], indent=4), '\\n extrapolation keys and values: \\n',\n", + " json.dumps(zne_schema['properties']['extrapolation'], indent=4)\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "top-level keys:
\n", + "
    \n", + "
  • $schema: reference to the version of JSON Schema standard being used
  • \n", + "
  • title: readable title for the schema, often for documentation purposes
  • \n", + "
  • description: explanation of the schema's purpose and structure, providing context for users
  • \n", + "
  • type: specifies the data type (object, array, string, etc) of the input data being validated
  • \n", + "
  • properties: defines object that the input data may be expected to have, along with their structure and validation criteria.
  • \n", + "
      \n", + "
    • So looking at the at the properties defined in zne_schema, we see the familiar options that are avaialable when experimentning zne
    • \n", + "
    • Specifically looking at the noise_scaling_factors and extrapolation properties, we can see how different properties can have different expected data types and the 'enum' key can be used to specify a fixed list of options
    • \n", + "
    \n", + "
\n", + "\n", + "
\n", + "
\n", + "\n", + "The contents of zne_experiment show an example of what properties may be specified and how:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " \"noise_scaling_factors\": [\n", + " 2,\n", + " 4,\n", + " 6,\n", + " 8\n", + " ],\n", + " \"noise_scaling_method\": \"global\",\n", + " \"extrapolation\": \"richardson\",\n", + " \"scale_factor\": 2\n", + "}\n" + ] + } + ], + "source": [ + "print(json.dumps(zne_experiment, indent=4))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "now let's use the schema to validate this experiment (check that it has the necessary structure and parameters defined)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def validate_experiment(experiment, schema):\n", + " try:\n", + " validate(instance=experiment, schema=schema)\n", + " print(\"validation passed\")\n", + " except jsonschema.exceptions.ValidationError as e:\n", + " print(\"validation failed\")\n", + " return None" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "validation passed\n" + ] + } + ], + "source": [ + "validate_experiment(zne_experiment, zne_schema)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "now that the validation has passed, we can extract the implementation parameters from zne_experiment to actually perform the mitigation" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "experiment = zne_experiment" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " ┌──────────┐ ┌────┐ ┌──────────┐ ┌─────────┐┌────┐┌──────────┐»\n", + "q_0: ┤ Ry(-π/2) 
├───┤ √X ├───┤ Ry(-π/2) ├─■─┤ Ry(π/2) ├┤ √X ├┤ Ry(-π/2) ├»\n", + " └──┬───┬───┘┌──┴────┴──┐└──────────┘ │ ├─────────┤├───┬┘└┬───────┬─┘»\n", + "q_1: ───┤ X ├────┤ Ry(-π/2) ├─────────────■─┤ Ry(π/2) ├┤ Y ├──┤ Rx(0) ├──»\n", + " └───┘ └──────────┘ └─────────┘└───┘ └───────┘ »\n", + "« ┌───────┐ ┌─────────┐ ┌──────┐ ┌──────────┐ ┌────┐ ┌──────────┐»\n", + "«q_0: ┤ Rx(0) ├─■─┤ Ry(π/2) ├─■───┤ √Xdg ├──┤ Ry(-π/2) ├───┤ √X ├──┤ Ry(-π/2) ├»\n", + "« └───────┘ │ └─┬──────┬┘ │ ┌─┴──────┴─┐└──┬────┬──┘┌──┴────┴─┐└──────────┘»\n", + "«q_1: ──────────■───┤ √Xdg ├──■─┤ Ry(-π/2) ├───┤ √X ├───┤ Ry(π/2) ├────────────»\n", + "« └──────┘ └──────────┘ └────┘ └─────────┘ »\n", + "« ┌───────┐ ┌─────────┐┌────┐\n", + "«q_0: ┤ Rx(0) ├─■─┤ Ry(π/2) ├┤ √X ├\n", + "« └───────┘ │ └──┬───┬──┘├────┤\n", + "«q_1: ──────────■────┤ Y ├───┤ √X ├\n", + "« └───┘ └────┘\n" + ] + } + ], + "source": [ + "# for the sake of this example, we will use an n-qubit randomized benchmarking circuit, which we can generate using mitiq's generate_rb_circuits function\n", + "n_qubits = 2\n", + "n_cliffords = 2\n", + "trials = 1\n", + "seed = 22\n", + "\n", + "rb_circ = generate_rb_circuits(n_qubits=n_qubits, num_cliffords=n_cliffords, trials=trials, return_type='qiskit', seed=seed)[0]\n", + "\n", + "print(rb_circ)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Counts: {'00': 1000}\n", + "Probabilities: {'00': 1.0}\n", + "|00...0> probability: 1.0 , error: 0.0\n" + ] + } + ], + "source": [ + "# let's calculate the noisless case of executing the circuit. 
A randomized benchmarking circuit should be equivalent to the identity operation, so we should expect the ideal output to be the all-zero state\n", + "\n", + "ideal_circ = rb_circ.copy()\n", + "ideal_circ.measure_all()\n", + "\n", + "ideal_backend = AerSimulator()\n", + "ideal_transpiled = transpile(ideal_circ, optimization_level=0, backend=ideal_backend)\n", + "result = ideal_backend.run(ideal_transpiled, optimization_level=0, shots=1000).result()\n", + "\n", + "counts = result.get_counts(ideal_transpiled)\n", + "print(\"Counts:\", counts)\n", + "\n", + "total_shots = sum(counts.values())\n", + "probabilities = {state: count / total_shots for state, count in counts.items()}\n", + "print(\"Probabilities:\", probabilities)\n", + "\n", + "print('|00...0> probability:', probabilities['0'*n_qubits], ', error:', np.abs(1.0-probabilities['0'*n_qubits]))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Counts: {'01': 20, '11': 7, '10': 51, '00': 922}\n", + "Probabilities: {'01': 0.02, '11': 0.007, '10': 0.051, '00': 0.922}\n", + "|00...0> probability: 0.922 , error: 0.07799999999999996\n" + ] + } + ], + "source": [ + "circ_noisy = rb_circ.copy()\n", + "circ_noisy.measure_all()\n", + "\n", + "noise_model = basic_noise(n_qubits=n_qubits)\n", + "noisy_backend = AerSimulator(noise_model=noise_model)\n", + "transpiled_noisy = transpile(circ_noisy, optimization_level=0, backend=noisy_backend)\n", + "\n", + "result = noisy_backend.run(transpiled_noisy, optimization_level=0, shots=1000).result()\n", + "counts = result.get_counts()\n", + "print(\"Counts:\", counts)\n", + "\n", + "total_shots = sum(counts.values())\n", + "probabilities = {state: count / total_shots for state, count in counts.items()}\n", + "\n", + "print(\"Probabilities:\", probabilities)\n", + "print('|00...0> probability:', probabilities['0'*n_qubits], ', error:', 
np.abs(1.0-probabilities['0'*n_qubits]))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ideal EV: 1.0\n", + "unmitigated noisy EV: 0.937\n" + ] + } + ], + "source": [ + "# simple exector that returns the probability of measuring either |00...0>\n", + "execute = execute_0s_ideal\n", + "\n", + "# we can also just use this executor to get the ideal and unmitigated expectation values:\n", + "\n", + "print('ideal EV: ', execute(rb_circ, noise_model=False))\n", + "print('unmitigated noisy EV: ', execute(rb_circ))" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "# this is sort of a band-aid solution to the problem of mapping strings to functions, need to think about how to do this better\n", + "\n", + "extrapolation_map = {\n", + " \"linear\": zne.inference.LinearFactory(scale_factors=experiment['noise_scaling_factors']),\n", + " \"richardson\": zne.inference.RichardsonFactory(scale_factors=experiment['noise_scaling_factors']),\n", + " \"polynomial\": zne.inference.PolyFactory(scale_factors=experiment['noise_scaling_factors'], order=0), # need to add order as an option in the metashcema here ...\n", + " \"exponential\": zne.inference.ExpFactory(scale_factors=experiment['noise_scaling_factors']),\n", + " \"poly-exp\": zne.inference.PolyExpFactory(scale_factors=experiment['noise_scaling_factors'], order=0), # .. 
and here\n", + " \"adaptive-exp\": zne.inference.AdaExpFactory(scale_factor=experiment['noise_scaling_factors'][0], steps=4, asymptote=None), # need to adjust metaschema here for steps\n", + "}\n", + "\n", + "noise_scaling_map = {\n", + " \"global\": fold_global,\n", + " \"local_random\": fold_gates_at_random,\n", + " \"local_all\": fold_all,\n", + " \"layer\": get_layer_folding,\n", + " \"identity_scaling\": insert_id_layers\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "mitigated_result = zne.execute_with_zne(circuit=rb_circ, executor=execute, factory=extrapolation_map[zne_experiment['extrapolation']], scale_noise=noise_scaling_map[zne_experiment['noise_scaling_method']])" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "1.0719999999999994" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "mitigated_result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "now we can look at experiments that use different extrapolation methods to see how thier mitigated results compare" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "validation passed\n", + "validation passed\n", + "validation passed\n" + ] + } + ], + "source": [ + "linear_experiment = load('./experiments/zne_experiments/zne_experiment_lin.json')\n", + "polynomial_experiment = load('./experiments/zne_experiments/zne_experiment_poly.json')\n", + "exponential_experiment = load('./experiments/zne_experiments/zne_experiment_exp.json')\n", + "\n", + "validate_experiment(linear_experiment, zne_schema)\n", + "validate_experiment(polynomial_experiment, zne_schema)\n", + "validate_experiment(exponential_experiment, zne_schema)" + ] + }, + { + "cell_type": "code", + 
"execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Linear result: 0.9664999999999999\n", + "Polynomial result: 0.79775\n", + "Exponential result: 1.0184815115946293\n", + "Richardson result: 1.0719999999999994\n" + ] + } + ], + "source": [ + "linear_result = zne.execute_with_zne(circuit=rb_circ, executor=execute, factory=extrapolation_map[linear_experiment['extrapolation']], scale_noise=noise_scaling_map[linear_experiment['noise_scaling_method']])\n", + "polynomial_result = zne.execute_with_zne(circuit=rb_circ, executor=execute, factory=extrapolation_map[polynomial_experiment['extrapolation']], scale_noise=noise_scaling_map[polynomial_experiment['noise_scaling_method']])\n", + "exponential_result = zne.execute_with_zne(circuit=rb_circ, executor=execute, factory=extrapolation_map[exponential_experiment['extrapolation']], scale_noise=noise_scaling_map[exponential_experiment['noise_scaling_method']])\n", + "\n", + "print(\"Linear result:\", linear_result)\n", + "print(\"Polynomial result:\", polynomial_result)\n", + "print(\"Exponential result:\", exponential_result)\n", + "print(\"Richardson result:\", mitigated_result)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# DDD" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "validation passed\n" + ] + } + ], + "source": [ + "ddd_schema =load('./schema/ddd_schema.json')\n", + "ddd_experiment = load('./experiments/ddd_experiments/ddd_experiment.json')\n", + "\n", + "validate_experiment(ddd_experiment, ddd_schema)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "rule_map = {\n", + " \"xx\": ddd.rules.xx,\n", + " \"yy\": ddd.rules.yy,\n", + " \"xyxy\": ddd.rules.xyxy, \n", + " \"general\": ddd.rules.general_rule, # need to adjust ddd_schema here to 
better allow for this \n", + " \"repeated\": ddd.rules.repeated_rule, # .. and this\n", + " \"custom\": None, # ... and this\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "mitigated_result = ddd.execute_with_ddd(circuit=rb_circ, executor=execute, rule=rule_map[ddd_experiment['rule']])" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "experiment rule: xx\n" + ] + } + ], + "source": [ + "print(\"experiment rule: \", ddd_experiment['rule'])" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "validation passed\n", + "validation passed\n" + ] + } + ], + "source": [ + "yy_experiment = load('./experiments/ddd_experiments/ddd_experiment_yy.json')\n", + "xyxy_experiment = load('./experiments/ddd_experiments/ddd_experiment_xyxy.json')\n", + "\n", + "validate_experiment(yy_experiment, ddd_schema)\n", + "validate_experiment(xyxy_experiment, ddd_schema)" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "yy_result = ddd.execute_with_ddd(circuit=rb_circ, executor=execute, rule=rule_map[yy_experiment['rule']])\n", + "xyxy_result = ddd.execute_with_ddd(circuit=rb_circ, executor=execute, rule=rule_map[xyxy_experiment['rule']])" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "xx result: 0.945\n", + "yy result: 0.948\n", + "xyxy result: 0.947\n" + ] + } + ], + "source": [ + "print(\"xx result:\", mitigated_result)\n", + "print(\"yy result:\", yy_result)\n", + "print(\"xyxy result:\", xyxy_result)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# PEC" + ] + }, + { + "cell_type": "code", + 
"execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "from mitiq.benchmarks import generate_rb_circuits\n", + "from qiskit_ibm_runtime.fake_provider import FakeJakartaV2 \n", + "from mitiq.pec.representations.depolarizing import represent_operations_in_circuit_with_local_depolarizing_noise\n", + "\n", + "n_qubits = 2\n", + "depth_circuit = 100\n", + "shots = 10 ** 4\n", + "\n", + "circuit = generate_rb_circuits(n_qubits, depth_circuit,return_type=\"qiskit\")[0]\n", + "circuit.measure_all()\n", + "\n", + "def execute_circuit(circuit):\n", + " \"\"\"Execute the input circuit and return the expectation value of |00..0><00..0|, which ideally should be 1 for randomized benchmarking circuits.\"\"\"\n", + " noisy_backend = FakeJakartaV2()\n", + " noisy_result = noisy_backend.run(circuit, shots=shots).result()\n", + " noisy_counts = noisy_result.get_counts(circuit)\n", + " noisy_expectation_value = noisy_counts[n_qubits * \"0\"] / shots\n", + " return noisy_expectation_value" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "13\n" + ] + } + ], + "source": [ + "noise_level = 0.01\n", + "reps = represent_operations_in_circuit_with_local_depolarizing_noise(rb_circ, noise_level)\n", + "print(len(reps))" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "pec_schema = load('./schema/pec_schema.json')\n", + "# print(json.dumps(pec_schema['properties'], indent=4))" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "validation passed\n" + ] + } + ], + "source": [ + "pec_experiment = {\n", + " \"technique\": \"pec\",\n", + " \"operation_representations\": reps,\n", + " \"num_samples\": 100\n", + "}\n", + "\n", + "validate_experiment(pec_experiment, pec_schema)" + ] + }, + { + 
"cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PEC result: 0.9512032176170859\n" + ] + } + ], + "source": [ + "params = schema_to_params(pec_experiment)\n", + "result = pec.execute_with_pec(rb_circ, execute, **params)\n", + "print(\"PEC result:\", result)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/mitiq/schema/sweep_testing.ipynb b/mitiq/schema/sweep_testing.ipynb new file mode 100644 index 0000000000..1006b8af89 --- /dev/null +++ b/mitiq/schema/sweep_testing.ipynb @@ -0,0 +1,484 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "import jsonschema\n", + "import random\n", + "import itertools\n", + "import numpy as np\n", + "from jsonschema import validate\n", + "from qiskit import transpile\n", + "from qiskit_aer import AerSimulator\n", + "from qiskit_aer.noise import NoiseModel, ReadoutError, depolarizing_error\n", + "from qiskit.circuit.random import random_circuit\n", + "from mitiq import zne, ddd\n", + "from mitiq.zne.scaling.folding import fold_global, fold_gates_at_random, fold_all\n", + "from mitiq.zne.scaling.layer_scaling import get_layer_folding\n", + "from mitiq.zne.scaling.identity_insertion import insert_id_layers\n", + "from mitiq.benchmarks.ghz_circuits import generate_ghz_circuit" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ZNE:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "# 
Load Schema:\n", + "\n", + "def load(schema_path):\n", + " with open(schema_path, 'r') as file:\n", + " return json.load(file)\n", + " \n", + "zne_schema =load('./schema/zne_schema.json')" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [], + "source": [ + "# Create a test \"batch\" experiment:\n", + "\n", + "zne_batch_test = {\"noise_scaling_factors\": [[1, 1.25, 1.5], [1,2,3], [2,4,6]], # Noise scaling values\n", + " \"noise_scaling_method\": [\"global\"], # Folding method\n", + " \"extrapolation\": [\"polynomial\", \"linear\"], # Extrapolation method\n", + " }" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Deconstructing a Batch Dictionary:" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "# Create single experiments from batch object:\n", + "\n", + "# Define a function to make all combinations of experiments from a \"batch dictionary\" which should be formatted like the test case above\n", + "def make_experiment_list(batch_dict):\n", + "\n", + " # Initialize empty list where we will append each new experiment\n", + " exp_list = []\n", + "\n", + " # Make list with all combinations of key values from our \"batch dictionary\"\n", + " combo_list = list(itertools.product(batch_dict['noise_scaling_factors'], \n", + " batch_dict['noise_scaling_method'], \n", + " batch_dict['extrapolation']))\n", + " # Iterate over the list\n", + " for k in range(len(combo_list)):\n", + "\n", + " # Initialize single experiment dictionary with keys\n", + " exp = {x: set() for x in ['noise_scaling_factors', 'noise_scaling_method', 'extrapolation']}\n", + "\n", + " # Map each key to its unique value from our list of combinations\n", + " exp['noise_scaling_factors'] = combo_list[k][0]\n", + " exp['noise_scaling_method'] = combo_list[k][1]\n", + " exp['extrapolation'] = combo_list[k][2]\n", + "\n", + " # Pull out each experiment\n", + " 
exp_list.append(exp)\n", + "\n", + " # Returns a list of experiments\n", + " return exp_list" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'noise_scaling_factors': [1, 1.25, 1.5],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'polynomial'},\n", + " {'noise_scaling_factors': [1, 1.25, 1.5],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'linear'},\n", + " {'noise_scaling_factors': [1, 2, 3],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'polynomial'},\n", + " {'noise_scaling_factors': [1, 2, 3],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'linear'},\n", + " {'noise_scaling_factors': [2, 4, 6],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'polynomial'},\n", + " {'noise_scaling_factors': [2, 4, 6],\n", + " 'noise_scaling_method': 'global',\n", + " 'extrapolation': 'linear'}]" + ] + }, + "execution_count": 36, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Implementing our function using the test case defined above:\n", + "\n", + "make_experiment_list(zne_batch_test)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Validating a batch of experiments:" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [], + "source": [ + "# Validate each experiment in our batch object:\n", + "\n", + "# Create function that takes in a batch dictionary and schema to validate against\n", + "def batch_validate(batch_dict, schema):\n", + "\n", + " # Initialize empty list for validation results\n", + " validation_results = []\n", + "\n", + " # Create list of experiments\n", + " formatted_batch = make_experiment_list(batch_dict)\n", + "\n", + " # Iterate over list of experiments and individually validate\n", + " for k in formatted_batch:\n", + " try:\n", + " validate(instance=k, schema=schema)\n", + 
" result = \"validation passed\"\n", + " except jsonschema.exceptions.ValidationError as e:\n", + " result = \"validation failed\"\n", + "\n", + " # Pull out validation results\n", + " validation_results.append(result)\n", + " \n", + " return validation_results" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['validation passed',\n", + " 'validation passed',\n", + " 'validation passed',\n", + " 'validation passed',\n", + " 'validation passed',\n", + " 'validation passed']" + ] + }, + "execution_count": 38, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Implementing our batch validate function using the same test case:\n", + "\n", + "batch_validate(zne_batch_test, zne_schema)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating Noise Models:" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [], + "source": [ + "# Making a thermal noise model using IonQ T1, T2 parameters:\n", + "\n", + "from qiskit_aer.noise import (NoiseModel, thermal_relaxation_error)\n", + "\n", + "def thermal_relaxation_noise_ionq(N): \n", + " \n", + " # T1 and T2 values for qubits 0-N\n", + " T1s = np.random.normal(1.1e10, 0.2e10, N) # Sampled N values from normal dist, w/ mean = 1.1e10, std = 2e9 us, (converted to ns)\n", + " T2s = np.random.normal(2e8, 0.2e8, N) # Sampled N values from normal dist, w/ mean = 2e5, std = 2e4 us (converted to ns)\n", + "\n", + " # Instruction times (in nanoseconds)\n", + " time_ones = 1000 \n", + "\n", + " noise_thermal = NoiseModel(basis_gates=['h', 'x', 'y', 'z', 'cx']) # Initialize noise model w ansatz gates\n", + "\n", + " # QuantumError objects\n", + " errors_ones = [thermal_relaxation_error(t1, t2, time_ones) # Make tuples of errors for rx, rz gates \n", + " for t1, t2 in zip(T1s, T2s)]\n", + " \n", + " # Apply error to all qubits in circuit from randomly sampled 
parameters\n", + " for j in range(N):\n", + " noise_thermal.add_quantum_error(errors_ones[j], \"h\", [j])\n", + " noise_thermal.add_quantum_error(errors_ones[j], \"x\", [j])\n", + " noise_thermal.add_quantum_error(errors_ones[j], \"y\", [j])\n", + " noise_thermal.add_quantum_error(errors_ones[j], \"z\", [j])\n", + " \n", + " sim_noise = AerSimulator(noise_model=noise_thermal)\n", + " return sim_noise, noise_thermal" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [], + "source": [ + "# Ella's basic noise model:\n", + "\n", + "def basic_noise(n_qubits, prob=0.005):\n", + "\n", + " noise_model = NoiseModel()\n", + "\n", + " for i in range(n_qubits):\n", + " readout_err = ReadoutError([[0.99, 0.01], [0.2, 0.8]])\n", + " noise_model.add_readout_error(readout_err, [i])\n", + " \n", + " depolarizing_err1 = depolarizing_error(prob, num_qubits=1)\n", + " depolarizing_err2 = depolarizing_error(prob, num_qubits=2)\n", + " noise_model.add_all_qubit_quantum_error(depolarizing_err1, ['h', 'x', 'y', 'z'])\n", + " noise_model.add_all_qubit_quantum_error(depolarizing_err2, ['cx'])\n", + "\n", + " return noise_model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining the Executor & Batch Executor:" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "# Single experiment executor (very slightly modified from Ella's):\n", + "\n", + "def execute(circuit, noise_model='basic'): # Set noise_model to thermal, basic, noiseless\n", + " circ = circuit.copy()\n", + " circ.measure_all()\n", + " num_qubits = circ.num_qubits\n", + "\n", + " if noise_model == 'thermal':\n", + " backend = thermal_relaxation_noise_ionq(num_qubits)[0]\n", + " elif noise_model == 'basic':\n", + " backend = AerSimulator(noise_model=basic_noise(num_qubits))\n", + " elif noise_model == 'noiseless':\n", + " backend = AerSimulator()\n", + " \n", + " #backend = 
AerSimulator(noise_model=noise_model)\n", + " transpiled_circuit = transpile(circ, optimization_level=0, backend=backend)\n", + "\n", + " results = backend.run(transpiled_circuit, optimization_level=0, shots=1000).result()\n", + " counts = results.get_counts(transpiled_circuit)\n", + "\n", + " total_shots = sum(counts.values())\n", + " probabilities = {state: count / total_shots for state, count in counts.items()}\n", + "\n", + " expectation_value = probabilities['0'*num_qubits]+probabilities['1'*num_qubits]\n", + " return expectation_value" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [], + "source": [ + "# Adjusting Ella's mapping functionality:\n", + "\n", + "noise_scaling_map = {\n", + " \"global\": fold_global,\n", + " \"local_random\": fold_gates_at_random,\n", + " \"local_all\": fold_all,\n", + " \"layer\": get_layer_folding,\n", + " \"identity_scaling\": insert_id_layers\n", + "}\n", + "\n", + "def extrapolation_map(single_exp):\n", + " ex_map = {\n", + " \"linear\": zne.inference.LinearFactory(scale_factors=single_exp['noise_scaling_factors']),\n", + " \"richardson\": zne.inference.RichardsonFactory(scale_factors=single_exp['noise_scaling_factors']),\n", + " \"polynomial\": zne.inference.PolyFactory(scale_factors=single_exp['noise_scaling_factors'], order=2),\n", + " \"exponential\": zne.inference.ExpFactory(scale_factors=single_exp['noise_scaling_factors']),\n", + " \"poly-exp\": zne.inference.PolyExpFactory(scale_factors=single_exp['noise_scaling_factors'], order=1),\n", + " \"adaptive-exp\": zne.inference.AdaExpFactory(scale_factor=single_exp['noise_scaling_factors'][1], steps=4, asymptote=None), \n", + " }\n", + " return ex_map[single_exp['extrapolation']]" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": {}, + "outputs": [], + "source": [ + "# Batch execute function to return list of expectation values:\n", + "\n", + "def batch_execute(batch_dict, circuit, executor):\n", + "\n", 
+ " # Define list of experiments\n", + " formatted_batch = make_experiment_list(batch_dict)\n", + "\n", + " # Initialize list to append expectation values into\n", + " exp_val_list = []\n", + "\n", + " # Iterate over each experiment\n", + " for k in formatted_batch:\n", + " exp_val = zne.execute_with_zne(circuit=circuit, executor=executor, factory=extrapolation_map(k), \n", + " scale_noise=noise_scaling_map[k['noise_scaling_method']])\n", + " \n", + " # Pull out expectation values\n", + " exp_val_list.append(exp_val)\n", + " \n", + " return exp_val_list" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Testing:" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " ┌───┐ ┌───┐┌───┐ \n", + "q_0: ┤ H ├──■──┤ X ├┤ X ├───────────────\n", + " └───┘┌─┴─┐└───┘├───┤┌───┐┌───┐┌───┐\n", + "q_1: ─────┤ X ├──■──┤ X ├┤ X ├┤ Y ├┤ Y ├\n", + " └───┘┌─┴─┐├───┤├───┤└───┘└───┘\n", + "q_2: ──────────┤ X ├┤ Y ├┤ Y ├──────────\n", + " └───┘└───┘└───┘ \n" + ] + } + ], + "source": [ + "# Making a 3-qubit test GHZ circuit:\n", + "\n", + "n = 3\n", + "ghz_circ = generate_ghz_circuit(n, return_type='qiskit')\n", + "\n", + "ghz_circ.x([0,1])\n", + "ghz_circ.x([0,1])\n", + "ghz_circ.y([1,2])\n", + "ghz_circ.y([1,2])\n", + "\n", + "print(ghz_circ)" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ideal EV: 1.0\n", + "unmitigated noisy EV: 0.721\n" + ] + } + ], + "source": [ + "print('ideal EV: ', execute(ghz_circ, noise_model='noiseless'))\n", + "print('unmitigated noisy EV: ', execute(ghz_circ, noise_model='basic'))" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Experiment 1 Mitigated Expectation Value: 0.7759999999999967\n", + "Experiment 
2 Mitigated Expectation Value: 0.7123333333333339\n", + "Experiment 3 Mitigated Expectation Value: 0.790999999999999\n", + "Experiment 4 Mitigated Expectation Value: 0.7943333333333336\n", + "Experiment 5 Mitigated Expectation Value: 0.663999999999999\n", + "Experiment 6 Mitigated Expectation Value: 0.7863333333333338\n" + ] + } + ], + "source": [ + "exp_results = batch_execute(zne_batch_test, ghz_circ, execute)\n", + "\n", + "for k in np.arange(1,7):\n", + " print(\"Experiment\", k, \"Mitigated Expectation Value:\", exp_results[k-1])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From c20156f4eb9868098f75762e2c36ac8d3c273a3e Mon Sep 17 00:00:00 2001 From: Ella Carlander Date: Thu, 19 Dec 2024 13:29:27 -0600 Subject: [PATCH 6/6] Remove all code except individual technique schema and validation --- mitiq/schema/arg_function_maps.py | 109 --- mitiq/schema/composing_testing.ipynb | 228 ------ .../ddd_experiments/ddd_experiment.json | 3 - .../ddd_experiment_with_technique.json | 4 - .../ddd_experiments/ddd_experiment_xyxy.json | 3 - .../ddd_experiments/ddd_experiment_yy.json | 3 - .../pec_experiments/pec_experiment.json | 3 - .../rem_experiments/rem_experiment.json | 3 - .../zne_experiments/zne_experiment.json | 6 - .../zne_experiments/zne_experiment_exp.json | 5 - .../zne_experiments/zne_experiment_lin.json | 5 - .../zne_experiments/zne_experiment_poly.json | 5 - .../zne_experiment_with_technique.json | 7 - mitiq/schema/functions_to_use.py | 130 ---- mitiq/schema/overhead_testing.ipynb 
| 431 ----------- mitiq/schema/schema/composition_schema.json | 23 - .../schema/composition_schema_ordered.json | 22 - mitiq/schema/schema_executors.py | 104 --- mitiq/schema/schema_testing.ipynb | 713 ------------------ mitiq/schema/sweep_testing.ipynb | 484 ------------ .../ddd_schema.json | 0 .../pec_schema.json | 0 .../rem_schema.json | 0 .../zne_schema.json | 0 mitiq/schema/validation_utils.py | 18 + 25 files changed, 18 insertions(+), 2291 deletions(-) delete mode 100644 mitiq/schema/arg_function_maps.py delete mode 100644 mitiq/schema/composing_testing.ipynb delete mode 100644 mitiq/schema/experiments/ddd_experiments/ddd_experiment.json delete mode 100644 mitiq/schema/experiments/ddd_experiments/ddd_experiment_with_technique.json delete mode 100644 mitiq/schema/experiments/ddd_experiments/ddd_experiment_xyxy.json delete mode 100644 mitiq/schema/experiments/ddd_experiments/ddd_experiment_yy.json delete mode 100644 mitiq/schema/experiments/pec_experiments/pec_experiment.json delete mode 100644 mitiq/schema/experiments/rem_experiments/rem_experiment.json delete mode 100644 mitiq/schema/experiments/zne_experiments/zne_experiment.json delete mode 100644 mitiq/schema/experiments/zne_experiments/zne_experiment_exp.json delete mode 100644 mitiq/schema/experiments/zne_experiments/zne_experiment_lin.json delete mode 100644 mitiq/schema/experiments/zne_experiments/zne_experiment_poly.json delete mode 100644 mitiq/schema/experiments/zne_experiments/zne_experiment_with_technique.json delete mode 100644 mitiq/schema/functions_to_use.py delete mode 100644 mitiq/schema/overhead_testing.ipynb delete mode 100644 mitiq/schema/schema/composition_schema.json delete mode 100644 mitiq/schema/schema/composition_schema_ordered.json delete mode 100644 mitiq/schema/schema_executors.py delete mode 100644 mitiq/schema/schema_testing.ipynb delete mode 100644 mitiq/schema/sweep_testing.ipynb rename mitiq/schema/{schema => technique_schema}/ddd_schema.json (100%) rename 
mitiq/schema/{schema => technique_schema}/pec_schema.json (100%) rename mitiq/schema/{schema => technique_schema}/rem_schema.json (100%) rename mitiq/schema/{schema => technique_schema}/zne_schema.json (100%) create mode 100644 mitiq/schema/validation_utils.py diff --git a/mitiq/schema/arg_function_maps.py b/mitiq/schema/arg_function_maps.py deleted file mode 100644 index d318955bd8..0000000000 --- a/mitiq/schema/arg_function_maps.py +++ /dev/null @@ -1,109 +0,0 @@ -from mitiq import zne, ddd, pec, rem -from mitiq.zne.scaling.folding import fold_global, fold_gates_at_random, fold_all -from mitiq.zne.scaling.layer_scaling import get_layer_folding -from mitiq.zne.scaling.identity_insertion import insert_id_layers - -def ddd_schema_to_params(experiment): - ddd_rule_map = { - "xx": ddd.rules.xx, - "yy": ddd.rules.yy, - "xyxy": ddd.rules.xyxy, - "general": ddd.rules.general_rule, # need to adjust ddd_schema here to better allow for this - "repeated": ddd.rules.repeated_rule, # .. and this - "custom": None, # ... and this - } - - rule = ddd_rule_map[experiment['rule']] - - params = { - "rule": rule - } - - return params - - - -def zne_schema_to_params(experiment): - - zne_noise_scaling_method_map = { - "global": fold_global, - "local_random": fold_gates_at_random, - "local_all": fold_all, - "layer": get_layer_folding, - "identity_scaling": insert_id_layers - } - - zne_extrapolation_map = { - "linear": zne.inference.LinearFactory(scale_factors=experiment['noise_scaling_factors']), - "richardson": zne.inference.RichardsonFactory(scale_factors=experiment['noise_scaling_factors']), - "polynomial": zne.inference.PolyFactory(scale_factors=experiment['noise_scaling_factors'], order=0), # need to add order as an option in the metashcema here ... - "exponential": zne.inference.ExpFactory(scale_factors=experiment['noise_scaling_factors']), - "poly-exp": zne.inference.PolyExpFactory(scale_factors=experiment['noise_scaling_factors'], order=0), # .. 
and here - "adaptive-exp": zne.inference.AdaExpFactory(scale_factor=experiment['noise_scaling_factors'][0], steps=4, asymptote=None), # need to adjust metaschema here for steps - } - - extrapolation = zne_extrapolation_map[experiment['extrapolation']] - noise_scaling = zne_noise_scaling_method_map[experiment['noise_scaling_method']] - - params = { - "factory": extrapolation, - "scale_noise": noise_scaling - } - return params - - - -def pec_schema_to_params(experiment): - - params = { - "representations": experiment['operation_representations'], - "observable": experiment.get('observable', None), - "precision": experiment.get('precision', 0.03), - "num_samples" : experiment.get('num_samples', None), - "force_run_all": experiment.get('force_to_run_all', False), - "random_state": experiment.get('random_state', None), - "full_output": experiment.get('full_ouput', False) - - } - return params - - -def schema_to_params(experiment): - map_dict = { - "zne": zne_schema_to_params, - "ddd": ddd_schema_to_params, - "pec": pec_schema_to_params - } - - params = map_dict[experiment['technique']](experiment) - return params - - -def run_composed_experiment(composed_experiment, circuit, executor): - - experiments = composed_experiment['experiments'] - - mitigate_exectuor_map = { - "zne": zne.mitigate_executor, - "ddd": ddd.mitigate_executor, - "pec": pec.mitigate_executor, - "rem": rem.mitigate_executor - } - - execute_with_map = { - "zne": zne.execute_with_zne, - "ddd": ddd.execute_with_ddd, - "pec": pec.execute_with_pec, - "rem": rem.execute_with_rem - } - - for i, experiment in enumerate(experiments): - technique = experiment['technique'] - params = schema_to_params(experiment) - if i < len(experiments) - 1: - new_executor = mitigate_exectuor_map[technique](executor=executor, **params) - executor = new_executor - else: - result = execute_with_map[technique](executor=executor, circuit=circuit, **params) - - return result diff --git a/mitiq/schema/composing_testing.ipynb 
b/mitiq/schema/composing_testing.ipynb deleted file mode 100644 index 10d5ece614..0000000000 --- a/mitiq/schema/composing_testing.ipynb +++ /dev/null @@ -1,228 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "from mitiq.benchmarks import generate_rb_circuits\n", - "from mitiq import zne, ddd, pec, rem\n", - "\n", - "from qiskit_ibm_runtime.fake_provider import FakeJakartaV2 \n", - "from functions_to_use import load, validate_experiment, validate_composed_experiment, execute_0s_ideal\n", - "from arg_function_maps import schema_to_params, run_composed_experiment" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### So let's imagine someone wants to use both DDD and ZNE on their cirucit" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "zne_experiment: \n", - " {\n", - " \"technique\": \"zne\",\n", - " \"noise_scaling_factors\": [\n", - " 2,\n", - " 4,\n", - " 6,\n", - " 8\n", - " ],\n", - " \"noise_scaling_method\": \"global\",\n", - " \"extrapolation\": \"richardson\",\n", - " \"scale_factor\": 2\n", - "} \n", - "\n", - "ddd_experiment: \n", - " {\n", - " \"technique\": \"ddd\",\n", - " \"rule\": \"xx\"\n", - "}\n" - ] - } - ], - "source": [ - "zne_experiment = load('./experiments/zne_experiments/zne_experiment_with_technique.json')\n", - "ddd_experiment = load('./experiments/ddd_experiments/ddd_experiment_with_technique.json')\n", - "print(\"zne_experiment: \\n\", json.dumps(zne_experiment, indent=4), \"\\n\")\n", - "print(\"ddd_experiment: \\n\", json.dumps(ddd_experiment, indent=4))" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [], - "source": [ - "zne_params = schema_to_params(zne_experiment)\n", - "ddd_params = schema_to_params(ddd_experiment)" - ] - }, - { - "cell_type": "code", - 
"execution_count": 26, - "metadata": {}, - "outputs": [], - "source": [ - "composed_experiment = {\"experiments\": [ddd_experiment, zne_experiment]}\n", - "composition_schema = load('./schema/composition_schema.json')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "*** something to think about: right now the $id for the composition_schema is the complete filepath. This is necessary because we are referncing other schema in it (the technique-specific schema)" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "individual experiment validation passed\n" - ] - } - ], - "source": [ - "validate_composed_experiment(composed_experiment, composition_schema)" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [], - "source": [ - "n_qubits = 2\n", - "depth_circuit = 2\n", - "shots = 10 ** 4\n", - "\n", - "circuit = generate_rb_circuits(n_qubits, depth_circuit, trials=1, return_type=\"qiskit\")[0]\n", - "execute = execute_0s_ideal" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.9169999999999968\n" - ] - } - ], - "source": [ - "composed_result = run_composed_experiment(composed_experiment, circuit, execute)\n", - "print(composed_result)" - ] - }, - { - "cell_type": "code", - "execution_count": 47, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "ideal = \t \t 1\n", - "unmitigated = \t \t 0.90900\n", - "zne mitigated = \t 1.21000\n", - "ddd mitigated = \t 0.92500\n", - "composed result = \t 0.91700\n" - ] - } - ], - "source": [ - "zne_mitigated = zne.execute_with_zne(circuit, execute, **zne_params)\n", - "ddd_mitigated = ddd.execute_with_ddd(circuit, execute, **ddd_params)\n", - "unmitigated = execute_0s_ideal(circuit)\n", - "ideal = 1 
#property of RB circuits\n", - "\n", - "print(\"ideal = \\t \\t\",ideal)\n", - "print(\"unmitigated = \\t \\t\", \"{:.5f}\".format(unmitigated))\n", - "print(\"zne mitigated = \\t\", \"{:.5f}\".format(zne_mitigated))\n", - "print(\"ddd mitigated = \\t\", \"{:.5f}\".format(ddd_mitigated))\n", - "print(\"composed result = \\t\", \"{:.5f}\".format(composed_result))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
\n", - "
\n", - "
\n", - "
\n", - "
\n", - "
" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [], - "source": [ - "# ddd_executor = ddd.mitigate_executor(executor=execute, rule=ddd_params['rule'])\n", - "# combined_executor_ddd1st = zne.mitigate_executor(executor=ddd_executor, factory=zne_params['factory'], scale_noise=zne_params['scale_noise'])\n", - "# combined_result_ddd1st = combined_executor_ddd1st(circuit)\n", - "\n", - "# zne_executor = zne.mitigate_executor(executor=execute, factory=zne_params['factory'], scale_noise=zne_params['scale_noise'])\n", - "# combined_executor_zne1st = ddd.mitigate_executor(executor=zne_executor, rule=ddd_params['rule'])\n", - "# combined_result_zne1st = combined_executor_zne1st(circuit)\n", - "\n", - "# print(\"Mitigated value obtained with DDD then ZNE:\", \"{:.5f}\".format(combined_result_ddd1st.real))\n", - "# print(\"Mitigated value obtained with ZNE then DDD:\", \"{:.5f}\".format(combined_result_zne1st.real))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "mitiq_env", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/mitiq/schema/experiments/ddd_experiments/ddd_experiment.json b/mitiq/schema/experiments/ddd_experiments/ddd_experiment.json deleted file mode 100644 index 860258f4c5..0000000000 --- a/mitiq/schema/experiments/ddd_experiments/ddd_experiment.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "rule": "xx" -} \ No newline at end of file diff --git a/mitiq/schema/experiments/ddd_experiments/ddd_experiment_with_technique.json 
b/mitiq/schema/experiments/ddd_experiments/ddd_experiment_with_technique.json deleted file mode 100644 index 29baefa685..0000000000 --- a/mitiq/schema/experiments/ddd_experiments/ddd_experiment_with_technique.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "technique": "ddd", - "rule": "xx" -} \ No newline at end of file diff --git a/mitiq/schema/experiments/ddd_experiments/ddd_experiment_xyxy.json b/mitiq/schema/experiments/ddd_experiments/ddd_experiment_xyxy.json deleted file mode 100644 index c31c7dd2cb..0000000000 --- a/mitiq/schema/experiments/ddd_experiments/ddd_experiment_xyxy.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "rule": "xyxy" -} \ No newline at end of file diff --git a/mitiq/schema/experiments/ddd_experiments/ddd_experiment_yy.json b/mitiq/schema/experiments/ddd_experiments/ddd_experiment_yy.json deleted file mode 100644 index 316070bf8d..0000000000 --- a/mitiq/schema/experiments/ddd_experiments/ddd_experiment_yy.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "rule": "yy" -} \ No newline at end of file diff --git a/mitiq/schema/experiments/pec_experiments/pec_experiment.json b/mitiq/schema/experiments/pec_experiments/pec_experiment.json deleted file mode 100644 index 860258f4c5..0000000000 --- a/mitiq/schema/experiments/pec_experiments/pec_experiment.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "rule": "xx" -} \ No newline at end of file diff --git a/mitiq/schema/experiments/rem_experiments/rem_experiment.json b/mitiq/schema/experiments/rem_experiments/rem_experiment.json deleted file mode 100644 index 860258f4c5..0000000000 --- a/mitiq/schema/experiments/rem_experiments/rem_experiment.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "rule": "xx" -} \ No newline at end of file diff --git a/mitiq/schema/experiments/zne_experiments/zne_experiment.json b/mitiq/schema/experiments/zne_experiments/zne_experiment.json deleted file mode 100644 index 2faaf85937..0000000000 --- a/mitiq/schema/experiments/zne_experiments/zne_experiment.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - 
"noise_scaling_factors": [2, 4, 6, 8], - "noise_scaling_method": "global", - "extrapolation": "richardson", - "scale_factor": 2 - } \ No newline at end of file diff --git a/mitiq/schema/experiments/zne_experiments/zne_experiment_exp.json b/mitiq/schema/experiments/zne_experiments/zne_experiment_exp.json deleted file mode 100644 index c4efd4f2be..0000000000 --- a/mitiq/schema/experiments/zne_experiments/zne_experiment_exp.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "noise_scaling_factors": [2, 4, 6, 8], - "noise_scaling_method": "global", - "extrapolation": "exponential" - } \ No newline at end of file diff --git a/mitiq/schema/experiments/zne_experiments/zne_experiment_lin.json b/mitiq/schema/experiments/zne_experiments/zne_experiment_lin.json deleted file mode 100644 index fafc23b6e6..0000000000 --- a/mitiq/schema/experiments/zne_experiments/zne_experiment_lin.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "noise_scaling_factors": [2, 4, 6, 8], - "noise_scaling_method": "global", - "extrapolation": "linear" - } \ No newline at end of file diff --git a/mitiq/schema/experiments/zne_experiments/zne_experiment_poly.json b/mitiq/schema/experiments/zne_experiments/zne_experiment_poly.json deleted file mode 100644 index 92a4cd7378..0000000000 --- a/mitiq/schema/experiments/zne_experiments/zne_experiment_poly.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "noise_scaling_factors": [2, 4, 6, 8], - "noise_scaling_method": "global", - "extrapolation": "polynomial" - } \ No newline at end of file diff --git a/mitiq/schema/experiments/zne_experiments/zne_experiment_with_technique.json b/mitiq/schema/experiments/zne_experiments/zne_experiment_with_technique.json deleted file mode 100644 index 92f30ba11e..0000000000 --- a/mitiq/schema/experiments/zne_experiments/zne_experiment_with_technique.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "technique": "zne", - "noise_scaling_factors": [2, 4, 6, 8], - "noise_scaling_method": "global", - "extrapolation": "richardson", - "scale_factor": 2 - } \ No newline at 
end of file diff --git a/mitiq/schema/functions_to_use.py b/mitiq/schema/functions_to_use.py deleted file mode 100644 index f13bbd07b8..0000000000 --- a/mitiq/schema/functions_to_use.py +++ /dev/null @@ -1,130 +0,0 @@ - -import random -import json -import jsonschema -from jsonschema import validate -from qiskit_aer.noise import NoiseModel, ReadoutError, depolarizing_error -from qiskit_aer import AerSimulator -from qiskit import transpile - - -def load(schema_path): - with open(schema_path, 'r') as file: - return json.load(file) - -def add_random_pauli_identities(circuit, k): - """Adds random Pauli gate pairs that act as identity on each qubit in the circuit. This is bc zne was throwing an error when that the cirucit was very short for zne to work. - - Parameters: - circuit: qiskit circuit to modify. - k (int): number of Pauli identity operations to add (must be even) - - Returns: - qiskit circuit with added identities - """ - - circuit = circuit.copy() - if k % 2 != 0: - raise ValueError("k must be an even integer for the sequence to be equivalent to identity.") - - num_qubits = circuit.num_qubits - pauli_pairs = [('x', 'x'), ('y', 'y'), ('z', 'z')] # Possible Pauli identity pairs - - for qubit in range(num_qubits): - for _ in range(k // 2): - pauli_1, pauli_2 = random.choice(pauli_pairs) - getattr(circuit, pauli_1)(qubit) - getattr(circuit, pauli_2)(qubit) - - return circuit - - -def validate_experiment(experiment, schema): - try: - validate(instance=experiment, schema=schema) - print("validation passed") - except jsonschema.exceptions.ValidationError as e: - print("validation failed") - return None - - - -def validate_composed_experiment(composed_experiment, schema): - """This function validates a composed experiment against multiple schemas, ensuring the techniques used are compatible, accounting for their order as well - - Parameters: - experiment: dicitionary containing the key "techniques" whose value is an array of experiments the user wishes to compose. 
- schema: this is the schema to validate the individa - - """ - try: - validate(instance=composed_experiment, schema=schema) - print("individual experiment validation passed") - - except jsonschema.exceptions.ValidationError as e: - print(f"individual experiment validation failed") - - experiments = composed_experiment["experiments"] - techniques = [experiment["technique"] for experiment in experiments] - - # REM has to go first in the list of techniques - if "rem" in techniques and techniques[0] != "rem": - raise ValueError("REM is only compatible if applied as the first error mitigation technique.") - - - - # so now we have the ordered list of techniques the user is trying to use, so now we will check compatibility: - # zne, pec, and ddd are the three we have right now: - # -------> zne+pec: if zne is first, would want to apply pec to each of the noise scaled s=circuits, so would not be using execute_with_zne - # if pec is first, would want to apply zne to each of the sampled circuits - # -------> zne+ddd: if zne is first, would want to apply ddd to each of the noise scaled circuits - # if ddd is first,would noise scale the circuit with the windows filled (not sure this makes sense to do?) 
- # -------> pec+ddd: if pec is first, would want to apply ddd to each of the sampled circuits - # if ddd is first, would want to apply pec to each circuits with slack windows filled - # -------> zne+pec+ddd: if zne is first, would want to apply pec to each of the noise scaled circuits, then apply ddd to each of the pec circuits - # REM needs and executor that returns raw measurment results - - - - return None - - -def basic_noise(n_qubits, prob=0.005): - - noise_model = NoiseModel() - - for i in range(n_qubits): - readout_err = ReadoutError([[0.99, 0.01], [0.2, 0.8]]) - noise_model.add_readout_error(readout_err, [i]) - - depolarizing_err1 = depolarizing_error(prob, num_qubits=1) - depolarizing_err2 = depolarizing_error(prob, num_qubits=2) - noise_model.add_all_qubit_quantum_error(depolarizing_err1, ['h', 'x', 'y', 'z']) - noise_model.add_all_qubit_quantum_error(depolarizing_err2, ['cx']) - - return noise_model - - - -# simple exector that returns the probability of measuring either |00...0> -# eventually, will want an exector that does not actually run the circuits at all and instead computes overhead just from the mitigation experiment info - -def execute_0s_ideal(circuit, noise_model=True): - circ = circuit.copy() - circ.measure_all() - num_qubits = circ.num_qubits - - if noise_model: - noise_model = basic_noise(num_qubits) - - backend = AerSimulator(noise_model=noise_model) - transpiled_circuit = transpile(circ, optimization_level=0, backend=backend) - - results = backend.run(transpiled_circuit, optimization_level=0, shots=1000).result() - counts = results.get_counts(transpiled_circuit) - - total_shots = sum(counts.values()) - probabilities = {state: count / total_shots for state, count in counts.items()} - - expectation_value = probabilities['0'*num_qubits] - return expectation_value \ No newline at end of file diff --git a/mitiq/schema/overhead_testing.ipynb b/mitiq/schema/overhead_testing.ipynb deleted file mode 100644 index 7ec2153fd7..0000000000 --- 
a/mitiq/schema/overhead_testing.ipynb +++ /dev/null @@ -1,431 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import jsonschema\n", - "import random\n", - "import itertools\n", - "import numpy as np\n", - "from jsonschema import validate\n", - "from qiskit import transpile\n", - "from qiskit_aer import AerSimulator\n", - "from qiskit_aer.noise import NoiseModel, ReadoutError, depolarizing_error\n", - "from qiskit.circuit.random import random_circuit\n", - "from mitiq import zne, ddd\n", - "from mitiq.zne.scaling.folding import fold_global, fold_gates_at_random, fold_all\n", - "from mitiq.zne.scaling.layer_scaling import get_layer_folding\n", - "from mitiq.zne.scaling.identity_insertion import insert_id_layers\n", - "from mitiq.benchmarks.ghz_circuits import generate_ghz_circuit\n", - "from qiskit.circuit import Gate\n", - "from collections import defaultdict\n", - "import functools\n", - "import tomlkit\n", - "import toml" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# ZNE:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "# Load Schema:\n", - "\n", - "def load(schema_path):\n", - " with open(schema_path, 'r') as file:\n", - " return json.load(file)\n", - " \n", - "zne_schema =load('./schema/zne_schema.json')" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "# Create a test \"batch\" experiment:\n", - "\n", - "zne_batch_test = {\"noise_scaling_factors\": [[1, 1.25, 1.5], [1,2,3], [2,4,6]], # Noise scaling values\n", - " \"noise_scaling_method\": [\"global\"], # Folding method\n", - " \"extrapolation\": [\"polynomial\", \"linear\"], # Extrapolation method\n", - " }" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Deconstructing a Batch Dictionary:" - ] - }, - { - "cell_type": "code", - 
"execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "# Create single experiments from batch object:\n", - "\n", - "# Define a function to make all combinations of experiments from a \"batch dictionary\" which should be formatted like the test case above\n", - "def make_experiment_list(batch_dict):\n", - "\n", - " # Initialize empty list where we will append each new experiment\n", - " exp_list = []\n", - "\n", - " # Make list with all combinations of key values from our \"batch dictionary\"\n", - " combo_list = list(itertools.product(batch_dict['noise_scaling_factors'], \n", - " batch_dict['noise_scaling_method'], \n", - " batch_dict['extrapolation']))\n", - " # Iterate over the list\n", - " for k in range(len(combo_list)):\n", - "\n", - " # Initialize single experiment dictionary with keys\n", - " exp = {x: set() for x in ['noise_scaling_factors', 'noise_scaling_method', 'extrapolation']}\n", - "\n", - " # Map each key to its unique value from our list of combinations\n", - " exp['noise_scaling_factors'] = combo_list[k][0]\n", - " exp['noise_scaling_method'] = combo_list[k][1]\n", - " exp['extrapolation'] = combo_list[k][2]\n", - "\n", - " # Pull out each experiment\n", - " exp_list.append(exp)\n", - "\n", - " # Returns a list of experiments\n", - " return exp_list" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'noise_scaling_factors': [1, 1.25, 1.5],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'polynomial'},\n", - " {'noise_scaling_factors': [1, 1.25, 1.5],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'linear'},\n", - " {'noise_scaling_factors': [1, 2, 3],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'polynomial'},\n", - " {'noise_scaling_factors': [1, 2, 3],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'linear'},\n", - " {'noise_scaling_factors': [2, 4, 6],\n", - " 
'noise_scaling_method': 'global',\n", - " 'extrapolation': 'polynomial'},\n", - " {'noise_scaling_factors': [2, 4, 6],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'linear'}]" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Implementing our function using the test case defined above:\n", - "\n", - "make_experiment_list(zne_batch_test)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Validating a batch of experiments:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "# Validate each experiment in our batch object:\n", - "\n", - "# Create function that takes in a batch dictionary and schema to validate against\n", - "def batch_validate(batch_dict, schema):\n", - "\n", - " # Initialize empty list for validation results\n", - " validation_results = []\n", - "\n", - " # Create list of experiments\n", - " formatted_batch = make_experiment_list(batch_dict)\n", - "\n", - " # Iterate over list of experiments and individually validate\n", - " for k in formatted_batch:\n", - " try:\n", - " validate(instance=k, schema=schema)\n", - " result = \"validation passed\"\n", - " except jsonschema.exceptions.ValidationError as e:\n", - " result = \"validation failed\"\n", - "\n", - " # Pull out validation results\n", - " validation_results.append(result)\n", - " \n", - " return validation_results" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['validation passed',\n", - " 'validation passed',\n", - " 'validation passed',\n", - " 'validation passed',\n", - " 'validation passed',\n", - " 'validation passed']" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Implementing our batch validate function using the same test case:\n", - "\n", - 
"batch_validate(zne_batch_test, zne_schema)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "# Ella's basic noise model:\n", - "\n", - "def basic_noise(n_qubits, prob=0.005):\n", - "\n", - " noise_model = NoiseModel()\n", - "\n", - " for i in range(n_qubits):\n", - " readout_err = ReadoutError([[0.99, 0.01], [0.2, 0.8]])\n", - " noise_model.add_readout_error(readout_err, [i])\n", - " \n", - " depolarizing_err1 = depolarizing_error(prob, num_qubits=1)\n", - " depolarizing_err2 = depolarizing_error(prob, num_qubits=2)\n", - " noise_model.add_all_qubit_quantum_error(depolarizing_err1, ['h', 'x', 'y', 'z'])\n", - " noise_model.add_all_qubit_quantum_error(depolarizing_err2, ['cx'])\n", - "\n", - " return noise_model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Overhead Related Functions:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "# Single experiment executor (very slightly modified from Ella's):\n", - "# Likely to be unused, but just in case:\n", - "\n", - "def execute_no_overhead(circuit, backend, shots): # Set noise_model to thermal, basic, noiseless\n", - " circ = circuit.copy()\n", - " circ.measure_all()\n", - " num_qubits = circ.num_qubits\n", - " \n", - " transpiled_circuit = transpile(circ, optimization_level=0, backend=backend)\n", - "\n", - " results = backend.run(transpiled_circuit, optimization_level=0, shots=shots).result()\n", - " counts = results.get_counts(transpiled_circuit)\n", - "\n", - " total_shots = sum(counts.values())\n", - " probabilities = {state: count / total_shots for state, count in counts.items()}\n", - "\n", - " expectation_value = probabilities['0'*num_qubits]+probabilities['1'*num_qubits]\n", - " return expectation_value" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "def get_original_circ_counts(circuit, 
backend, shots):\n", - "\n", - " circ = circuit.copy()\n", - " circ.measure_all()\n", - " num_qubits = circ.num_qubits\n", - "\n", - " og_params = {x: set() for x in ['depth', '1qbit_gates', '2qbit_gates', 'exp_val']}\n", - "\n", - " transpiled_circuit = transpile(circ, optimization_level=0, backend=backend)\n", - "\n", - " og_params[\"depth\"] = transpiled_circuit.depth()\n", - " counts = defaultdict(int)\n", - "\n", - " for inst in transpiled_circuit.data:\n", - " if isinstance(inst.operation, Gate):\n", - " counts[len(inst.qubits)] += 1\n", - " \n", - " o = {k: v for k, v in counts.items()}\n", - " \n", - " single_qubit_gates = (o.get(1)) if o.get(1) is not None else 0\n", - " two_qubit_gates = (o.get(2)) if o.get(2) is not None else 0\n", - " \n", - " og_params[\"1qbit_gates\"] = single_qubit_gates\n", - " og_params[\"2qbit_gates\"] = two_qubit_gates \n", - "\n", - " results = backend.run(transpiled_circuit, optimization_level=0, shots=shots).result()\n", - " counts = results.get_counts(transpiled_circuit)\n", - "\n", - " total_shots = sum(counts.values())\n", - " probabilities = {state: count / total_shots for state, count in counts.items()}\n", - "\n", - " expectation_value = probabilities['0'*num_qubits]+probabilities['1'*num_qubits]\n", - " og_params[\"exp_val\"] = expectation_value\n", - " \n", - " return og_params" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "def scaled_overhead(circuit, backend, shots):\n", - "\n", - " og_params = get_original_circ_counts(circuit, backend, shots)\n", - "\n", - " inst = schema_executors.overhead_executors()\n", - " \n", - " mit_params = inst.write_metadata(circuit=circuit, shots=shots, backend=backend)\n", - "\n", - " scaled_params = {x: set() for x in ['add_circs', 'add_depth', 'add_1qbit_gates', 'add_2qbit_gates', 'exp_val_improvement']}\n", - "\n", - " scaled_params[\"add_circs\"] = mit_params[\"mit_circs\"]\n", - " scaled_params[\"add_depth\"] = 
mit_params[\"mit_depth\"] - og_params[\"depth\"]\n", - " scaled_params[\"add_1qbit_gates\"] = mit_params[\"mit_1qbit_gates\"] - og_params[\"1qbit_gates\"]\n", - " scaled_params[\"add_2qbit_gates\"] = mit_params[\"mit_2qbit_gates\"] - og_params[\"2qbit_gates\"]\n", - " scaled_params[\"add_1qbit_gates\"] = mit_params[\"mit_1qbit_gates\"] - og_params[\"1qbit_gates\"]\n", - " scaled_params[\"exp_val_improvement\"] = og_params[\"exp_val\"]/mit_params[\"mit_exp_val\"]\n", - "\n", - " return scaled_params" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Testing:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " ┌───┐ ┌───┐┌───┐ \n", - "q_0: ┤ H ├──■──┤ X ├┤ X ├───────────────\n", - " └───┘┌─┴─┐└───┘├───┤┌───┐┌───┐┌───┐\n", - "q_1: ─────┤ X ├──■──┤ X ├┤ X ├┤ Y ├┤ Y ├\n", - " └───┘┌─┴─┐├───┤├───┤└───┘└───┘\n", - "q_2: ──────────┤ X ├┤ Y ├┤ Y ├──────────\n", - " └───┘└───┘└───┘ \n" - ] - } - ], - "source": [ - "# Making a 3-qubit test GHZ circuit:\n", - "\n", - "n = 3\n", - "ghz_circ = generate_ghz_circuit(n, return_type='qiskit')\n", - "\n", - "ghz_circ.x([0,1])\n", - "ghz_circ.x([0,1])\n", - "ghz_circ.y([1,2])\n", - "ghz_circ.y([1,2])\n", - "\n", - "print(ghz_circ)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'mit_circs': 3, 'mit_1qbit_gates': 53, 'mit_2qbit_gates': 12, 'mit_depth': 44, 'mit_exp_val': 0.5899999999999986}\n" - ] - } - ], - "source": [ - "# import schema_executors\n", - "from schema_executors import OverheadExecutors\n", - "\n", - "inst = OverheadExecutors()\n", - "\n", - "test_backend = AerSimulator(noise_model=basic_noise(ghz_circ.num_qubits))\n", - "\n", - "get_original_circ_counts(ghz_circ, test_backend, 1000)\n", - "\n", - "test_path = '.test.toml'\n", - "\n", - "metadata = 
inst.write_metadata(circuit=ghz_circ, shots=1000, backend=test_backend)\n", - "print(metadata)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/mitiq/schema/schema/composition_schema.json b/mitiq/schema/schema/composition_schema.json deleted file mode 100644 index 0872071958..0000000000 --- a/mitiq/schema/schema/composition_schema.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "file:////Users/ellac/Documents/Unitary_Fund/qem_schema/schema/composition_schema.json", - "type": "object", - "properties": { - "experiments": { - "type": "array", - "description": "Array of individual experiments the user wishes to check compatibility for", - "items": { - "oneOf": [ - { "$ref": "./zne_schema.json" }, - { "$ref": "./ddd_schema.json" }, - { "$ref": "./pec_schema.json" }, - { "$ref": "./rem_schema.json" } - ] - }, - "minItems": 1, - "uniqueItems": true - } - }, - "required": ["experiments"], - "additionalProperties": false - } \ No newline at end of file diff --git a/mitiq/schema/schema/composition_schema_ordered.json b/mitiq/schema/schema/composition_schema_ordered.json deleted file mode 100644 index 75ba1124b4..0000000000 --- a/mitiq/schema/schema/composition_schema_ordered.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "file:////Users/ellac/Documents/Unitary_Fund/qem_schema/schema/composition_schema.json", - "type": "object", - "properties": { - "techniques": { - "type": "array", - "description": "Array of techniques to validate", - "items": { - "oneOf": [ - { "$ref": 
"./zne_schema.json" }, - { "$ref": "./ddd_schema.json" }, - { "$ref": "./pec_schema.json" } - ] - }, - "minItems": 1, - "uniqueItems": true - } - }, - "required": ["techniques"], - "additionalProperties": false - } \ No newline at end of file diff --git a/mitiq/schema/schema_executors.py b/mitiq/schema/schema_executors.py deleted file mode 100644 index 923f75611a..0000000000 --- a/mitiq/schema/schema_executors.py +++ /dev/null @@ -1,104 +0,0 @@ -import json -import jsonschema -import random -import itertools -import numpy as np -from jsonschema import validate -from qiskit import transpile -from qiskit_aer import AerSimulator -from qiskit_aer.noise import NoiseModel, ReadoutError, depolarizing_error -from qiskit.circuit.random import random_circuit -from mitiq import zne, ddd -from mitiq.zne.scaling.folding import fold_global, fold_gates_at_random, fold_all -from mitiq.zne.scaling.layer_scaling import get_layer_folding -from mitiq.zne.scaling.identity_insertion import insert_id_layers -from mitiq.benchmarks.ghz_circuits import generate_ghz_circuit -from qiskit.circuit import Gate -from collections import defaultdict -import functools -import tomlkit -import toml - -class OverheadExecutors(): - - def __init__(self): - self.count_circs = [] - self.count_1qbit_gates = [] - self.count_2qbit_gates = [] - self.depths = [] - - # count_circs = [] - # count_1qbit_gates = [] - # count_2qbit_gates = [] - # depths = [] - - def execute_w_metadata(self, circuit, shots, backend): - - global count_circs, count_1qbit_gates, count_2qbit_gates, depths - - qc = circuit.copy() - qc.measure_all() - num_qubits = qc.num_qubits - - depth = qc.depth() - - gate_counts = {1: 0, 2: 0} - for inst in circuit.data: - if isinstance(inst.operation, Gate): - qubit_count = len(inst.qubits) - gate_counts[qubit_count] = gate_counts.get(qubit_count, 0) + 1 - - self.count_circs.append(1) - self.count_1qbit_gates.append(gate_counts[1]) - self.count_2qbit_gates.append(gate_counts[2]) - 
self.depths.append(depth) - - - transpiled_qc = transpile(qc, backend=backend, optimization_level=0) - result = backend.run(transpiled_qc, shots=shots, optimization_level=0).result() - counts = result.get_counts(transpiled_qc) - - total_shots = sum(counts.values()) - probabilities = {state: count / total_shots for state, count in counts.items()} - - expectation_value = probabilities['0'*num_qubits]+probabilities['1'*num_qubits] - - return expectation_value - - - - def write_metadata(self, circuit, shots, backend): - # def write_metadata(self, circuit, shots, backend, metadata_path): - - #doc = tomlkit.document() - - metadata_dict = {} - - metadata_dict = {x: set() for x in ['mit_circs', 'mit_1qbit_gates', 'mit_2qbit_gates', 'mit_depth', 'mit_exp_val']} - - new_exec = functools.partial(self.execute_w_metadata, shots=shots, backend=backend) - - mit_exp_val = zne.execute_with_zne(circuit, new_exec) - - metadata_dict["mit_circs"] = sum(self.count_circs) - metadata_dict["mit_1qbit_gates"] = sum(self.count_1qbit_gates) - metadata_dict["mit_2qbit_gates"] = sum(self.count_2qbit_gates) - metadata_dict["mit_depth"] = sum(self.depths) - metadata_dict["mit_exp_val"] = mit_exp_val - - #doc.update(metadata_dict) - #doc.add(tomlkit.nl()) - - #with open(metadata_path, 'w') as f: - # f.write(tomlkit.dumps(doc)) - - self.count_circs = [] - self.count_1qbit_gates = [] - self.count_2qbit_gates = [] - self.depths = [] - - return metadata_dict - - - - diff --git a/mitiq/schema/schema_testing.ipynb b/mitiq/schema/schema_testing.ipynb deleted file mode 100644 index 1f28b93ce3..0000000000 --- a/mitiq/schema/schema_testing.ipynb +++ /dev/null @@ -1,713 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import jsonschema\n", - "import numpy as np\n", - "from jsonschema import validate\n", - "from qiskit import transpile\n", - "from qiskit_aer import AerSimulator\n", - "from mitiq import zne, ddd, 
pec\n", - "from mitiq.zne.scaling.folding import fold_global, fold_gates_at_random, fold_all\n", - "from mitiq.zne.scaling.layer_scaling import get_layer_folding\n", - "from mitiq.zne.scaling.identity_insertion import insert_id_layers\n", - "from mitiq.benchmarks import generate_rb_circuits\n", - "from arg_function_maps import schema_to_params\n", - "from functions_to_use import basic_noise, execute_0s_ideal" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# ZNE" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "

\n", - "Load the defined ZNE schema and an example of a ZNE experiment (experiment) using the schema-defined structure
\n", - "     zne_schema: defines the shape and requirements of JSON data, in our case the parameters of a zne experiment
\n", - "     zne_experiment: an example of the type of input data we will use the schema to validate, defines a zne experiment\n", - "

" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "def load(schema_path):\n", - " with open(schema_path, 'r') as file:\n", - " return json.load(file)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "zne_schema =load('./schema/zne_schema.json')\n", - "zne_experiment = load('./experiments/zne_experiments/zne_experiment.json')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Both of these objects behave a lot like python dictionaries with several nested dictionaries, and we can examine their contents as such:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "top-level keys: dict_keys(['$schema', 'title', 'description', 'type', 'properties', 'additionalProperties']) \n", - " properties keys: dict_keys(['technique', 'noise_scaling_factors', 'noise_scaling_method', 'scale_factor', 'extrapolation']) \n", - " \n", - " noise_scaling_factors keys and values: \n", - " {\n", - " \"description\": \"Real scale factors used in the Factory for extrapolating to zero noise case, one for each point that is to be extrapolated\",\n", - " \"type\": \"array\",\n", - " \"items\": {\n", - " \"type\": \"number\"\n", - " }\n", - "} \n", - " extrapolation keys and values: \n", - " {\n", - " \"description\": \"Method used to extrapolate the noise scaling factors to points not in the original set\",\n", - " \"type\": \"string\",\n", - " \"enum\": [\n", - " \"linear\",\n", - " \"richardson\",\n", - " \"polynomial\",\n", - " \"exponential\",\n", - " \"poly-exp\",\n", - " \"adaptive-exp\"\n", - " ]\n", - "}\n" - ] - } - ], - "source": [ - "print(\"top-level keys:\", zne_schema.keys(), '\\n properties keys:',\n", - " zne_schema['properties'].keys(), '\\n \\n noise_scaling_factors keys and values: \\n',\n", - " 
json.dumps(zne_schema['properties']['noise_scaling_factors'], indent=4), '\\n extrapolation keys and values: \\n',\n", - " json.dumps(zne_schema['properties']['extrapolation'], indent=4)\n", - ")\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "top-level keys:
\n", - "
    \n", - "
  • $schema: reference to the version of JSON Schema standard being used
  • \n", - "
  • title: readable title for the schema, often for documentation purposes
  • \n", - "
  • description: explanation of the schema's purpose and structure, providing context for users
  • \n", - "
  • type: specifies the data type (object, array, string, etc) of the input data being validated
  • \n", - "
  • properties: defines object that the input data may be expected to have, along with their structure and validation criteria.
  • \n", - "
      \n", - "
    • So looking at the at the properties defined in zne_schema, we see the familiar options that are avaialable when experimentning zne
    • \n", - "
    • Specifically looking at the noise_scaling_factors and extrapolation properties, we can see how different properties can have different expected data types and the 'enum' key can be used to specify a fixed list of options
    • \n", - "
    \n", - "
\n", - "\n", - "
\n", - "
\n", - "\n", - "The contents of zne_experiment show an example of what properties may be specified and how:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\n", - " \"noise_scaling_factors\": [\n", - " 2,\n", - " 4,\n", - " 6,\n", - " 8\n", - " ],\n", - " \"noise_scaling_method\": \"global\",\n", - " \"extrapolation\": \"richardson\",\n", - " \"scale_factor\": 2\n", - "}\n" - ] - } - ], - "source": [ - "print(json.dumps(zne_experiment, indent=4))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "now let's use the schema to validate this experiment (check that it has the necessary structure and parameters defined)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "def validate_experiment(experiment, schema):\n", - " try:\n", - " validate(instance=experiment, schema=schema)\n", - " print(\"validation passed\")\n", - " except jsonschema.exceptions.ValidationError as e:\n", - " print(\"validation failed\")\n", - " return None" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "validation passed\n" - ] - } - ], - "source": [ - "validate_experiment(zne_experiment, zne_schema)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "now that the validation has passed, we can extract the implementation parameters from zne_experiment to actually perform the mitigation" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "experiment = zne_experiment" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " ┌──────────┐ ┌────┐ ┌──────────┐ ┌─────────┐┌────┐┌──────────┐»\n", - "q_0: ┤ Ry(-π/2) 
├───┤ √X ├───┤ Ry(-π/2) ├─■─┤ Ry(π/2) ├┤ √X ├┤ Ry(-π/2) ├»\n", - " └──┬───┬───┘┌──┴────┴──┐└──────────┘ │ ├─────────┤├───┬┘└┬───────┬─┘»\n", - "q_1: ───┤ X ├────┤ Ry(-π/2) ├─────────────■─┤ Ry(π/2) ├┤ Y ├──┤ Rx(0) ├──»\n", - " └───┘ └──────────┘ └─────────┘└───┘ └───────┘ »\n", - "« ┌───────┐ ┌─────────┐ ┌──────┐ ┌──────────┐ ┌────┐ ┌──────────┐»\n", - "«q_0: ┤ Rx(0) ├─■─┤ Ry(π/2) ├─■───┤ √Xdg ├──┤ Ry(-π/2) ├───┤ √X ├──┤ Ry(-π/2) ├»\n", - "« └───────┘ │ └─┬──────┬┘ │ ┌─┴──────┴─┐└──┬────┬──┘┌──┴────┴─┐└──────────┘»\n", - "«q_1: ──────────■───┤ √Xdg ├──■─┤ Ry(-π/2) ├───┤ √X ├───┤ Ry(π/2) ├────────────»\n", - "« └──────┘ └──────────┘ └────┘ └─────────┘ »\n", - "« ┌───────┐ ┌─────────┐┌────┐\n", - "«q_0: ┤ Rx(0) ├─■─┤ Ry(π/2) ├┤ √X ├\n", - "« └───────┘ │ └──┬───┬──┘├────┤\n", - "«q_1: ──────────■────┤ Y ├───┤ √X ├\n", - "« └───┘ └────┘\n" - ] - } - ], - "source": [ - "# for the sake of this example, we will use an n-qubit randomized benchmarking circuit, which we can generate using mitiq's generate_rb_circuits function\n", - "n_qubits = 2\n", - "n_cliffords = 2\n", - "trials = 1\n", - "seed = 22\n", - "\n", - "rb_circ = generate_rb_circuits(n_qubits=n_qubits, num_cliffords=n_cliffords, trials=trials, return_type='qiskit', seed=seed)[0]\n", - "\n", - "print(rb_circ)" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Counts: {'00': 1000}\n", - "Probabilities: {'00': 1.0}\n", - "|00...0> probability: 1.0 , error: 0.0\n" - ] - } - ], - "source": [ - "# let's calculate the noisless case of executing the circuit. 
A randomized benchmarking circuit should be equivalent to the identity operation, so we should expect the ideal output to be the all-zero state\n", - "\n", - "ideal_circ = rb_circ.copy()\n", - "ideal_circ.measure_all()\n", - "\n", - "ideal_backend = AerSimulator()\n", - "ideal_transpiled = transpile(ideal_circ, optimization_level=0, backend=ideal_backend)\n", - "result = ideal_backend.run(ideal_transpiled, optimization_level=0, shots=1000).result()\n", - "\n", - "counts = result.get_counts(ideal_transpiled)\n", - "print(\"Counts:\", counts)\n", - "\n", - "total_shots = sum(counts.values())\n", - "probabilities = {state: count / total_shots for state, count in counts.items()}\n", - "print(\"Probabilities:\", probabilities)\n", - "\n", - "print('|00...0> probability:', probabilities['0'*n_qubits], ', error:', np.abs(1.0-probabilities['0'*n_qubits]))\n" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Counts: {'01': 20, '11': 7, '10': 51, '00': 922}\n", - "Probabilities: {'01': 0.02, '11': 0.007, '10': 0.051, '00': 0.922}\n", - "|00...0> probability: 0.922 , error: 0.07799999999999996\n" - ] - } - ], - "source": [ - "circ_noisy = rb_circ.copy()\n", - "circ_noisy.measure_all()\n", - "\n", - "noise_model = basic_noise(n_qubits=n_qubits)\n", - "noisy_backend = AerSimulator(noise_model=noise_model)\n", - "transpiled_noisy = transpile(circ_noisy, optimization_level=0, backend=noisy_backend)\n", - "\n", - "result = noisy_backend.run(transpiled_noisy, optimization_level=0, shots=1000).result()\n", - "counts = result.get_counts()\n", - "print(\"Counts:\", counts)\n", - "\n", - "total_shots = sum(counts.values())\n", - "probabilities = {state: count / total_shots for state, count in counts.items()}\n", - "\n", - "print(\"Probabilities:\", probabilities)\n", - "print('|00...0> probability:', probabilities['0'*n_qubits], ', error:', 
np.abs(1.0-probabilities['0'*n_qubits]))\n" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "ideal EV: 1.0\n", - "unmitigated noisy EV: 0.937\n" - ] - } - ], - "source": [ - "# simple exector that returns the probability of measuring either |00...0>\n", - "execute = execute_0s_ideal\n", - "\n", - "# we can also just use this executor to get the ideal and unmitigated expectation values:\n", - "\n", - "print('ideal EV: ', execute(rb_circ, noise_model=False))\n", - "print('unmitigated noisy EV: ', execute(rb_circ))" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "# this is sort of a band-aid solution to the problem of mapping strings to functions, need to think about how to do this better\n", - "\n", - "extrapolation_map = {\n", - " \"linear\": zne.inference.LinearFactory(scale_factors=experiment['noise_scaling_factors']),\n", - " \"richardson\": zne.inference.RichardsonFactory(scale_factors=experiment['noise_scaling_factors']),\n", - " \"polynomial\": zne.inference.PolyFactory(scale_factors=experiment['noise_scaling_factors'], order=0), # need to add order as an option in the metashcema here ...\n", - " \"exponential\": zne.inference.ExpFactory(scale_factors=experiment['noise_scaling_factors']),\n", - " \"poly-exp\": zne.inference.PolyExpFactory(scale_factors=experiment['noise_scaling_factors'], order=0), # .. 
and here\n", - " \"adaptive-exp\": zne.inference.AdaExpFactory(scale_factor=experiment['noise_scaling_factors'][0], steps=4, asymptote=None), # need to adjust metaschema here for steps\n", - "}\n", - "\n", - "noise_scaling_map = {\n", - " \"global\": fold_global,\n", - " \"local_random\": fold_gates_at_random,\n", - " \"local_all\": fold_all,\n", - " \"layer\": get_layer_folding,\n", - " \"identity_scaling\": insert_id_layers\n", - "}" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "mitigated_result = zne.execute_with_zne(circuit=rb_circ, executor=execute, factory=extrapolation_map[zne_experiment['extrapolation']], scale_noise=noise_scaling_map[zne_experiment['noise_scaling_method']])" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "1.0719999999999994" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "mitigated_result" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "now we can look at experiments that use different extrapolation methods to see how thier mitigated results compare" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "validation passed\n", - "validation passed\n", - "validation passed\n" - ] - } - ], - "source": [ - "linear_experiment = load('./experiments/zne_experiments/zne_experiment_lin.json')\n", - "polynomial_experiment = load('./experiments/zne_experiments/zne_experiment_poly.json')\n", - "exponential_experiment = load('./experiments/zne_experiments/zne_experiment_exp.json')\n", - "\n", - "validate_experiment(linear_experiment, zne_schema)\n", - "validate_experiment(polynomial_experiment, zne_schema)\n", - "validate_experiment(exponential_experiment, zne_schema)" - ] - }, - { - "cell_type": "code", - 
"execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Linear result: 0.9664999999999999\n", - "Polynomial result: 0.79775\n", - "Exponential result: 1.0184815115946293\n", - "Richardson result: 1.0719999999999994\n" - ] - } - ], - "source": [ - "linear_result = zne.execute_with_zne(circuit=rb_circ, executor=execute, factory=extrapolation_map[linear_experiment['extrapolation']], scale_noise=noise_scaling_map[linear_experiment['noise_scaling_method']])\n", - "polynomial_result = zne.execute_with_zne(circuit=rb_circ, executor=execute, factory=extrapolation_map[polynomial_experiment['extrapolation']], scale_noise=noise_scaling_map[polynomial_experiment['noise_scaling_method']])\n", - "exponential_result = zne.execute_with_zne(circuit=rb_circ, executor=execute, factory=extrapolation_map[exponential_experiment['extrapolation']], scale_noise=noise_scaling_map[exponential_experiment['noise_scaling_method']])\n", - "\n", - "print(\"Linear result:\", linear_result)\n", - "print(\"Polynomial result:\", polynomial_result)\n", - "print(\"Exponential result:\", exponential_result)\n", - "print(\"Richardson result:\", mitigated_result)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# DDD" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "validation passed\n" - ] - } - ], - "source": [ - "ddd_schema =load('./schema/ddd_schema.json')\n", - "ddd_experiment = load('./experiments/ddd_experiments/ddd_experiment.json')\n", - "\n", - "validate_experiment(ddd_experiment, ddd_schema)" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [], - "source": [ - "rule_map = {\n", - " \"xx\": ddd.rules.xx,\n", - " \"yy\": ddd.rules.yy,\n", - " \"xyxy\": ddd.rules.xyxy, \n", - " \"general\": ddd.rules.general_rule, # need to adjust ddd_schema here to 
better allow for this \n", - " \"repeated\": ddd.rules.repeated_rule, # .. and this\n", - " \"custom\": None, # ... and this\n", - "}" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [], - "source": [ - "mitigated_result = ddd.execute_with_ddd(circuit=rb_circ, executor=execute, rule=rule_map[ddd_experiment['rule']])" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "experiment rule: xx\n" - ] - } - ], - "source": [ - "print(\"experiment rule: \", ddd_experiment['rule'])" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "validation passed\n", - "validation passed\n" - ] - } - ], - "source": [ - "yy_experiment = load('./experiments/ddd_experiments/ddd_experiment_yy.json')\n", - "xyxy_experiment = load('./experiments/ddd_experiments/ddd_experiment_xyxy.json')\n", - "\n", - "validate_experiment(yy_experiment, ddd_schema)\n", - "validate_experiment(xyxy_experiment, ddd_schema)" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [], - "source": [ - "yy_result = ddd.execute_with_ddd(circuit=rb_circ, executor=execute, rule=rule_map[yy_experiment['rule']])\n", - "xyxy_result = ddd.execute_with_ddd(circuit=rb_circ, executor=execute, rule=rule_map[xyxy_experiment['rule']])" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "xx result: 0.945\n", - "yy result: 0.948\n", - "xyxy result: 0.947\n" - ] - } - ], - "source": [ - "print(\"xx result:\", mitigated_result)\n", - "print(\"yy result:\", yy_result)\n", - "print(\"xyxy result:\", xyxy_result)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# PEC" - ] - }, - { - "cell_type": "code", - 
"execution_count": 33, - "metadata": {}, - "outputs": [], - "source": [ - "from mitiq.benchmarks import generate_rb_circuits\n", - "from qiskit_ibm_runtime.fake_provider import FakeJakartaV2 \n", - "from mitiq.pec.representations.depolarizing import represent_operations_in_circuit_with_local_depolarizing_noise\n", - "\n", - "n_qubits = 2\n", - "depth_circuit = 100\n", - "shots = 10 ** 4\n", - "\n", - "circuit = generate_rb_circuits(n_qubits, depth_circuit,return_type=\"qiskit\")[0]\n", - "circuit.measure_all()\n", - "\n", - "def execute_circuit(circuit):\n", - " \"\"\"Execute the input circuit and return the expectation value of |00..0><00..0|, which ideally should be 1 for randomized benchmarking circuits.\"\"\"\n", - " noisy_backend = FakeJakartaV2()\n", - " noisy_result = noisy_backend.run(circuit, shots=shots).result()\n", - " noisy_counts = noisy_result.get_counts(circuit)\n", - " noisy_expectation_value = noisy_counts[n_qubits * \"0\"] / shots\n", - " return noisy_expectation_value" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "13\n" - ] - } - ], - "source": [ - "noise_level = 0.01\n", - "reps = represent_operations_in_circuit_with_local_depolarizing_noise(rb_circ, noise_level)\n", - "print(len(reps))" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [], - "source": [ - "pec_schema = load('./schema/pec_schema.json')\n", - "# print(json.dumps(pec_schema['properties'], indent=4))" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "validation passed\n" - ] - } - ], - "source": [ - "pec_experiment = {\n", - " \"technique\": \"pec\",\n", - " \"operation_representations\": reps,\n", - " \"num_samples\": 100\n", - "}\n", - "\n", - "validate_experiment(pec_experiment, pec_schema)" - ] - }, - { - 
"cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "PEC result: 0.9512032176170859\n" - ] - } - ], - "source": [ - "params = schema_to_params(pec_experiment)\n", - "result = pec.execute_with_pec(rb_circ, execute, **params)\n", - "print(\"PEC result:\", result)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/mitiq/schema/sweep_testing.ipynb b/mitiq/schema/sweep_testing.ipynb deleted file mode 100644 index 1006b8af89..0000000000 --- a/mitiq/schema/sweep_testing.ipynb +++ /dev/null @@ -1,484 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "import json\n", - "import jsonschema\n", - "import random\n", - "import itertools\n", - "import numpy as np\n", - "from jsonschema import validate\n", - "from qiskit import transpile\n", - "from qiskit_aer import AerSimulator\n", - "from qiskit_aer.noise import NoiseModel, ReadoutError, depolarizing_error\n", - "from qiskit.circuit.random import random_circuit\n", - "from mitiq import zne, ddd\n", - "from mitiq.zne.scaling.folding import fold_global, fold_gates_at_random, fold_all\n", - "from mitiq.zne.scaling.layer_scaling import get_layer_folding\n", - "from mitiq.zne.scaling.identity_insertion import insert_id_layers\n", - "from mitiq.benchmarks.ghz_circuits import generate_ghz_circuit" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# ZNE:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ 
- "# Load Schema:\n", - "\n", - "def load(schema_path):\n", - " with open(schema_path, 'r') as file:\n", - " return json.load(file)\n", - " \n", - "zne_schema =load('./schema/zne_schema.json')" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [], - "source": [ - "# Create a test \"batch\" experiment:\n", - "\n", - "zne_batch_test = {\"noise_scaling_factors\": [[1, 1.25, 1.5], [1,2,3], [2,4,6]], # Noise scaling values\n", - " \"noise_scaling_method\": [\"global\"], # Folding method\n", - " \"extrapolation\": [\"polynomial\", \"linear\"], # Extrapolation method\n", - " }" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Deconstructing a Batch Dictionary:" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [], - "source": [ - "# Create single experiments from batch object:\n", - "\n", - "# Define a function to make all combinations of experiments from a \"batch dictionary\" which should be formatted like the test case above\n", - "def make_experiment_list(batch_dict):\n", - "\n", - " # Initialize empty list where we will append each new experiment\n", - " exp_list = []\n", - "\n", - " # Make list with all combinations of key values from our \"batch dictionary\"\n", - " combo_list = list(itertools.product(batch_dict['noise_scaling_factors'], \n", - " batch_dict['noise_scaling_method'], \n", - " batch_dict['extrapolation']))\n", - " # Iterate over the list\n", - " for k in range(len(combo_list)):\n", - "\n", - " # Initialize single experiment dictionary with keys\n", - " exp = {x: set() for x in ['noise_scaling_factors', 'noise_scaling_method', 'extrapolation']}\n", - "\n", - " # Map each key to its unique value from our list of combinations\n", - " exp['noise_scaling_factors'] = combo_list[k][0]\n", - " exp['noise_scaling_method'] = combo_list[k][1]\n", - " exp['extrapolation'] = combo_list[k][2]\n", - "\n", - " # Pull out each experiment\n", - " 
exp_list.append(exp)\n", - "\n", - " # Returns a list of experiments\n", - " return exp_list" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'noise_scaling_factors': [1, 1.25, 1.5],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'polynomial'},\n", - " {'noise_scaling_factors': [1, 1.25, 1.5],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'linear'},\n", - " {'noise_scaling_factors': [1, 2, 3],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'polynomial'},\n", - " {'noise_scaling_factors': [1, 2, 3],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'linear'},\n", - " {'noise_scaling_factors': [2, 4, 6],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'polynomial'},\n", - " {'noise_scaling_factors': [2, 4, 6],\n", - " 'noise_scaling_method': 'global',\n", - " 'extrapolation': 'linear'}]" - ] - }, - "execution_count": 36, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Implementing our function using the test case defined above:\n", - "\n", - "make_experiment_list(zne_batch_test)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Validating a batch of experiments:" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [], - "source": [ - "# Validate each experiment in our batch object:\n", - "\n", - "# Create function that takes in a batch dictionary and schema to validate against\n", - "def batch_validate(batch_dict, schema):\n", - "\n", - " # Initialize empty list for validation results\n", - " validation_results = []\n", - "\n", - " # Create list of experiments\n", - " formatted_batch = make_experiment_list(batch_dict)\n", - "\n", - " # Iterate over list of experiments and individually validate\n", - " for k in formatted_batch:\n", - " try:\n", - " validate(instance=k, schema=schema)\n", - 
" result = \"validation passed\"\n", - " except jsonschema.exceptions.ValidationError as e:\n", - " result = \"validation failed\"\n", - "\n", - " # Pull out validation results\n", - " validation_results.append(result)\n", - " \n", - " return validation_results" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['validation passed',\n", - " 'validation passed',\n", - " 'validation passed',\n", - " 'validation passed',\n", - " 'validation passed',\n", - " 'validation passed']" - ] - }, - "execution_count": 38, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# Implementing our batch validate function using the same test case:\n", - "\n", - "batch_validate(zne_batch_test, zne_schema)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Creating Noise Models:" - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [], - "source": [ - "# Making a thermal noise model using IonQ T1, T2 parameters:\n", - "\n", - "from qiskit_aer.noise import (NoiseModel, thermal_relaxation_error)\n", - "\n", - "def thermal_relaxation_noise_ionq(N): \n", - " \n", - " # T1 and T2 values for qubits 0-N\n", - " T1s = np.random.normal(1.1e10, 0.2e10, N) # Sampled N values from normal dist, w/ mean = 1.1e10, std = 2e9 us, (converted to ns)\n", - " T2s = np.random.normal(2e8, 0.2e8, N) # Sampled N values from normal dist, w/ mean = 2e5, std = 2e4 us (converted to ns)\n", - "\n", - " # Instruction times (in nanoseconds)\n", - " time_ones = 1000 \n", - "\n", - " noise_thermal = NoiseModel(basis_gates=['h', 'x', 'y', 'z', 'cx']) # Initialize noise model w ansatz gates\n", - "\n", - " # QuantumError objects\n", - " errors_ones = [thermal_relaxation_error(t1, t2, time_ones) # Make tuples of errors for rx, rz gates \n", - " for t1, t2 in zip(T1s, T2s)]\n", - " \n", - " # Apply error to all qubits in circuit from randomly sampled 
parameters\n", - " for j in range(N):\n", - " noise_thermal.add_quantum_error(errors_ones[j], \"h\", [j])\n", - " noise_thermal.add_quantum_error(errors_ones[j], \"x\", [j])\n", - " noise_thermal.add_quantum_error(errors_ones[j], \"y\", [j])\n", - " noise_thermal.add_quantum_error(errors_ones[j], \"z\", [j])\n", - " \n", - " sim_noise = AerSimulator(noise_model=noise_thermal)\n", - " return sim_noise, noise_thermal" - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [], - "source": [ - "# Ella's basic noise model:\n", - "\n", - "def basic_noise(n_qubits, prob=0.005):\n", - "\n", - " noise_model = NoiseModel()\n", - "\n", - " for i in range(n_qubits):\n", - " readout_err = ReadoutError([[0.99, 0.01], [0.2, 0.8]])\n", - " noise_model.add_readout_error(readout_err, [i])\n", - " \n", - " depolarizing_err1 = depolarizing_error(prob, num_qubits=1)\n", - " depolarizing_err2 = depolarizing_error(prob, num_qubits=2)\n", - " noise_model.add_all_qubit_quantum_error(depolarizing_err1, ['h', 'x', 'y', 'z'])\n", - " noise_model.add_all_qubit_quantum_error(depolarizing_err2, ['cx'])\n", - "\n", - " return noise_model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Defining the Executor & Batch Executor:" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [], - "source": [ - "# Single experiment executor (very slightly modified from Ella's):\n", - "\n", - "def execute(circuit, noise_model='basic'): # Set noise_model to thermal, basic, noiseless\n", - " circ = circuit.copy()\n", - " circ.measure_all()\n", - " num_qubits = circ.num_qubits\n", - "\n", - " if noise_model == 'thermal':\n", - " backend = thermal_relaxation_noise_ionq(num_qubits)[0]\n", - " elif noise_model == 'basic':\n", - " backend = AerSimulator(noise_model=basic_noise(num_qubits))\n", - " elif noise_model == 'noiseless':\n", - " backend = AerSimulator()\n", - " \n", - " #backend = 
AerSimulator(noise_model=noise_model)\n", - " transpiled_circuit = transpile(circ, optimization_level=0, backend=backend)\n", - "\n", - " results = backend.run(transpiled_circuit, optimization_level=0, shots=1000).result()\n", - " counts = results.get_counts(transpiled_circuit)\n", - "\n", - " total_shots = sum(counts.values())\n", - " probabilities = {state: count / total_shots for state, count in counts.items()}\n", - "\n", - " expectation_value = probabilities['0'*num_qubits]+probabilities['1'*num_qubits]\n", - " return expectation_value" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [], - "source": [ - "# Adjusting Ella's mapping functionality:\n", - "\n", - "noise_scaling_map = {\n", - " \"global\": fold_global,\n", - " \"local_random\": fold_gates_at_random,\n", - " \"local_all\": fold_all,\n", - " \"layer\": get_layer_folding,\n", - " \"identity_scaling\": insert_id_layers\n", - "}\n", - "\n", - "def extrapolation_map(single_exp):\n", - " ex_map = {\n", - " \"linear\": zne.inference.LinearFactory(scale_factors=single_exp['noise_scaling_factors']),\n", - " \"richardson\": zne.inference.RichardsonFactory(scale_factors=single_exp['noise_scaling_factors']),\n", - " \"polynomial\": zne.inference.PolyFactory(scale_factors=single_exp['noise_scaling_factors'], order=2),\n", - " \"exponential\": zne.inference.ExpFactory(scale_factors=single_exp['noise_scaling_factors']),\n", - " \"poly-exp\": zne.inference.PolyExpFactory(scale_factors=single_exp['noise_scaling_factors'], order=1),\n", - " \"adaptive-exp\": zne.inference.AdaExpFactory(scale_factor=single_exp['noise_scaling_factors'][1], steps=4, asymptote=None), \n", - " }\n", - " return ex_map[single_exp['extrapolation']]" - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "metadata": {}, - "outputs": [], - "source": [ - "# Batch execute function to return list of expectation values:\n", - "\n", - "def batch_execute(batch_dict, circuit, executor):\n", - "\n", 
- " # Define list of experiments\n", - " formatted_batch = make_experiment_list(batch_dict)\n", - "\n", - " # Initialize list to append expectation values into\n", - " exp_val_list = []\n", - "\n", - " # Iterate over each experiment\n", - " for k in formatted_batch:\n", - " exp_val = zne.execute_with_zne(circuit=circuit, executor=executor, factory=extrapolation_map(k), \n", - " scale_noise=noise_scaling_map[k['noise_scaling_method']])\n", - " \n", - " # Pull out expectation values\n", - " exp_val_list.append(exp_val)\n", - " \n", - " return exp_val_list" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Testing:" - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " ┌───┐ ┌───┐┌───┐ \n", - "q_0: ┤ H ├──■──┤ X ├┤ X ├───────────────\n", - " └───┘┌─┴─┐└───┘├───┤┌───┐┌───┐┌───┐\n", - "q_1: ─────┤ X ├──■──┤ X ├┤ X ├┤ Y ├┤ Y ├\n", - " └───┘┌─┴─┐├───┤├───┤└───┘└───┘\n", - "q_2: ──────────┤ X ├┤ Y ├┤ Y ├──────────\n", - " └───┘└───┘└───┘ \n" - ] - } - ], - "source": [ - "# Making a 3-qubit test GHZ circuit:\n", - "\n", - "n = 3\n", - "ghz_circ = generate_ghz_circuit(n, return_type='qiskit')\n", - "\n", - "ghz_circ.x([0,1])\n", - "ghz_circ.x([0,1])\n", - "ghz_circ.y([1,2])\n", - "ghz_circ.y([1,2])\n", - "\n", - "print(ghz_circ)" - ] - }, - { - "cell_type": "code", - "execution_count": 45, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "ideal EV: 1.0\n", - "unmitigated noisy EV: 0.721\n" - ] - } - ], - "source": [ - "print('ideal EV: ', execute(ghz_circ, noise_model='noiseless'))\n", - "print('unmitigated noisy EV: ', execute(ghz_circ, noise_model='basic'))" - ] - }, - { - "cell_type": "code", - "execution_count": 46, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Experiment 1 Mitigated Expectation Value: 0.7759999999999967\n", - "Experiment 
2 Mitigated Expectation Value: 0.7123333333333339\n", - "Experiment 3 Mitigated Expectation Value: 0.790999999999999\n", - "Experiment 4 Mitigated Expectation Value: 0.7943333333333336\n", - "Experiment 5 Mitigated Expectation Value: 0.663999999999999\n", - "Experiment 6 Mitigated Expectation Value: 0.7863333333333338\n" - ] - } - ], - "source": [ - "exp_results = batch_execute(zne_batch_test, ghz_circ, execute)\n", - "\n", - "for k in np.arange(1,7):\n", - " print(\"Experiment\", k, \"Mitigated Expectation Value:\", exp_results[k-1])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/mitiq/schema/schema/ddd_schema.json b/mitiq/schema/technique_schema/ddd_schema.json similarity index 100% rename from mitiq/schema/schema/ddd_schema.json rename to mitiq/schema/technique_schema/ddd_schema.json diff --git a/mitiq/schema/schema/pec_schema.json b/mitiq/schema/technique_schema/pec_schema.json similarity index 100% rename from mitiq/schema/schema/pec_schema.json rename to mitiq/schema/technique_schema/pec_schema.json diff --git a/mitiq/schema/schema/rem_schema.json b/mitiq/schema/technique_schema/rem_schema.json similarity index 100% rename from mitiq/schema/schema/rem_schema.json rename to mitiq/schema/technique_schema/rem_schema.json diff --git a/mitiq/schema/schema/zne_schema.json b/mitiq/schema/technique_schema/zne_schema.json similarity index 100% rename from mitiq/schema/schema/zne_schema.json rename to mitiq/schema/technique_schema/zne_schema.json diff --git 
a/mitiq/schema/validation_utils.py b/mitiq/schema/validation_utils.py new file mode 100644 index 0000000000..b151d99486 --- /dev/null +++ b/mitiq/schema/validation_utils.py @@ -0,0 +1,18 @@ +import json +import jsonschema +from jsonschema import validate + + +def load(schema_path): + with open(schema_path, 'r') as file: + return json.load(file) + + +def validate_experiment(experiment, schema): + try: + validate(instance=experiment, schema=schema) + print("expertiment validation passed") + except jsonschema.exceptions.ValidationError as e: + print("experiment validation failed") + return None +