Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Interaction picture #214

Draft
wants to merge 14 commits into
base: dev
Choose a base branch
from
Draft
13 changes: 5 additions & 8 deletions c3/experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ def __init__(self, pmap: ParameterMap = None, prop_method=None, sim_res=100e9):
self.created_by = None
self.logdir: str = ""
self.propagate_batch_size = None
self.use_control_fields = True
self.overwrite_propagators = True # Keep only currently computed propagators
self.compute_propagators_timestamp = 0
self.stop_partial_propagator_gradient = True
Expand Down Expand Up @@ -259,7 +258,6 @@ def asdict(self) -> Dict:
exp_dict["generator"] = self.pmap.generator.asdict()
exp_dict["options"] = {
"propagate_batch_size": self.propagate_batch_size,
"use_control_fields": self.use_control_fields,
"overwrite_propagators": self.overwrite_propagators,
"stop_partial_propagator_gradient": self.stop_partial_propagator_gradient,
}
Expand Down Expand Up @@ -296,7 +294,7 @@ def evaluate_legacy(self, sequences, psi_init: tf.Tensor = None):
for gate in sequence:
psi_t = tf.matmul(self.propagators[gate], psi_t)

pops = self.populations(psi_t, model.lindbladian)
pops = self.populations(psi_t, "lindbladian" in model.frame)
populations.append(pops)
return populations

Expand Down Expand Up @@ -329,7 +327,7 @@ def evaluate_qasm(self, sequences, psi_init: tf.Tensor = None):
for gate in sequence:
psi_t = tf.matmul(self.lookup_gate(**gate), psi_t)

pops = self.populations(psi_t, model.lindbladian)
pops = self.populations(psi_t, "lindbladian" in model.frame)
populations.append(pops)
return populations

Expand Down Expand Up @@ -496,15 +494,14 @@ def compute_propagators(self):
f" Available gates are:\n {list(instructions.keys())}."
)

model.controllability = self.use_control_fields
steps = int((instr.t_end - instr.t_start) * self.sim_res)
result = self.propagation(
model, generator, instr, self.folding_stack[steps]
)
U = result["U"]
dUs = result["dUs"]
self.ts = result["ts"]
if model.use_FR:
if "rotating" in model.frame:
# TODO change LO freq to at the level of a line
freqs = {}
framechanges = {}
Expand All @@ -525,15 +522,15 @@ def compute_propagators(self):
)
t_final = tf.constant(instr.t_end - instr.t_start, dtype=tf.complex128)
FR = model.get_Frame_Rotation(t_final, freqs, framechanges)
if model.lindbladian:
if "lindbladian" in model.frame:
SFR = tf_super(FR)
U = tf.matmul(SFR, U)
self.FR = SFR
else:
U = tf.matmul(FR, U)
self.FR = FR
if model.dephasing_strength != 0.0:
if not model.lindbladian:
if "lindbladian" not in model.frame:
raise ValueError("Dephasing can only be added when lindblad is on.")
else:
amps = {}
Expand Down
45 changes: 27 additions & 18 deletions c3/libraries/chip.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ def __init__(self, **props):
super().__init__(**props)
self.Hs = {}
self.collapse_ops = {}
self.drive_line = None
self.has_drive = False
self.index = None

def set_subspace_index(self, index):
Expand Down Expand Up @@ -327,6 +327,7 @@ def __init__(
hilbert_dim=hilbert_dim,
params=params,
)
self.has_drive = True
if freq:
self.params["freq"] = freq
if phi:
Expand Down Expand Up @@ -1138,6 +1139,7 @@ def __init__(self, **props):
self.hamiltonian_func = hamiltonians[h_func]
super().__init__(**props)
self.Hs = {}
self.has_drive = False

def asdict(self) -> dict:
params = {}
Expand Down Expand Up @@ -1214,25 +1216,32 @@ class Drive(LineComponent):

"""

def __init__(
    self,
    name,
    desc=None,
    comment=None,
    connected: List[str] = None,
    params=None,
    hamiltonian_func=None,
):
    """Create a drive line component.

    Forwards all configuration to ``LineComponent`` and flags the
    component as one that carries an external drive signal.
    """
    line_kwargs = dict(
        name=name,
        desc=desc,
        comment=comment,
        params=params,
        connected=connected,
        hamiltonian_func=hamiltonian_func,
    )
    super().__init__(**line_kwargs)
    # A Drive always supplies a control signal to its connected subsystems.
    self.has_drive = True

def init_Hs(self, ann_opers: list):
    """Initialize the drive Hamiltonian from annihilation operators.

    Sums the per-subsystem drive Hamiltonians produced by
    ``self.hamiltonian_func`` and stores the result with a leading
    batch axis of size 1, so it broadcasts against a time axis in
    ``get_Hamiltonian``.

    Parameters
    ----------
    ann_opers : list
        Annihilation operators of the connected subsystems, one per
        subsystem this line drives.
    """
    # NOTE(review): the diff showed two versions of the final assignment;
    # this resolves to the updated, batched form used by get_Hamiltonian.
    hs = []
    for a in ann_opers:
        hs.append(tf.constant(self.hamiltonian_func(a), dtype=tf.complex128))
    # Shape (1, dim, dim): one summed drive Hamiltonian with a batch axis.
    self.h = tf.expand_dims(tf.reduce_sum(hs, axis=0), 0)

def get_Hamiltonian(
    self, signal: Union[Dict, bool] = None, transform: tf.Tensor = None
) -> tf.Tensor:
    """Return the drive Hamiltonian, optionally transformed and scaled by a signal.

    Parameters
    ----------
    signal : Union[Dict, bool]
        ``None`` -> a zero Hamiltonian of the drive's shape is returned.
        ``True`` -> the bare (possibly transformed) drive Hamiltonian.
        dict -> expected to carry a "values" entry with the control field
        samples; the Hamiltonian is scaled per sample.
    transform : tf.Tensor
        Optional basis change; applied as T^dagger h T.

    Returns
    -------
    tf.Tensor
        Zero tensor, the (transformed) Hamiltonian, or a per-sample batch of
        scaled Hamiltonians.
        NOTE(review): any other ``signal`` value falls through and implicitly
        returns ``None`` — confirm callers never pass e.g. ``False``.
    """
    if signal is None:
        return tf.zeros_like(self.h)
    h = self.h
    if transform is not None:
        transform = tf.cast(transform, tf.complex128)
        # Conjugate the Hamiltonian into the transformed basis.
        h = tf.matmul(tf.matmul(transform, h, adjoint_a=True), transform)

    if signal is True:
        return h
    elif isinstance(signal, dict):
        sig = tf.cast(signal["values"], tf.complex128)
        # Reshape samples to (n, 1, 1) so they broadcast over matrix dims.
        sig = tf.reshape(sig, [sig.shape[0], 1, 1])
        return tf.expand_dims(h, 0) * sig
def get_Hamiltonian(self, signal: Dict = None) -> tf.Tensor:
    """Return the drive Hamiltonian scaled by the control signal samples.

    Parameters
    ----------
    signal : Dict
        Signal dictionary with a "values" entry holding the control field
        samples; one scaled Hamiltonian slice is produced per sample.

    Returns
    -------
    tf.Tensor
        Batch of drive Hamiltonians, one per signal sample.
    """
    if signal is None:
        # Fix: the original used a mutable default argument (`signal={}`).
        # An empty dict here reproduces the original behavior exactly
        # (KeyError on the missing "values" entry).
        signal = {}
    sig = tf.cast(signal["values"], tf.complex128)
    # Reshape samples to (n, 1, 1) so they broadcast over the matrix dims.
    sig = tf.reshape(sig, [sig.shape[0], 1, 1])
    # self.h carries a leading batch axis (see init_Hs); the slice broadcasts
    # it against the sample axis — presumably len(self.h) is 1, TODO confirm.
    return self.h[: sig.shape[0]] * sig
11 changes: 10 additions & 1 deletion c3/libraries/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,16 @@
)
/ np.sqrt(2),
"iswap": np.array(
[[1, 0, 0, 0], [0, 0, 1j, 0], [0, 1j, 0, 0], [0, 0, 0, 1]], dtype=np.complex128
[[1, 0, 0, 0], [0, 0, -1j, 0], [0, -1j, 0, 0], [0, 0, 0, 1]],
dtype=np.complex128,
),
"iswap90": np.array(
[[0, 0, 0, 0], [0, 1, -1j, 0], [0, -1j, 1, 0], [0, 0, 0, 0]],
dtype=np.complex128,
)
/ np.sqrt(2)
+ np.array(
[[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 1]], dtype=np.complex128
),
"cz": np.diag(np.array([1, 1, 1, -1], dtype=np.complex128)),
"ccz": np.diag(np.array([1, 1, 1, 1, 1, 1, 1, -1], dtype=np.complex128)),
Expand Down
136 changes: 15 additions & 121 deletions c3/libraries/propagation.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,66 +72,6 @@ def rk4_step(h, psi, dt):

def get_hs_of_t_ts(
    model: Model, gen: Generator, instr: Instruction, prop_res=1
) -> Dict:
    """Build the Hamiltonian time series for an instruction.

    Dispatches to the controlled-field variant when the model exposes
    explicit control Hamiltonians, otherwise to the plain variant.
    """
    if model.controllability:
        return _get_hs_of_t_ts_controlled(model, gen, instr, prop_res)
    return _get_hs_of_t_ts(model, gen, instr, prop_res)


def _get_hs_of_t_ts_controlled(
    model: Model, gen: Generator, instr: Instruction, prop_res=1
) -> Dict:
    """
    Return a Dict containing:

    - a list of

        H(t) = H_0 + sum_k c_k H_k.

    - time slices ts

    - timestep dt

    Parameters
    ----------
    model : Model
        System model supplying the drift Hamiltonian H_0 and the control
        Hamiltonians H_k via ``get_Hamiltonians()``.
    gen : Generator
        Signal generator; NOTE(review): its ``resolution`` attribute is
        multiplied in place by ``prop_res`` and not restored afterwards.
    instr : Instruction
        Instruction whose control signals are generated.
    prop_res : int
        Resolution multiplier required by the propagation method; the
        returned time axis is subsampled by this factor.
    """
    Hs = []
    ts = []
    # Side effect: permanently scales the generator's resolution.
    gen.resolution = prop_res * gen.resolution
    signal = gen.generate_signals(instr)
    h0, hctrls = model.get_Hamiltonians()
    signals = []
    hks = []
    # Collect control fields and matching control Hamiltonians per drive line;
    # `ts` keeps the time axis of the last signal (all assumed identical).
    for key in signal:
        signals.append(signal[key]["values"])
        ts = signal[key]["ts"]
        hks.append(hctrls[key])
    cflds = tf.cast(signals, tf.complex128)
    hks = tf.cast(hks, tf.complex128)
    # One total Hamiltonian H(t_i) = H_0 + sum_k c_k(t_i) H_k per time step.
    for ii in range(cflds[0].shape[0]):
        cf_t = []
        for fields in cflds:
            cf_t.append(tf.cast(fields[ii], tf.complex128))
        Hs.append(sum_h0_hks(h0, hks, cf_t))

    # dt spans prop_res raw samples, matching the subsampled time axis below.
    dt = tf.constant(ts[1 * prop_res].numpy() - ts[0].numpy(), dtype=tf.complex128)
    return {"Hs": Hs, "ts": ts[::prop_res], "dt": dt}


def _get_hs_of_t_ts(
model: Model, gen: Generator, instr: Instruction, prop_res=1
) -> Dict:
"""
Return a Dict containing:
Expand Down Expand Up @@ -243,38 +183,16 @@ def pwc(model: Model, gen: Generator, instr: Instruction, folding_stack: list) -
signal = gen.generate_signals(instr)
# Why do I get 0.0 if I print gen.resolution here?! FR
ts = []
if model.controllability:
h0, hctrls = model.get_Hamiltonians()
signals = []
hks = []
for key in signal:
signals.append(signal[key]["values"])
ts = signal[key]["ts"]
hks.append(hctrls[key])
signals = tf.cast(signals, tf.complex128)
hks = tf.cast(hks, tf.complex128)
else:
h0 = model.get_Hamiltonian(signal)
ts_list = [sig["ts"][1:] for sig in signal.values()]
ts = tf.constant(tf.math.reduce_mean(ts_list, axis=0))
hks = None
signals = None
if not np.all(
tf.math.reduce_variance(ts_list, axis=0) < 1e-5 * (ts[1] - ts[0])
):
raise Exception("C3Error:Something with the times happend.")
if not np.all(
tf.math.reduce_variance(ts[1:] - ts[:-1]) < 1e-5 * (ts[1] - ts[0]) # type: ignore
):
raise Exception("C3Error:Something with the times happend.")
h0 = model.get_Hamiltonian(signal)
ts_list = [sig["ts"][1:] for sig in signal.values()]
ts = ts_list[-1]

dt = ts[1] - ts[0]

batch_size = tf.constant(len(h0), tf.int32)

dUs = tf_batch_propagate(h0, hks, signals, dt, batch_size=batch_size)
dUs = tf_batch_propagate(h0, dt, batch_size=batch_size)

# U = tf_matmul_left(tf.cast(dUs, tf.complex128))
U = tf_matmul_n(dUs, folding_stack)

if model.max_excitations:
Expand Down Expand Up @@ -366,20 +284,9 @@ def tf_dU_of_t_lind(h0, hks, col_ops, cflds_t, dt):
return dU


def tf_propagation_vectorized(h_of_t, dt):
    """Compute piece-wise-constant propagators exp(-i H(t) dt).

    NOTE(review): the diff interleaved the old ``(h0, hks, cflds_t, dt)``
    version with the new one; this resolves to the new signature, which
    matches the call site in ``tf_batch_propagate``.

    Parameters
    ----------
    h_of_t : tf.Tensor
        Batch of total Hamiltonian slices, one per time step.
    dt : float
        Duration of one time step.

    Returns
    -------
    tf.Tensor
        Matrix exponentials exp(-1j * h_of_t * dt), same batch shape.
    """
    dt = tf.cast(dt, dtype=tf.complex128)
    dh = -1.0j * h_of_t * dt
    return tf.linalg.expm(dh)


Expand All @@ -400,7 +307,7 @@ def pwc_trott_drift(h0, hks, cflds_t, dt):
return dUs


def tf_batch_propagate(hamiltonian, dt, batch_size):
    """Compute propagators for a time series of Hamiltonians in batches.

    The time axis is split into chunks of at most ``batch_size`` slices,
    which are exponentiated one chunk at a time to bound peak memory use.

    NOTE(review): the diff interleaved the old ``(hamiltonian, hks,
    signals, dt, batch_size)`` version with the new one; this resolves to
    the new signal-free signature.

    Parameters
    ----------
    hamiltonian : tf.Tensor
        Batch of total Hamiltonian slices, indexed by time along axis 0.
    dt : float
        Duration of one time slice.
    batch_size : int
        Maximum number of slices handled per chunk.

    Returns
    -------
    tf.Tensor
        Concatenated piece-wise-constant propagators for all time slices.
    """
    batches = int(tf.math.ceil(hamiltonian.shape[0] / batch_size))
    batch_array = tf.TensorArray(
        hamiltonian.dtype, size=batches, dynamic_size=False, infer_shape=False
    )
    for i in range(batches):
        batch_array = batch_array.write(
            i, hamiltonian[i * batch_size : i * batch_size + batch_size]
        )

    dUs_array = tf.TensorArray(tf.complex128, size=batches, infer_shape=False)
    for i in range(batches):
        x = batch_array.read(i)
        result = tf_propagation_vectorized(x, dt)
        dUs_array = dUs_array.write(i, result)
    return dUs_array.concat()

Expand Down
Loading