Skip to content

Commit

Permalink
Initial commit
Browse files Browse the repository at this point in the history
  • Loading branch information
multiphaseCFD committed Sep 19, 2024
1 parent eb709d5 commit d2a1849
Show file tree
Hide file tree
Showing 3 changed files with 53 additions and 46 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,8 @@ namespace Pennylane::LightningTensor::TNCuda {
backend.
* Any gate tensor can be represented as an MPO tensor network in the context of
MPS. The gate tensor must be decomposed with respect to its target wires. Note
that only local target wires are supported. The non-adjacent target wires
must be swapped to local before constructing the MPO tensor network.
that connecting identity MPO tensors are inserted between two nearest-neighbor
wires for gates acting on non-adjacent target wires.
* The MPO tensors' modes order in an open boundary condition are:
2 3 2
| | |
Expand Down Expand Up @@ -127,9 +127,6 @@ template <class PrecisionT> class MPOTNCuda {
PL_ABORT_IF_NOT(std::is_sorted(wires.begin(), wires.end()),
"Only sorted target wires is accepeted.");

PL_ABORT_IF_NOT(wires.size() == wires.back() - wires.front() + 1,
"Only support local target wires.");

// Create an empty MPO tensor network operator. Note that the state
// extents are aligned with the quantum state.
PL_CUTENSORNET_IS_SUCCESS(cutensornetCreateNetworkOperator(
Expand All @@ -140,7 +137,7 @@ template <class PrecisionT> class MPOTNCuda {
/* cudaDataType_t */ cudaDataType,
/* cutensornetNetworkOperator_t */ &MPOOperator_));

numMPOSites_ = wires.size();
numMPOSites_ = wires.back() - wires.front() + 1;

MPO_modes_int32_.resize(numMPOSites_);

Expand All @@ -153,14 +150,11 @@ template <class PrecisionT> class MPOTNCuda {
return static_cast<int32_t>(numQubits - 1 - mode);
});

// Ensure the modes are in ascending order
std::reverse(MPO_modes_int32_.begin(), MPO_modes_int32_.end());

for (std::size_t i = 0; i < numMPOSites_ - 1; i++) {
for (std::size_t i = 0; i < wires.size() - 1; i++) {
// Binary logarithm of the bond dimension required for the exact MPO
// decomposition
const std::size_t lg_bondDim_exact =
std::min(i + 1, numMPOSites_ - i - 1) *
std::min(i + 1, wires.size() - i - 1) *
2; // 1+1 (1 for bra and 1 for ket)

const std::size_t bondDim =
Expand All @@ -171,6 +165,24 @@ template <class PrecisionT> class MPOTNCuda {
bondDims_.emplace_back(bondDim);
}

auto BondDims = bondDims_;
// Insert bond dimensions of Identity tensors
if (wires.size() != numMPOSites_) {
for (std::size_t i = 0; i < wires.size() - 1; i++) {
const std::size_t numISites = wires[i + 1] - wires[i] - 1;
if (numISites > 0) {
std::vector<std::size_t> ISites(numISites, BondDims[i]);
std::size_t offset = wires[i] - wires[0];
bondDims_.insert(bondDims_.begin() + offset + 1,
ISites.begin(), ISites.end());
}
}
}

PL_ABORT_IF_NOT(bondDims_.size() == numMPOSites_ - 1,
"Number of bond dimensions must match the number of "
"MPO sites.");

for (std::size_t i = 0; i < numMPOSites_; i++) {
const std::size_t bondDimR =
i < numMPOSites_ - 1 ? bondDims_[i] : 1;
Expand All @@ -189,10 +201,36 @@ template <class PrecisionT> class MPOTNCuda {
tensors_.emplace_back(std::make_shared<TensorCuda<PrecisionT>>(
localModesExtents.size(), localModesExtents, localModesExtents,
dev_tag));
}

std::vector<std::size_t> mpo_site_tag(numMPOSites_, numMPOSites_);
for (std::size_t i = 0; i < wires.size(); i++) {
auto idx = wires[i] - wires[0];
mpo_site_tag[idx] = i;
}

auto tensor_cu = cuUtil::complexToCu<ComplexT>(tensors[i]);
tensors_[i]->getDataBuffer().CopyHostDataToGpu(tensor_cu.data(),
tensor_cu.size());
// Update MPO tensors
for (std::size_t i = 0; i < numMPOSites_; i++) {
auto idx = mpo_site_tag[i];
if (idx < numMPOSites_) {
auto tensor_cu = cuUtil::complexToCu<ComplexT>(tensors[idx]);
tensors_[i]->getDataBuffer().CopyHostDataToGpu(
tensor_cu.data(), tensor_cu.size());
} else {
// Initialize connecting Identity tensors
std::size_t length = tensors_[i]->getDataBuffer().getLength();

std::vector<CFP_t> identity_tensor_host(length,
CFP_t{0.0, 0.0});
for (std::size_t idx = 0; idx < length;
idx += 2 * bondDims_[i - 1] + 1) {
identity_tensor_host[idx] =
cuUtil::complexToCu<ComplexT>(ComplexT{1.0, 0.0});
}

tensors_[i]->getDataBuffer().CopyHostDataToGpu(
identity_tensor_host.data(), identity_tensor_host.size());
}
}

// Append MPO tensor network operator components
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -256,27 +256,9 @@ class MPSTNCuda final : public TNCudaBase<Precision, MPSTNCuda<Precision>> {
"The number of tensors should be equal to the number of "
"wires.");

// Create a queue of wire pairs to apply SWAP gates and MPO local target
// wires
const auto [local_wires, swap_wires_queue] =
create_swap_wire_pair_queue(wires);

// Apply SWAP gates to ensure the following MPO operator targeting at
// local wires
if (swap_wires_queue.size() > 0) {
for_each(swap_wires_queue.begin(), swap_wires_queue.end(),
[this](const auto &swap_wires) {
for_each(swap_wires.begin(), swap_wires.end(),
[this](const auto &wire_pair) {
BaseType::applyOperation(
"SWAP", wire_pair, false);
});
});
}

// Create a MPO object based on the host data from the user
mpos_.emplace_back(std::make_shared<MPOTNCuda<Precision>>(
tensors, local_wires, max_mpo_bond_dim, BaseType::getNumQubits(),
tensors, wires, max_mpo_bond_dim, BaseType::getNumQubits(),
BaseType::getTNCudaHandle(), BaseType::getCudaDataType(),
BaseType::getDevTag()));

Expand All @@ -293,18 +275,6 @@ class MPSTNCuda final : public TNCudaBase<Precision, MPSTNCuda<Precision>> {
/* int64_t * operatorId*/ &operatorId));

mpo_ids_.push_back(static_cast<std::size_t>(operatorId));

// Apply SWAP gates to restore the original wire order
if (swap_wires_queue.size() > 0) {
for_each(swap_wires_queue.rbegin(), swap_wires_queue.rend(),
[this](const auto &swap_wires) {
for_each(swap_wires.rbegin(), swap_wires.rend(),
[this](const auto &wire_pair) {
BaseType::applyOperation(
"SWAP", wire_pair, false);
});
});
}
}

/**
Expand Down
1 change: 0 additions & 1 deletion pennylane_lightning/lightning_tensor/_tensornet.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,6 @@ def gate_matrix_decompose(gate_ops_matrix, wires, max_mpo_bond_dim, c_dtype):
for i in range(len(wires)):
indices_order.extend([original_axes[i], original_axes[i] + len(wires)])
# Reverse the indices order to match the target wire order of cutensornet backend
indices_order.reverse()

# Permutation of the gate tensor
gate_tensor = np.transpose(gate_tensor, axes=indices_order)
Expand Down

0 comments on commit d2a1849

Please sign in to comment.