This repository has been archived by the owner on Jan 26, 2022. It is now read-only.

Applying gates to productCuMPS fails when using complex matrices and setting maxdim optional argument #17

Open · sami-b95 opened this issue Oct 2, 2021 · 0 comments

Applying gates to a productCuMPS fails when the gate matrices are complex and the maxdim optional argument is set.

Here is an example based on the gate evolution demo, replacing the X gates by Y gates and setting maxdim in apply():

@@ -16,9 +16,9 @@ op_matrix(::OpName"Id") =
 op_matrix(::OpName"I") =
   op_matrix("Id")
 
-op_matrix(::OpName"X") =
-  [0 1
-   1 0]
+op_matrix(::OpName"Y") =
+  [0 -im
+   im 0]
 
 function op_matrix(on::OpName, s::Index...; kwargs...)
   rs = reverse(s)
@@ -36,12 +36,12 @@ op(gn::OpName, ::SiteType"Qubit", s::Index...; kwargs...) =
 
 N = 10
 s = siteinds("Qubit", N)
-X = cu.(ops(s, [("X", n) for n in 1:N]))
+Y = cu.(ops(s, [("Y", n) for n in 1:N]))
 
 initstate = fill("0", N)
 
 ψ0 = productCuMPS(s, initstate)
 
-gates = [X[n] for n in 1:2:N]
-ψ = apply(gates, ψ0)
+gates = [Y[n] for n in 1:2:N]
+ψ = apply(gates, ψ0; maxdim=2)
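
As far as I can tell, only the truncating call fails; a minimal sketch of the two calls, using the ψ0 and gates defined above:

ψ_ok  = apply(gates, ψ0)            # fine: the failure only shows up once maxdim is passed
ψ_bad = apply(gates, ψ0; maxdim=2)  # raises the MethodError below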

The call with maxdim set returns the following error:

ERROR: LoadError: MethodError: no method matching _contract!(::ITensors.NDTensors.DenseTensor{ComplexF64, 3, Tuple{Index{Int64}, Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{ComplexF64, CUDA.CuArray{ComplexF64, 1, CUDA.Mem.DeviceBuffer}}}, ::ITensors.NDTensors.DenseTensor{Float64, 3, Tuple{Index{Int64}, Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{Float64, CUDA.CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}, ::ITensors.NDTensors.DenseTensor{ComplexF64, 2, Tuple{Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{ComplexF64, CUDA.CuArray{ComplexF64, 1, CUDA.Mem.DeviceBuffer}}}, ::ITensors.NDTensors.ContractionProperties{3, 2, 3}, ::ComplexF64, ::ComplexF64)
Closest candidates are:
  _contract!(::ITensors.NDTensors.DenseTensor{El, NC, IndsT, StoreT} where {IndsT, StoreT<:(ITensors.NDTensors.Dense{ElT, VecT} where {ElT, VecT<:(CUDA.CuArray{T, 1, B} where {T, B})})}, ::ITensors.NDTensors.DenseTensor{El, NA, IndsT, StoreT} where {IndsT, StoreT<:(ITensors.NDTensors.Dense{ElT, VecT} where {ElT, VecT<:(CUDA.CuArray{T, 1, B} where {T, B})})}, ::ITensors.NDTensors.DenseTensor{El, NB, IndsT, StoreT} where {IndsT, StoreT<:(ITensors.NDTensors.Dense{ElT, VecT} where {ElT, VecT<:(CUDA.CuArray{T, 1, B} where {T, B})})}, ::ITensors.NDTensors.ContractionProperties, ::Number, ::Number) where {El, NC, NA, NB} at /home/***/.julia/dev/ITensorsGPU/src/tensor/cudense.jl:238
  _contract!(::ITensors.NDTensors.DenseTensor{El, NC, IndsT, StoreT} where {IndsT, StoreT<:ITensors.NDTensors.Dense}, ::ITensors.NDTensors.DenseTensor{El, NA, IndsT, StoreT} where {IndsT, StoreT<:ITensors.NDTensors.Dense}, ::ITensors.NDTensors.DenseTensor{El, NB, IndsT, StoreT} where {IndsT, StoreT<:ITensors.NDTensors.Dense}, ::ITensors.NDTensors.ContractionProperties, ::Number, ::Number) where {El, NC, NA, NB} at /home/***/.julia/packages/ITensors/XnM47/src/NDTensors/dense.jl:861
  _contract!(::ITensors.NDTensors.DenseTensor{El, NC, IndsT, StoreT} where {IndsT, StoreT<:(ITensors.NDTensors.Dense{ElT, VecT} where {ElT, VecT<:(CUDA.CuArray{T, 1, B} where {T, B})})}, ::ITensors.NDTensors.DenseTensor{El, NA, IndsT, StoreT} where {IndsT, StoreT<:(ITensors.NDTensors.Dense{ElT, VecT} where {ElT, VecT<:(CUDA.CuArray{T, 1, B} where {T, B})})}, ::ITensors.NDTensors.DenseTensor{El, NB, IndsT, StoreT} where {IndsT, StoreT<:(ITensors.NDTensors.Dense{ElT, VecT} where {ElT, VecT<:(CUDA.CuArray{T, 1, B} where {T, B})})}, ::ITensors.NDTensors.ContractionProperties, ::Number) where {El, NC, NA, NB} at /home/***/.julia/dev/ITensorsGPU/src/tensor/cudense.jl:238
  ...
Stacktrace:
  [1] _contract_scalar!(R::ITensors.NDTensors.DenseTensor{ComplexF64, 3, Tuple{Index{Int64}, Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{ComplexF64, CUDA.CuArray{ComplexF64, 1, CUDA.Mem.DeviceBuffer}}}, labelsR::Tuple{Int64, Int64, Int64}, T₁::ITensors.NDTensors.DenseTensor{Float64, 3, Tuple{Index{Int64}, Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{Float64, CUDA.CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}, labelsT₁::Tuple{Int64, Int64, Int64}, T₂::ITensors.NDTensors.DenseTensor{ComplexF64, 2, Tuple{Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{ComplexF64, CUDA.CuArray{ComplexF64, 1, CUDA.Mem.DeviceBuffer}}}, labelsT₂::Tuple{Int64, Int64}, α::ComplexF64, β::ComplexF64)
    @ ITensorsGPU ~/.julia/dev/ITensorsGPU/src/tensor/cudense.jl:160
  [2] contract!
    @ ~/.julia/packages/ITensors/XnM47/src/NDTensors/dense.jl:815 [inlined]
  [3] contract!
    @ ~/.julia/packages/ITensors/XnM47/src/NDTensors/dense.jl:814 [inlined]
  [4] _contract!!(R::ITensors.NDTensors.DenseTensor{ComplexF64, 3, Tuple{Index{Int64}, Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{ComplexF64, CUDA.CuArray{ComplexF64, 1, CUDA.Mem.DeviceBuffer}}}, labelsR::Tuple{Int64, Int64, Int64}, T1::ITensors.NDTensors.DenseTensor{Float64, 3, Tuple{Index{Int64}, Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{Float64, CUDA.CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}, labelsT1::Tuple{Int64, Int64, Int64}, T2::ITensors.NDTensors.DenseTensor{ComplexF64, 2, Tuple{Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{ComplexF64, CUDA.CuArray{ComplexF64, 1, CUDA.Mem.DeviceBuffer}}}, labelsT2::Tuple{Int64, Int64}, α::Int64, β::Int64)
    @ ITensors.NDTensors ~/.julia/packages/ITensors/XnM47/src/NDTensors/dense.jl:600
  [5] _contract!!
    @ ~/.julia/packages/ITensors/XnM47/src/NDTensors/dense.jl:597 [inlined]
  [6] contract!!
    @ ~/.julia/dev/ITensorsGPU/src/tensor/cudense.jl:107 [inlined]
  [7] contract(T1::ITensors.NDTensors.DenseTensor{Float64, 3, Tuple{Index{Int64}, Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{Float64, CUDA.CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}, labelsT1::Tuple{Int64, Int64, Int64}, T2::ITensors.NDTensors.DenseTensor{ComplexF64, 2, Tuple{Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{ComplexF64, CUDA.CuArray{ComplexF64, 1, CUDA.Mem.DeviceBuffer}}}, labelsT2::Tuple{Int64, Int64}, labelsR::Tuple{Int64, Int64, Int64})
    @ ITensors.NDTensors ~/.julia/packages/ITensors/XnM47/src/NDTensors/dense.jl:555
  [8] contract(T1::ITensors.NDTensors.DenseTensor{Float64, 3, Tuple{Index{Int64}, Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{Float64, CUDA.CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}, labelsT1::Tuple{Int64, Int64, Int64}, T2::ITensors.NDTensors.DenseTensor{ComplexF64, 2, Tuple{Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{ComplexF64, CUDA.CuArray{ComplexF64, 1, CUDA.Mem.DeviceBuffer}}}, labelsT2::Tuple{Int64, Int64})
    @ ITensors.NDTensors ~/.julia/packages/ITensors/XnM47/src/NDTensors/dense.jl:543
  [9] _contract(A::ITensors.NDTensors.DenseTensor{Float64, 3, Tuple{Index{Int64}, Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{Float64, CUDA.CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}, B::ITensors.NDTensors.DenseTensor{ComplexF64, 2, Tuple{Index{Int64}, Index{Int64}}, ITensors.NDTensors.Dense{ComplexF64, CUDA.CuArray{ComplexF64, 1, CUDA.Mem.DeviceBuffer}}})
    @ ITensors ~/.julia/packages/ITensors/XnM47/src/itensor.jl:1639
 [10] _contract(A::ITensor, B::ITensor)
    @ ITensors ~/.julia/packages/ITensors/XnM47/src/itensor.jl:1645
 [11] contract(A::ITensor, B::ITensor)
    @ ITensors ~/.julia/packages/ITensors/XnM47/src/itensor.jl:1743
 [12] *
    @ ~/.julia/packages/ITensors/XnM47/src/itensor.jl:1731 [inlined]
 [13] orthogonalize!(M::MPS, j::Int64; kwargs::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
    @ ITensors ~/.julia/packages/ITensors/XnM47/src/mps/abstractmps.jl:1289
 [14] orthogonalize!
    @ ~/.julia/packages/ITensors/XnM47/src/mps/abstractmps.jl:1277 [inlined]
 [15] #orthogonalize#636
    @ ~/.julia/packages/ITensors/XnM47/src/mps/abstractmps.jl:1322 [inlined]
 [16] orthogonalize
    @ ~/.julia/packages/ITensors/XnM47/src/mps/abstractmps.jl:1321 [inlined]
 [17] product(o::ITensor, ψ::MPS, ns::Vector{Int64}; move_sites_back::Bool, apply_dag::Bool, kwargs::Base.Iterators.Pairs{Symbol, Int64, Tuple{Symbol}, NamedTuple{(:maxdim,), Tuple{Int64}}})
    @ ITensors ~/.julia/packages/ITensors/XnM47/src/mps/abstractmps.jl:1749
 [18] product(As::Vector{ITensor}, ψ::MPS; move_sites_back::Bool, kwargs::Base.Iterators.Pairs{Symbol, Int64, Tuple{Symbol}, NamedTuple{(:maxdim,), Tuple{Int64}}})
    @ ITensors ~/.julia/packages/ITensors/XnM47/src/mps/abstractmps.jl:1857
 [19] top-level scope
    @ ~/itensors_gpu_tmp/ITensorsGPU.jl/examples/gate_evolution.jl:46
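
The element-type mismatch is visible directly on the tensors involved (a quick check run just before the failing apply call in the script above; the types match what the stack trace reports):

eltype(ψ0[1])     # Float64: productCuMPS builds a real-valued product state
eltype(gates[1])  # ComplexF64: the Y gate tensors are complex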

As I understand it, the error comes from the CUDA version of _contract! requiring all of its tensor arguments to share a single element type El, while here it is handed a Float64 tensor alongside ComplexF64 ones. Judging from the stack trace, the mixed element types arise inside orthogonalize!: a complex bond tensor left behind by an earlier (complex) gate is contracted into a site tensor of the state that is still real Float64. I tried the following fix to the problematic method, allowing the tensors to have different element types:

--- a/src/tensor/cudense.jl
+++ b/src/tensor/cudense.jl
@@ -235,11 +235,11 @@ function _gemm_contract!(CT::DenseTensor{El,NC},
     return C
 end
 
-function _contract!(CT::CuDenseTensor{El,NC},
-                    AT::CuDenseTensor{El,NA},
-                    BT::CuDenseTensor{El,NB},
+function _contract!(CT::CuDenseTensor{El1,NC},
+                    AT::CuDenseTensor{El2,NA},
+                    BT::CuDenseTensor{El3,NB},
                     props::ContractionProperties,
-                    α::Number=one(El),β::Number=zero(El)) where {El,NC,NA,NB}
+                    α::Number=one(El2),β::Number=zero(El3)) where {El1,El2,El3,NC,NA,NB}
   if ndims(CT) > 12 || ndims(BT) > 12 || ndims(AT) > 12
     return _gemm_contract!(CT, AT, BT, props, α, β)
   end

This does suppress the error; however, after trying out the resulting code on larger and more complicated problem instances, the GPU code now clearly runs much slower than the CPU one. What have I done wrong?

Now, the ITensorsGPU.jl module may not have been designed for manipulating tensors with bond truncation, so perhaps I'm mistaken all the way and there is no simple solution!
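
For what it's worth, an alternative workaround I can think of (a sketch only, not benchmarked, and I have not verified that the complex-scalar promotion of the CuDense storage behaves well on the GPU) would be to promote the product state to ComplexF64 right after constructing it, so that _contract! only ever sees tensors of a single element type:

ψ0 = productCuMPS(s, initstate)
for n in 1:length(ψ0)
  ψ0[n] = (1 + 0im) * ψ0[n]   # promote the Float64 site tensors to ComplexF64
end
ψ = apply(gates, ψ0; maxdim=2)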
