2 changes: 1 addition & 1 deletion src/algorithms/expval.jl
@@ -192,7 +192,7 @@ function expectation_value(
ψ::FiniteMPS, O::ProjectionOperator,
envs::FiniteEnvironments = environments(ψ, O)
)
- ens = zeros(scalartype(ψ), length(ψ))
+ ens = zeros(storagetype(ψ), length(ψ))
Member:
This looks a bit strange to me: would we not expect this to have to be on the CPU anyway, since it originates from a call to dot, which produces a scalar?

If you prefer, I am also happy with writing this entire expression as a single call to sum, to avoid the intermediate allocation altogether.

for i in 1:length(ψ)
operator = AC_hamiltonian(i, ψ, O, ψ, envs)
ens[i] = dot(ψ.AC[i], operator * ψ.AC[i])
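A minimal sketch of the single-`sum` formulation suggested above, assuming the rest of the method reduces the per-site values to one number, as the comment implies:

```julia
# Sketch only: fold the per-site energies directly into the reduction, so that
# no `ens` buffer (and hence no choice of storagetype) is needed.
return sum(1:length(ψ)) do i
    operator = AC_hamiltonian(i, ψ, O, ψ, envs)
    dot(ψ.AC[i], operator * ψ.AC[i])
end
```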
20 changes: 12 additions & 8 deletions src/environments/abstract_envs.jl
@@ -15,48 +15,52 @@ Base.unlock(envs::AbstractMPSEnvironments) = unlock(envs.lock);
# ------------------
function allocate_GL(bra::AbstractMPS, mpo::AbstractMPO, ket::AbstractMPS, i::Int)
T = Base.promote_type(scalartype(bra), scalartype(mpo), scalartype(ket))
+ TA = similarstoragetype(storagetype(mpo), T)
Member:
I know this is already a strict improvement, but to make this work in full generality I don't think only using the storagetype of the mpo is really sufficient. I think TensorKit has a promote_storagetype function that can be used to also take into account the storagetype of the states

Member Author:
Sure, I can modify this to take into account the backing types of bra and ket too
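A hedged sketch of what that could look like. The name promote_storagetype is taken from the comment above, so its existence and exact signature are assumptions; similarstoragetype is used with the same signature as elsewhere in this diff:

```julia
# Sketch only: promote the backing array types of bra, mpo and ket before
# re-parametrizing the result with the promoted scalar type.
T = Base.promote_type(scalartype(bra), scalartype(mpo), scalartype(ket))
TA = foldl(TensorKit.promote_storagetype, (storagetype(bra), storagetype(mpo), storagetype(ket)))
TA = similarstoragetype(TA, T)
```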

V = left_virtualspace(bra, i) ⊗ left_virtualspace(mpo, i)' ←
left_virtualspace(ket, i)
if V isa BlockTensorKit.TensorMapSumSpace
- TT = blocktensormaptype(spacetype(bra), numout(V), numin(V), T)
+ TT = blocktensormaptype(spacetype(bra), numout(V), numin(V), TA)
else
- TT = TensorMap{T}
+ TT = TensorKit.TensorMapWithStorage{T, TA}
Member:
Suggested change:
- TT = TensorKit.TensorMapWithStorage{T, TA}
+ TT = TensorKit.tensormaptype(spacetype(bra), numout(V), numin(V), TA)

end
return TT(undef, V)
end

function allocate_GR(bra::AbstractMPS, mpo::AbstractMPO, ket::AbstractMPS, i::Int)
T = Base.promote_type(scalartype(bra), scalartype(mpo), scalartype(ket))
+ TA = similarstoragetype(storagetype(mpo), T)
V = right_virtualspace(ket, i) ⊗ right_virtualspace(mpo, i) ←
right_virtualspace(bra, i)
if V isa BlockTensorKit.TensorMapSumSpace
- TT = blocktensormaptype(spacetype(bra), numout(V), numin(V), T)
+ TT = blocktensormaptype(spacetype(bra), numout(V), numin(V), TA)
else
- TT = TensorMap{T}
+ TT = TensorKit.TensorMapWithStorage{T, TA}
end
return TT(undef, V)
end

function allocate_GBL(bra::QP, mpo::AbstractMPO, ket::QP, i::Int)
T = Base.promote_type(scalartype(bra), scalartype(mpo), scalartype(ket))
+ TA = similarstoragetype(storagetype(mpo), T)
V = left_virtualspace(bra.left_gs, i) ⊗ left_virtualspace(mpo, i)' ←
auxiliaryspace(ket)' ⊗ left_virtualspace(ket.right_gs, i)
if V isa BlockTensorKit.TensorMapSumSpace
- TT = blocktensormaptype(spacetype(bra), numout(V), numin(V), T)
+ TT = blocktensormaptype(spacetype(bra), numout(V), numin(V), TA)
else
- TT = TensorMap{T}
+ TT = TensorKit.TensorMapWithStorage{T, TA}
end
return TT(undef, V)
end

function allocate_GBR(bra::QP, mpo::AbstractMPO, ket::QP, i::Int)
T = Base.promote_type(scalartype(bra), scalartype(mpo), scalartype(ket))
+ TA = similarstoragetype(storagetype(mpo), T)
V = right_virtualspace(ket.left_gs, i) ⊗ right_virtualspace(mpo, i) ←
auxiliaryspace(ket)' ⊗ right_virtualspace(bra.right_gs, i)
if V isa BlockTensorKit.TensorMapSumSpace
- TT = blocktensormaptype(spacetype(bra), numout(V), numin(V), T)
+ TT = blocktensormaptype(spacetype(bra), numout(V), numin(V), TA)
else
- TT = TensorMap{T}
+ TT = TensorKit.TensorMapWithStorage{T, TA}
end
return TT(undef, V)
end
4 changes: 2 additions & 2 deletions src/operators/mpohamiltonian.jl
@@ -824,7 +824,7 @@ function Base.:*(H::FiniteMPOHamiltonian, mps::FiniteMPS)
)
)
# left to middle
- U = ones(scalartype(H), left_virtualspace(H, 1))
+ U = ones(storagetype(H), left_virtualspace(H, 1))
Member:
I think here we should actually call removeunit instead of writing this as a contraction. This is both more efficient and avoids having to deal with storagetype altogether.
Let me know if you need help with this though
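A hedged sketch of that removeunit route; the index convention of removeunit and the triviality of the boundary leg are assumptions here, not verified against this code:

```julia
# Sketch only: if the left virtual leg of H[1] is a trivial unit space, the
# contraction with conj(U[1]) merely strips that leg, which removeunit can do
# directly without materializing U.
H₁ = removeunit(H[1], 1)
@plansor a[-1 -2; -3 -4] := A[1][-1 2; -3] * H₁[-2; 2 -4]
```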

@plansor a[-1 -2; -3 -4] := A[1][-1 2; -3] * H[1][1 -2; 2 -4] * conj(U[1])
Q, R = qr_compact!(a)
A′[1] = TensorMap(Q)
@@ -836,7 +836,7 @@ function Base.:*(H::FiniteMPOHamiltonian, mps::FiniteMPS)
end

# right to middle
- U = ones(scalartype(H), right_virtualspace(H, N))
+ U = ones(storagetype(H), right_virtualspace(H, N))
@plansor a[-1 -2; -3 -4] := A[end][-1 2; -3] * H[end][-2 -4; 2 1] * U[1]
L, Q = lq_compact!(a)
A′[end] = transpose(TensorMap(Q), ((1, 3), (2,)))
2 changes: 2 additions & 0 deletions src/operators/multilinempo.jl
@@ -22,6 +22,8 @@ end
MultilineMPO(mpos::AbstractVector{<:AbstractMPO}) = Multiline(mpos)
MultilineMPO(t::MPOTensor) = MultilineMPO(PeriodicMatrix(fill(t, 1, 1)))

+ TensorKit.storagetype(M::MultilineMPO) = storagetype(M.data)
Member:
Does this work? I would expect you would need something like storagetype(eltype(M)), I don't think we defined storagetype(::AbstractVector) somewhere?

Member Author:
It does seem to work. Using storagetype all over makes things a lot more flexible, since we can call it more or less agnostically. We should have some function that allows us to figure out "what is your actual array type".
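For reference, the chain that appears to make this call resolve, pieced together from the rest of this diff (the description of Multiline's internals is an assumption):

```julia
# storagetype(M::MultilineMPO) forwards to storagetype(M.data); M.data is a
# periodic container of MPO rows, so with the method added to periodicarray.jl
# in this same PR,
#     TensorKit.storagetype(PA::PeriodicArray{T, N}) where {T, N} = storagetype(T)
# the call is forwarded to storagetype on the element type, and no generic
# storagetype(::AbstractVector) method is needed.
```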


# allow indexing with two indices
Base.getindex(t::MultilineMPO, ::Colon, j::Int) = Base.getindex.(t.data, j)
Base.getindex(t::MultilineMPO, i::Int, j) = Base.getindex(t[i], j)
18 changes: 9 additions & 9 deletions src/states/abstractmps.jl
@@ -33,14 +33,14 @@ Construct an `MPSTensor` with given physical and virtual spaces.
- `right_D::Int`: right virtual dimension
"""
function MPSTensor(
- ::UndefInitializer, eltype, P::Union{S, CompositeSpace{S}}, Vₗ::S, Vᵣ::S = Vₗ
- ) where {S <: ElementarySpace}
- return TensorMap{eltype}(undef, Vₗ ⊗ P ← Vᵣ)
+ ::UndefInitializer, ::Type{TorA}, P::Union{S, CompositeSpace{S}}, Vₗ::S, Vᵣ::S = Vₗ
+ ) where {S <: ElementarySpace, TorA}
+ return TensorKit.TensorMapWithStorage{TorA}(undef, Vₗ ⊗ P ← Vᵣ)
Member:
Suggestion to also use TensorKit.tensormaptype here instead.

That being said, this constructor is the worst type piracy I have ever committed, and I would be happy to not have to feel the shame of its existence anymore...
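A sketch of how the tensormaptype suggestion could look in this constructor; it mirrors the allocate_* helpers above and is an illustration of the suggestion, not code from the thread:

```julia
# Sketch only: let TensorKit.tensormaptype pick the concrete tensor type from
# the space type, the number of legs, and either a scalar or a storage type.
function MPSTensor(
        ::UndefInitializer, ::Type{TorA}, P::Union{S, CompositeSpace{S}}, Vₗ::S, Vᵣ::S = Vₗ
    ) where {S <: ElementarySpace, TorA}
    V = Vₗ ⊗ P ← Vᵣ
    TT = TensorKit.tensormaptype(S, numout(V), numin(V), TorA)
    return TT(undef, V)
end
```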

end
function MPSTensor(
- f, eltype, P::Union{S, CompositeSpace{S}}, Vₗ::S, Vᵣ::S = Vₗ
- ) where {S <: ElementarySpace}
- A = MPSTensor(undef, eltype, P, Vₗ, Vᵣ)
+ f, ::Type{TorA}, P::Union{S, CompositeSpace{S}}, Vₗ::S, Vᵣ::S = Vₗ
+ ) where {S <: ElementarySpace, TorA}
+ A = MPSTensor(undef, TorA, P, Vₗ, Vᵣ)
if f === rand
return rand!(A)
elseif f === randn
@@ -70,18 +70,18 @@ Construct an `MPSTensor` with given physical and virtual dimensions.
- `Dₗ::Int`: left virtual dimension
- `Dᵣ::Int`: right virtual dimension
"""
- MPSTensor(f, eltype, d::Int, Dₗ::Int, Dᵣ::Int = Dₗ) = MPSTensor(f, eltype, ℂ^d, ℂ^Dₗ, ℂ^Dᵣ)
+ MPSTensor(f, ::Type{TorA}, d::Int, Dₗ::Int, Dᵣ::Int = Dₗ) where {TorA} = MPSTensor(f, TorA, ℂ^d, ℂ^Dₗ, ℂ^Dᵣ)
MPSTensor(d::Int, Dₗ::Int; Dᵣ::Int = Dₗ) = MPSTensor(ℂ^d, ℂ^Dₗ, ℂ^Dᵣ)

"""
MPSTensor(A::AbstractArray)

Convert an array to an `MPSTensor`.
"""
- function MPSTensor(A::AbstractArray{T}) where {T <: Number}
+ function MPSTensor(A::AA) where {T <: Number, AA <: AbstractArray{T}}
@assert ndims(A) > 2 "MPSTensor should have at least 3 dims, but has $ndims(A)"
sz = size(A)
- t = TensorMap(undef, T, foldl(⊗, ComplexSpace.(sz[1:(end - 1)])) ← ℂ^sz[end])
+ t = TensorKit.TensorMapWithStorage{T, AA}(undef, foldl(⊗, ComplexSpace.(sz[1:(end - 1)])) ← ℂ^sz[end])
t[] .= A
Member (on lines +81 to 85):
Suggested change:
- function MPSTensor(A::AA) where {T <: Number, AA <: AbstractArray{T}}
-     @assert ndims(A) > 2 "MPSTensor should have at least 3 dims, but has $ndims(A)"
-     sz = size(A)
-     t = TensorKit.TensorMapWithStorage{T, AA}(undef, foldl(⊗, ComplexSpace.(sz[1:(end - 1)])) ← ℂ^sz[end])
-     t[] .= A
+ function MPSTensor(A::AbstractArray{<:Number})
+     @assert ndims(A) > 2 "MPSTensor should have at least 3 dims, but has $ndims(A)"
+     sz = size(A)
+     V = foldl(⊗, ComplexSpace.(sz[1:(end - 1)])) ← ℂ^sz[end]
+     t = TensorMap(A, V)

I think this might be slightly cleaner, simply making use of TensorKit functionality?

return t
end
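A hypothetical usage of this array constructor (the dimensions are illustrative only):

```julia
# The trailing array dimension becomes the right virtual space; the leading
# dimensions form the codomain, here (ℂ^2 ⊗ ℂ^3) ← ℂ^4.
A = randn(ComplexF64, 2, 3, 4)
t = MPSTensor(A)
```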
10 changes: 8 additions & 2 deletions src/states/ortho.jl
@@ -21,7 +21,7 @@ $(TYPEDFIELDS)
verbosity::Int = VERBOSE_WARN

"algorithm used for orthogonalization of the tensors"
- alg_orth = LAPACK_HouseholderQR(; positive = true)
+ alg_orth = Defaults.alg_qr()
"algorithm used for the eigensolver"
alg_eigsolve = _GAUGE_ALG_EIGSOLVE
"minimal amount of iterations before using the eigensolver steps"
@@ -46,7 +46,7 @@ $(TYPEDFIELDS)
verbosity::Int = VERBOSE_WARN

"algorithm used for orthogonalization of the tensors"
- alg_orth = LAPACK_HouseholderLQ(; positive = true)
+ alg_orth = Defaults.alg_lq()
"algorithm used for the eigensolver"
alg_eigsolve = _GAUGE_ALG_EIGSOLVE
"minimal amount of iterations before using the eigensolver steps"
@@ -80,9 +80,15 @@ function MixedCanonical(;
if alg_orth isa LAPACK_HouseholderQR
alg_leftorth = alg_orth
alg_rightorth = LAPACK_HouseholderLQ(; alg_orth.kwargs...)
+ elseif alg_orth isa CUSOLVER_HouseholderQR
+ alg_leftorth = alg_orth
+ alg_rightorth = LQViaTransposedQR(CUSOLVER_HouseholderQR(; alg_orth.kwargs...))
elseif alg_orth isa LAPACK_HouseholderLQ
alg_leftorth = LAPACK_HouseholderQR(; alg_orth.kwargs...)
alg_rightorth = alg_orth
+ elseif alg_orth isa LQViaTransposedQR
+ alg_leftorth = alg_orth
+ alg_rightorth = alg_orth.qr_alg
else
alg_leftorth = alg_rightorth = alg_orth
end
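A usage sketch of the new branches; whether the cuSOLVER algorithm type accepts the same keyword arguments as its LAPACK counterpart is an assumption, so constructor arguments are omitted:

```julia
# Sketch only: requesting a GPU-backed QR now also selects a compatible right
# orthogonalization, with the LQ realised via a transposed QR.
alg = MixedCanonical(; alg_orth = CUSOLVER_HouseholderQR())
# the CUSOLVER_HouseholderQR branch above then yields
#   alg_leftorth  = CUSOLVER_HouseholderQR()
#   alg_rightorth = LQViaTransposedQR(CUSOLVER_HouseholderQR())
```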
1 change: 1 addition & 0 deletions src/utility/defaults.jl
@@ -10,6 +10,7 @@ using OhMyThreads
using ..MPSKit: DynamicTol
using TensorKit: TensorKit
using MatrixAlgebraKit: LAPACK_HouseholderQR, LAPACK_HouseholderLQ, LAPACK_DivideAndConquer
+ using MatrixAlgebraKit: CUSOLVER_HouseholderQR, LQViaTransposedQR, CUSOLVER_Jacobi

const VERBOSE_NONE = 0
const VERBOSE_WARN = 1
2 changes: 2 additions & 0 deletions src/utility/periodicarray.jl
@@ -38,6 +38,8 @@ function PeriodicArray{T, N}(initializer, args...) where {T, N}
return PeriodicArray(Array{T, N}(initializer, args...))
end

+ TensorKit.storagetype(PA::PeriodicArray{T, N}) where {T, N} = storagetype(T)

Member (on lines +41 to +42):
I'm a little hesitant to adopt storagetype in such a general manner here in MPSKit, since I don't think it is even exported by TensorKit. If this function should exist, I think we would have to decide on storagetype(A::AbstractArray) = storagetype(eltype(A)), but I'm not sure that is really in the scope of that (somewhat internal) function.
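For concreteness, the generic rule floated above would read as follows (not part of this diff):

```julia
# Sketch of the alternative mentioned in the comment: forward through eltype
# for any array, instead of special-casing PeriodicArray.
TensorKit.storagetype(A::AbstractArray) = storagetype(eltype(A))
```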

"""
PeriodicVector{T}
Expand Down