Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[NDTensors] DiagonalArray tensor operations #1226

Merged
merged 43 commits into from
Nov 1, 2023
Merged
Show file tree
Hide file tree
Changes from 26 commits
Commits
Show all changes
43 commits
Select commit Hold shift + click to select a range
9b7f469
[NDTensors] Add more operations for Array storage
mtfishman Oct 27, 2023
cbd0307
Get more Array storage functionality working
mtfishman Oct 27, 2023
2a6b8da
Format
mtfishman Oct 27, 2023
460082d
New similar definition
mtfishman Oct 27, 2023
821dbdf
Merge branch 'main' into NDTensors_arraytensor_ops
mtfishman Oct 30, 2023
ffe3f66
Define contraction of ArrayStorage with Diag
mtfishman Oct 30, 2023
9c571bc
Format
mtfishman Oct 30, 2023
cc3136c
Format
mtfishman Oct 30, 2023
74d722d
Reorganize Combiner code
mtfishman Oct 30, 2023
ed772e1
Merge branch 'NDTensors_arraytensor_ops' of github.com:ITensor/ITenso…
mtfishman Oct 30, 2023
a40e4a3
Get eigen working with Array storage
mtfishman Oct 30, 2023
7c0509b
Improve eigen code a bit
mtfishman Oct 30, 2023
6febc37
Format
mtfishman Oct 30, 2023
2208569
Fix tests
mtfishman Oct 30, 2023
25814f6
Merge branch 'NDTensors_arraytensor_ops' of github.com:ITensor/ITenso…
mtfishman Oct 30, 2023
45e8b87
Format
mtfishman Oct 30, 2023
2801f7a
Format
mtfishman Oct 30, 2023
a07f808
Fix tests
mtfishman Oct 30, 2023
c14150c
Merge branch 'NDTensors_arraytensor_ops' of github.com:ITensor/ITenso…
mtfishman Oct 30, 2023
62339ba
[NDTensors] Add DiagonalArrays submodule
mtfishman Oct 30, 2023
9d4d7eb
Add tests
mtfishman Oct 30, 2023
b537e6a
Format
mtfishman Oct 30, 2023
ba24b1c
Load Compat for allequal
mtfishman Oct 30, 2023
9c457d9
Merge branch 'main' into NDTensors_diagonalarray
mtfishman Oct 30, 2023
e865d27
[NDTensors] DiagonalArray tensor operations
mtfishman Oct 30, 2023
d59fcfb
Merge main
mtfishman Oct 31, 2023
ff0a504
Format
mtfishman Oct 31, 2023
e35483e
Merge branch 'main' into NDTensors_diagonalarray_ops
mtfishman Oct 31, 2023
d714089
Get more DiagonalArray tensor operations working, start improving kwargs
mtfishman Oct 31, 2023
f2d65d2
More work on kwargs
mtfishman Oct 31, 2023
38b32a6
Fix SVD
mtfishman Oct 31, 2023
9c4ce2c
More kwarg refactoring
mtfishman Oct 31, 2023
34b229b
Fix issues with new kwarg design
mtfishman Oct 31, 2023
766f945
Refactor SVD code
mtfishman Oct 31, 2023
970b112
Fix some more tests
mtfishman Oct 31, 2023
ffec9de
Small change to kwarg logic
mtfishman Oct 31, 2023
7668e86
Fix some more tests
mtfishman Oct 31, 2023
5e65813
Format
mtfishman Oct 31, 2023
0e288ea
Fix some more tests
mtfishman Oct 31, 2023
1ef4c5c
Fix bug in truncate
mtfishman Oct 31, 2023
31e6209
Fix some more tests
mtfishman Oct 31, 2023
42fada1
Some more kwarg changes
mtfishman Oct 31, 2023
251f5a0
Fix tests
mtfishman Nov 1, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 23 additions & 1 deletion NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ module DiagonalArrays
using Compat # allequal
using LinearAlgebra

export DiagonalArray
export DiagonalArray, DiagonalMatrix, DiagonalVector

include("diagview.jl")

Expand All @@ -19,6 +19,9 @@ struct DiagonalArray{T,N,Diag<:AbstractVector{T},Zero} <: AbstractArray{T,N}
zero::Zero
end

# Convenience aliases for the 1-D and 2-D special cases of `DiagonalArray`.
const DiagonalVector{T,Diag,Zero} = DiagonalArray{T,1,Diag,Zero}
const DiagonalMatrix{T,Diag,Zero} = DiagonalArray{T,2,Diag,Zero}

function DiagonalArray{T,N}(
diag::AbstractVector{T}, d::Tuple{Vararg{Int,N}}, zero=DefaultZero()
) where {T,N}
Expand Down Expand Up @@ -53,6 +56,25 @@ function DiagonalArray(diag::AbstractVector{T}, d::Vararg{Int,N}) where {T,N}
return DiagonalArray{T,N}(diag, d)
end

default_size(diag::AbstractVector, n) = ntuple(Returns(length(diag)), n)

# Construct from the diagonal alone, inferring the size: each of the `N`
# dimensions has length `length(diag)`.
function DiagonalArray{T,N}(diag::AbstractVector) where {T,N}
  sz = default_size(diag, N)
  return DiagonalArray{T,N}(diag, sz)
end

# Infer the element type from the diagonal's element type.
DiagonalArray{<:Any,N}(diag::AbstractVector{T}) where {T,N} = DiagonalArray{T,N}(diag)

# Build a 2-D `DiagonalArray` (i.e. a `DiagonalMatrix`) from its diagonal entries.
DiagonalMatrix(diag::AbstractVector) = DiagonalArray{<:Any,2}(diag)

# Build a 1-D `DiagonalArray` (i.e. a `DiagonalVector`) from its diagonal entries.
DiagonalVector(diag::AbstractVector) = DiagonalArray{<:Any,1}(diag)

# undef
function DiagonalArray{T,N}(::UndefInitializer, d::Tuple{Vararg{Int,N}}) where {T,N}
return DiagonalArray{T,N}(Vector{T}(undef, minimum(d)), d)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ const ArrayStorage{T,N} = Union{
SubArray{T,N},
PermutedDimsArray{T,N},
StridedView{T,N},
DiagonalArray{T,N},
BlockSparseArray{T,N},
}

Expand Down
3 changes: 1 addition & 2 deletions NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@ function eigen(
Vinds = indstype((dag(ind(T, 2)), dag(r)))
Dinds = indstype((l, dag(r)))
V = tensor(VM, Vinds)
# TODO: Replace with `DiagonalArray`.
D = tensor(Diag(DM), Dinds)
D = tensor(DiagonalMatrix(DM), Dinds)
return D, V, spec
end
2 changes: 1 addition & 1 deletion NDTensors/src/arraystorage/arraystorage/tensor/svd.jl
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ function svd(
Sinds = indstype((u, v))
Vinds = indstype((ind(T, 2), v))
U = tensor(MU, Uinds)
S = tensor(Diag(MS), Sinds)
S = tensor(DiagonalMatrix(MS), Sinds)
V = tensor(MV, Vinds)
return U, S, V, spec
end
64 changes: 42 additions & 22 deletions NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl
Original file line number Diff line number Diff line change
@@ -1,30 +1,49 @@
function promote_rule(storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:Diag})
return promote_type(storagetype1, datatype(storagetype2))
# TODO: Move to a different file.
parenttype(::Type{<:DiagonalArray{<:Any,<:Any,P}}) where {P} = P

# TODO: Move to a different file.
function promote_rule(storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:DiagonalArray})
# TODO: Replace with `unwrap_type` once
mtfishman marked this conversation as resolved.
Show resolved Hide resolved
# https://github.com/ITensor/ITensors.jl/pull/1220
# is merged.
return promote_type(storagetype1, leaf_parenttype(storagetype2))
end

function contraction_output_type(
tensortype1::Type{<:DiagTensor}, tensortype2::Type{<:ArrayStorageTensor}, indsR
)
return similartype(promote_type(tensortype1, tensortype2), indsR)
# The output must be initialized as zero since it is sparse, cannot be undefined
# Overspecifying types to fix ambiguity error.
function contraction_output(T1::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, T2::ArrayStorageTensor, indsR) where {T,N}
return zero_contraction_output(T1, T2, indsR)
mtfishman marked this conversation as resolved.
Show resolved Hide resolved
end
contraction_output(T1::ArrayStorageTensor, T2::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, indsR) where {T,N} = contraction_output(T2, T1, indsR)

mtfishman marked this conversation as resolved.
Show resolved Hide resolved
function contraction_output_type(
tensortype1::Type{<:ArrayStorageTensor}, tensortype2::Type{<:DiagTensor}, indsR
)
return contraction_output_type(tensortype2, tensortype1, indsR)
# Overspecifying types to fix ambiguity error.
function contraction_output(tensor1::Tensor{T1,N1,<:DiagonalArray{T1,N1,<:AbstractVector{T1}}}, tensor2::Tensor{T2,N2,<:DiagonalArray{T2,N2,<:AbstractVector{T2}}}, indsR) where {T1,N1,T2,N2}
return zero_contraction_output(tensor1, tensor2, indsR)
mtfishman marked this conversation as resolved.
Show resolved Hide resolved
end

## function contraction_output_type(
## tensortype1::Type{<:Tensor{<:Any,<:Any,<:DiagonalArray}}, tensortype2::Type{<:ArrayStorageTensor}, indsR
## )
## return similartype(promote_type(tensortype1, tensortype2), indsR)
## end

## function contraction_output_type(
## tensortype1::Type{<:ArrayStorageTensor}, tensortype2::Type{<:DiagTensor}, indsR
## )
## return contraction_output_type(tensortype2, tensortype1, indsR)
## end

# TODO: Modernize this function, rewrite in terms of `Array` and `DiagonalArray`.
function contract!(
C::ArrayStorageTensor{ElC,NC},
Clabels,
A::DiagTensor{ElA,NA},
A::Tensor{ElA,NA,<:DiagonalArray{ElA,NA}},
Alabels,
B::ArrayStorageTensor{ElB,NB},
Blabels,
α::Number=one(ElC),
β::Number=zero(ElC);
convert_to_dense::Bool=true,
convert_to_dense::Bool=false,
) where {ElA,NA,ElB,NB,ElC,NC}
#@timeit_debug timer "diag-dense contract!" begin
if all(i -> i < 0, Blabels)
Expand All @@ -37,24 +56,24 @@ function contract!(
# Assumes C starts set to 0
c₁ = zero(ElC)
for i in 1:min_dim
c₁ += getdiagindex(A, i) * getdiagindex(B, i)
c₁ += DiagonalArrays.getdiagindex(A, i) * DiagonalArrays.getdiagindex(B, i)
end
setdiagindex!(C, α * c₁ + β * getdiagindex(C, 1), 1)
DiagonalArrays.setdiagindex!(C, α * c₁ + β * DiagonalArrays.getdiagindex(C, 1), 1)
else
# not all indices are summed over, set the diagonals of the result
# to the product of the diagonals of A and B
# TODO: should we make this return a Diag storage?
for i in 1:min_dim
setdiagindex!(
C, α * getdiagindex(A, i) * getdiagindex(B, i) + β * getdiagindex(C, i), i
DiagonalArrays.setdiagindex!(
C, α * DiagonalArrays.getdiagindex(A, i) * DiagonalArrays.getdiagindex(B, i) + β * DiagonalArrays.getdiagindex(C, i), i
)
mtfishman marked this conversation as resolved.
Show resolved Hide resolved
end
end
else
# Most general contraction
if convert_to_dense
# TODO: Define a conversion from `DiagonalArray` to `Array`.
contract!(C, Clabels, to_arraystorage(dense(A)), Alabels, B, Blabels, α, β)
# TODO: Define `densearray(::Tensor)`.
contract!(C, Clabels, tensor(DiagonalArrays.densearray(storage(A)), inds(A)), Alabels, B, Blabels, α, β)
else
mtfishman marked this conversation as resolved.
Show resolved Hide resolved
if !isone(α) || !iszero(β)
error(
Expand Down Expand Up @@ -115,10 +134,10 @@ function contract!(
coffset += ii * custride[i]
end
c = zero(ElC)
for j in 1:diaglength(A)
for j in 1:DiagonalArrays.diaglength(A)
# With α == 0 && β == 1
C[cstart + j * c_cstride + coffset] +=
getdiagindex(A, j) * B[bstart + j * b_cstride + boffset]
DiagonalArrays.getdiagindex(A, j) * B[bstart + j * b_cstride + boffset]
# XXX: not sure if this is correct
#C[cstart+j*c_cstride+coffset] += α * getdiagindex(A, j)* B[bstart+j*b_cstride+boffset] + β * C[cstart+j*c_cstride+coffset]
end
Expand All @@ -128,15 +147,16 @@ function contract!(
#end # @timeit
end

# Overspecifying types to fix ambiguity error.
function contract!(
C::ArrayStorageTensor,
Clabels,
A::ArrayStorageTensor,
Alabels,
B::DiagTensor,
B::Tensor{TB,NB,<:DiagonalArray{TB,NB}},
Blabels,
α::Number=one(eltype(C)),
β::Number=zero(eltype(C)),
)
) where {TB,NB}
return contract!(C, Clabels, B, Blabels, A, Alabels, α, β)
end
4 changes: 4 additions & 0 deletions src/tensor_operations/matrix_decomposition.jl
Original file line number Diff line number Diff line change
Expand Up @@ -556,11 +556,15 @@ function factorize_svd(
kwargs...,
)
leftdir, rightdir = -dir, -dir
@show A
USV = svd(A, Linds...; leftdir, rightdir, alg=svd_alg, kwargs...)
if isnothing(USV)
return nothing
end
U, S, V, spec, u, v = USV
@show U
@show S
@show V
if ortho == "left"
L, R = U, S * V
elseif ortho == "right"
Expand Down
Loading