From 690b2195b09a9b2ba2a37d799a39c77154b87c2d Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Mon, 30 Oct 2023 16:42:23 -0400 Subject: [PATCH] [NDTensors] Get more `Array` storage functionality working (#1222) --- NDTensors/src/NDTensors.jl | 29 +++- NDTensors/src/abstractarray/fill.jl | 4 +- NDTensors/src/abstractarray/similar.jl | 5 + .../arraystorage/storage/arraystorage.jl | 30 ++++ .../arraystorage/arraystorage/storage/conj.jl | 2 + .../arraystorage/storage/contract.jl} | 28 ++-- .../arraystorage/storage/permutedims.jl | 8 + .../arraystorage/tensor/arraystorage.jl | 22 +++ .../arraystorage/tensor/contract.jl | 31 ++++ .../arraystorage/arraystorage/tensor/eigen.jl | 74 +++++++++ .../arraystorage/tensor/indexing.jl | 8 + .../arraystorage/arraystorage/tensor/mul.jl | 6 + .../arraystorage/tensor/permutedims.jl | 9 ++ .../arraystorage/arraystorage/tensor/qr.jl | 10 ++ .../arraystorage/arraystorage/tensor/svd.jl | 103 +++++++++++++ .../arraystorage/arraystorage/tensor/zeros.jl | 13 ++ .../blocksparsearray/storage/contract.jl} | 0 .../arraystorage/combiner/storage/contract.jl | 1 + .../arraystorage/combiner/tensor/contract.jl | 80 ++++++++++ .../diagonalarray/tensor/contract.jl | 142 ++++++++++++++++++ NDTensors/src/arraytensor/arraytensor.jl | 116 -------------- NDTensors/src/blocksparse/linearalgebra.jl | 5 +- NDTensors/test/arraytensor/array.jl | 6 +- src/itensor.jl | 5 + src/mps/abstractmps.jl | 5 + .../base/test_arraystorage.jl | 23 +++ 26 files changed, 622 insertions(+), 143 deletions(-) create mode 100644 NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/storage/conj.jl rename NDTensors/src/{arraytensor/array.jl => arraystorage/arraystorage/storage/contract.jl} (71%) create mode 100644 NDTensors/src/arraystorage/arraystorage/storage/permutedims.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/contract.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/indexing.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/mul.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/permutedims.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/qr.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/svd.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl rename NDTensors/src/{arraytensor/blocksparsearray.jl => arraystorage/blocksparsearray/storage/contract.jl} (100%) create mode 100644 NDTensors/src/arraystorage/combiner/storage/contract.jl create mode 100644 NDTensors/src/arraystorage/combiner/tensor/contract.jl create mode 100644 NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl delete mode 100644 NDTensors/src/arraytensor/arraytensor.jl create mode 100644 test/ITensorLegacyMPS/base/test_arraystorage.jl diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index ec53a32515..446b3eedb2 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -132,11 +132,32 @@ include("empty/adapt.jl") ##################################### # Array Tensor (experimental) -# TODO: Move to `Experimental` module. 
# -include("arraytensor/arraytensor.jl") -include("arraytensor/array.jl") -include("arraytensor/blocksparsearray.jl") +include("arraystorage/arraystorage/storage/arraystorage.jl") +include("arraystorage/arraystorage/storage/conj.jl") +include("arraystorage/arraystorage/storage/permutedims.jl") +include("arraystorage/arraystorage/storage/contract.jl") + +include("arraystorage/arraystorage/tensor/arraystorage.jl") +include("arraystorage/arraystorage/tensor/zeros.jl") +include("arraystorage/arraystorage/tensor/indexing.jl") +include("arraystorage/arraystorage/tensor/permutedims.jl") +include("arraystorage/arraystorage/tensor/mul.jl") +include("arraystorage/arraystorage/tensor/contract.jl") +include("arraystorage/arraystorage/tensor/qr.jl") +include("arraystorage/arraystorage/tensor/eigen.jl") +include("arraystorage/arraystorage/tensor/svd.jl") + +# DiagonalArray storage +include("arraystorage/diagonalarray/tensor/contract.jl") + +# BlockSparseArray storage +include("arraystorage/blocksparsearray/storage/contract.jl") + +# Combiner storage +include("arraystorage/combiner/storage/contract.jl") + +include("arraystorage/combiner/tensor/contract.jl") ##################################### # Deprecations diff --git a/NDTensors/src/abstractarray/fill.jl b/NDTensors/src/abstractarray/fill.jl index 146459a4ce..eab9b830a8 100644 --- a/NDTensors/src/abstractarray/fill.jl +++ b/NDTensors/src/abstractarray/fill.jl @@ -8,10 +8,10 @@ function generic_randn( return randn!(rng, data) end -function generic_zeros(arraytype::Type{<:AbstractArray}, dim::Integer=0) +function generic_zeros(arraytype::Type{<:AbstractArray}, dims...) arraytype_specified = set_unspecified_parameters( leaf_parenttype(arraytype), DefaultParameters() ) ElT = eltype(arraytype_specified) - return fill!(similar(arraytype_specified, dim), zero(ElT)) + return fill!(similar(arraytype_specified, dims...), zero(ElT)) end diff --git a/NDTensors/src/abstractarray/similar.jl b/NDTensors/src/abstractarray/similar.jl index 4918e5150c..93bb1bf7aa 100644 --- a/NDTensors/src/abstractarray/similar.jl +++ b/NDTensors/src/abstractarray/similar.jl @@ -77,6 +77,11 @@ function similar(array::AbstractArray, eltype::Type, dims::Tuple) return NDTensors.similar(similartype(typeof(array), eltype), dims) end +# NDTensors.similar +function similar(array::AbstractArray, eltype::Type, dims::Int) + return NDTensors.similar(similartype(typeof(array), eltype), dims) +end + # NDTensors.similar similar(array::AbstractArray, dims::Tuple) = NDTensors.similar(typeof(array), dims) diff --git a/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl b/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl new file mode 100644 index 0000000000..996494b68a --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl @@ -0,0 +1,30 @@ +# Used for dispatch to distinguish from Tensors wrapping TensorStorage. +# Remove once TensorStorage is removed. +const ArrayStorage{T,N} = Union{ + Array{T,N}, + ReshapedArray{T,N}, + SubArray{T,N}, + PermutedDimsArray{T,N}, + StridedView{T,N}, + BlockSparseArray{T,N}, +} + +const MatrixStorage{T} = Union{ + ArrayStorage{T,2}, + Transpose{T}, + Adjoint{T}, + Symmetric{T}, + Hermitian{T}, + UpperTriangular{T}, + LowerTriangular{T}, + UnitUpperTriangular{T}, + UnitLowerTriangular{T}, + Diagonal{T}, +} + +const MatrixOrArrayStorage{T} = Union{MatrixStorage{T},ArrayStorage{T}} + +# TODO: Delete once `Dense` is removed. 
+function to_arraystorage(x::DenseTensor) + return tensor(reshape(data(x), size(x)), inds(x)) +end diff --git a/NDTensors/src/arraystorage/arraystorage/storage/conj.jl b/NDTensors/src/arraystorage/arraystorage/storage/conj.jl new file mode 100644 index 0000000000..51c6a39985 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/storage/conj.jl @@ -0,0 +1,2 @@ +conj(as::AliasStyle, A::AbstractArray) = conj(A) +conj(as::AllowAlias, A::Array{<:Real}) = A diff --git a/NDTensors/src/arraytensor/array.jl b/NDTensors/src/arraystorage/arraystorage/storage/contract.jl similarity index 71% rename from NDTensors/src/arraytensor/array.jl rename to NDTensors/src/arraystorage/arraystorage/storage/contract.jl index 997204394d..0770325f9b 100644 --- a/NDTensors/src/arraytensor/array.jl +++ b/NDTensors/src/arraystorage/arraystorage/storage/contract.jl @@ -1,6 +1,3 @@ -# Combiner -promote_rule(::Type{<:Combiner}, arraytype::Type{<:MatrixOrArrayStorage}) = arraytype - # Generic AbstractArray code function contract( array1::MatrixOrArrayStorage, @@ -43,26 +40,21 @@ function contraction_output( end # Required interface for specific AbstractArray types +# TODO: Define `default_α` and `default_β`. +# TODO: Define this as a `ttgt` or `matricize` backend. function contract!( - arrayR::MatrixOrArrayStorage, - labelsR, + array_dest::MatrixOrArrayStorage, + labels_dest, array1::MatrixOrArrayStorage, labels1, array2::MatrixOrArrayStorage, labels2, + α=one(eltype(array_dest)), + β=zero(eltype(array_dest)); ) - props = ContractionProperties(labels1, labels2, labelsR) - compute_contraction_properties!(props, array1, array2, arrayR) + props = ContractionProperties(labels1, labels2, labels_dest) + compute_contraction_properties!(props, array1, array2, array_dest) # TODO: Change this to just `contract!`, or maybe `contract_ttgt!`? 
- _contract!(arrayR, array1, array2, props) - return arrayR -end - -function permutedims!( - output_array::MatrixOrArrayStorage, array::MatrixOrArrayStorage, perm, f::Function -) - output_array = permutedims!!( - leaf_parenttype(output_array), output_array, leaf_parenttype(array), array, perm, f - ) - return output_array + _contract!(array_dest, array1, array2, props, α, β) + return array_dest end diff --git a/NDTensors/src/arraystorage/arraystorage/storage/permutedims.jl b/NDTensors/src/arraystorage/arraystorage/storage/permutedims.jl new file mode 100644 index 0000000000..c0536483cf --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/storage/permutedims.jl @@ -0,0 +1,8 @@ +function permutedims!( + output_array::MatrixOrArrayStorage, array::MatrixOrArrayStorage, perm, f::Function +) + output_array = permutedims!!( + leaf_parenttype(output_array), output_array, leaf_parenttype(array), array, perm, f + ) + return output_array +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl b/NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl new file mode 100644 index 0000000000..9419d51e05 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl @@ -0,0 +1,22 @@ +const ArrayStorageTensor{T,N,S,I} = Tensor{T,N,S,I} where {S<:ArrayStorage{T,N}} +const MatrixStorageTensor{T,S,I} = Tensor{T,2,S,I} where {S<:MatrixStorage{T}} +const MatrixOrArrayStorageTensor{T,S,I} = + Tensor{T,N,S,I} where {N,S<:MatrixOrArrayStorage{T}} + +function Tensor(storage::MatrixOrArrayStorageTensor, inds::Tuple) + return Tensor(NeverAlias(), storage, inds) +end + +function Tensor(as::AliasStyle, storage::MatrixOrArrayStorage, inds::Tuple) + return Tensor{eltype(storage),length(inds),typeof(storage),typeof(inds)}( + as, storage, inds + ) +end + +array(tensor::MatrixOrArrayStorageTensor) = storage(tensor) + +# Linear algebra (matrix algebra) +# TODO: Remove `Base.`? Is it imported? +function Base.adjoint(tens::MatrixStorageTensor) + return tensor(adjoint(storage(tens)), reverse(inds(tens))) +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl b/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl new file mode 100644 index 0000000000..309893bc31 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl @@ -0,0 +1,31 @@ +# TODO: Just call `contraction_output(storage(tensor1), storage(tensor2), indsR)` +function contraction_output( + tensor1::MatrixOrArrayStorageTensor, tensor2::MatrixOrArrayStorageTensor, indsR +) + tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) + return NDTensors.similar(tensortypeR, indsR) +end + +# TODO: Define `default_α` and `default_β`. +function contract!( + tensor_dest::MatrixOrArrayStorageTensor, + labels_dest, + tensor1::MatrixOrArrayStorageTensor, + labels1, + tensor2::MatrixOrArrayStorageTensor, + labels2, + α=one(eltype(tensor_dest)), + β=zero(eltype(tensor_dest)); +) + contract!( + storage(tensor_dest), + labels_dest, + storage(tensor1), + labels1, + storage(tensor2), + labels2, + α, + β, + ) + return tensor_dest +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl new file mode 100644 index 0000000000..d3147dcce6 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl @@ -0,0 +1,74 @@ +# TODO: Rewrite this function to be more modern: +# 1. List keyword arguments in function signature. +# 2. 
Output `Spectrum` as a keyword argument that gets overwritten.
+# 3. Make this into two layers, one that handles indices and one that works with `AbstractMatrix`.
+function eigen(
+  T::Hermitian{<:Any,<:ArrayStorageTensor};
+  maxdim=nothing,
+  mindim=1,
+  cutoff=nothing,
+  use_absolute_cutoff=false,
+  use_relative_cutoff=true,
+  # These are getting passed erroneously.
+  # TODO: Make sure they don't get passed down
+  # to here.
+  which_decomp=nothing,
+  tags=nothing,
+  eigen_perturbation=nothing,
+  normalize=nothing,
+  ishermitian=nothing,
+  ortho=nothing,
+  svd_alg=nothing,
+)
+  truncate = !isnothing(maxdim) || !isnothing(cutoff)
+  # TODO: Define `default_maxdim(T)`.
+  maxdim = isnothing(maxdim) ? minimum(dims(T)) : maxdim
+  # TODO: Define `default_cutoff(T)`.
+  cutoff = isnothing(cutoff) ? zero(eltype(T)) : cutoff
+
+  matrixT = matrix(T)
+  ## TODO: Here I am calling `parent` to ensure that the correct `any` function
+  ## is invoked for non-CPU matrices.
+  if any(!isfinite, parent(matrixT))
+    throw(
+      ArgumentError(
+        "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs"
+      ),
+    )
+  end
+
+  DM, VM = eigen(matrixT)
+
+  # Sort by largest to smallest eigenvalues
+  # TODO: Replace `cpu` with `leaf_parenttype` dispatch.
+  p = sortperm(cpu(DM); rev=true, by=abs)
+  DM = DM[p]
+  VM = VM[:, p]
+
+  if truncate
+    DM, truncerr, _ = truncate!!(
+      DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
+    )
+    dD = length(DM)
+    if dD < size(VM, 2)
+      VM = VM[:, 1:dD]
+    end
+  else
+    dD = length(DM)
+    truncerr = 0.0
+  end
+  spec = Spectrum(DM, truncerr)
+
+  # Make the new indices to go onto V
+  # TODO: Put in a separate function, such as
+  # `rewrap_inds` or something like that.
+  indstype = typeof(inds(T))
+  l = eltype(indstype)(dD)
+  r = eltype(indstype)(dD)
+  Vinds = indstype((dag(ind(T, 2)), dag(r)))
+  Dinds = indstype((l, dag(r)))
+  V = tensor(VM, Vinds)
+  # TODO: Replace with `DiagonalArray`.
+  D = tensor(Diag(DM), Dinds)
+  return D, V, spec
+end
diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/indexing.jl b/NDTensors/src/arraystorage/arraystorage/tensor/indexing.jl
new file mode 100644
index 0000000000..b5efbf81a1
--- /dev/null
+++ b/NDTensors/src/arraystorage/arraystorage/tensor/indexing.jl
@@ -0,0 +1,8 @@
+function getindex(tensor::MatrixOrArrayStorageTensor, I::Integer...)
+  return storage(tensor)[I...]
+end
+
+function setindex!(tensor::MatrixOrArrayStorageTensor, v, I::Integer...)
+  storage(tensor)[I...]
= v
+  return tensor
+end
diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/mul.jl b/NDTensors/src/arraystorage/arraystorage/tensor/mul.jl
new file mode 100644
index 0000000000..58593e58e1
--- /dev/null
+++ b/NDTensors/src/arraystorage/arraystorage/tensor/mul.jl
@@ -0,0 +1,6 @@
+function LinearAlgebra.mul!(
+  C::MatrixStorageTensor, A::MatrixStorageTensor, B::MatrixStorageTensor
+)
+  mul!(storage(C), storage(A), storage(B))
+  return C
+end
diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/permutedims.jl b/NDTensors/src/arraystorage/arraystorage/tensor/permutedims.jl
new file mode 100644
index 0000000000..0d3df8c547
--- /dev/null
+++ b/NDTensors/src/arraystorage/arraystorage/tensor/permutedims.jl
@@ -0,0 +1,9 @@
+function permutedims!(
+  output_tensor::MatrixOrArrayStorageTensor,
+  tensor::MatrixOrArrayStorageTensor,
+  perm,
+  f::Function,
+)
+  permutedims!(storage(output_tensor), storage(tensor), perm, f)
+  return output_tensor
+end
diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl b/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl
new file mode 100644
index 0000000000..04ae4501fc
--- /dev/null
+++ b/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl
@@ -0,0 +1,10 @@
+function qr(A::ArrayStorageTensor)
+  Q, R = qr(storage(A))
+  Q = convert(typeof(R), Q)
+  i, j = inds(A)
+  q = size(A, 1) < size(A, 2) ? i : j
+  q = sim(q)
+  Qₜ = tensor(Q, (i, q))
+  Rₜ = tensor(R, (dag(q), j))
+  return Qₜ, Rₜ
+end
diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl
new file mode 100644
index 0000000000..f00543959e
--- /dev/null
+++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl
@@ -0,0 +1,103 @@
+# TODO: Rewrite this function to be more modern:
+# 1. Output `Spectrum` as a keyword argument that gets overwritten.
+# 2. Dispatch on `alg`.
+# 3. Make this into two layers, one that handles indices and one that works with `Matrix`.
+"""
+    svd(T::ArrayStorageTensor{<:Number,2}; kwargs...)
+
+svd of an order-2 ArrayStorageTensor
+"""
+function svd(
+  T::ArrayStorageTensor;
+  maxdim=nothing,
+  mindim=1,
+  cutoff=nothing,
+  alg="divide_and_conquer", # TODO: Define `default_alg(T)`
+  use_absolute_cutoff=false,
+  use_relative_cutoff=true,
+  # These are getting passed erroneously.
+  # TODO: Make sure they don't get passed down
+  # to here.
+  which_decomp=nothing,
+  tags=nothing,
+  eigen_perturbation=nothing,
+  normalize=nothing,
+)
+  truncate = !isnothing(maxdim) || !isnothing(cutoff)
+  # TODO: Define `default_maxdim(T)`.
+  maxdim = isnothing(maxdim) ? minimum(dims(T)) : maxdim
+  # TODO: Define `default_cutoff(T)`.
+  cutoff = isnothing(cutoff) ? zero(eltype(T)) : cutoff
+
+  # TODO: Dispatch on `Algorithm(alg)`.
+ if alg == "divide_and_conquer" + MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.DivideAndConquer()) + if isnothing(MUSV) + # If "divide_and_conquer" fails, try "qr_iteration" + alg = "qr_iteration" + MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.QRIteration()) + if isnothing(MUSV) + # If "qr_iteration" fails, try "recursive" + alg = "recursive" + MUSV = svd_recursive(matrix(T)) + end + end + elseif alg == "qr_iteration" + MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.QRIteration()) + if isnothing(MUSV) + # If "qr_iteration" fails, try "recursive" + alg = "recursive" + MUSV = svd_recursive(matrix(T)) + end + elseif alg == "recursive" + MUSV = svd_recursive(matrix(T)) + elseif alg == "QRAlgorithm" || alg == "JacobiAlgorithm" + MUSV = svd_catch_error(matrix(T); alg=alg) + else + error( + "svd algorithm $alg is not currently supported. Please see the documentation for currently supported algorithms.", + ) + end + if isnothing(MUSV) + if any(isnan, T) + println("SVD failed, the matrix you were trying to SVD contains NaNs.") + else + println(lapack_svd_error_message(alg)) + end + return nothing + end + MU, MS, MV = MUSV + conj!(MV) + + P = MS .^ 2 + if truncate + P, truncerr, _ = truncate!!( + P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff + ) + else + truncerr = 0.0 + end + spec = Spectrum(P, truncerr) + dS = length(P) + if dS < length(MS) + MU = MU[:, 1:dS] + # Fails on some GPU backends like Metal. + # resize!(MS, dS) + MS = MS[1:dS] + MV = MV[:, 1:dS] + end + + # Make the new indices to go onto U and V + # TODO: Put in a separate function, such as + # `rewrap_inds` or something like that. + indstype = typeof(inds(T)) + u = eltype(indstype)(dS) + v = eltype(indstype)(dS) + Uinds = indstype((ind(T, 1), u)) + Sinds = indstype((u, v)) + Vinds = indstype((ind(T, 2), v)) + U = tensor(MU, Uinds) + S = tensor(Diag(MS), Sinds) + V = tensor(MV, Vinds) + return U, S, V, spec +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl b/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl new file mode 100644 index 0000000000..66759892e4 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl @@ -0,0 +1,13 @@ +function _zeros(tensortype::Type{<:ArrayStorageTensor}, inds) + return tensor(generic_zeros(storagetype(tensortype), dims(inds)), inds) +end + +# To resolve ambiguity error with `Base.zeros`. +function zeros(tensortype::Type{<:ArrayStorageTensor}, inds) + return _zeros(tensortype, inds) +end + +# To resolve ambiguity error with `Base.zeros`. 
+function zeros(tensortype::Type{<:ArrayStorageTensor}, inds::Tuple{Vararg{Integer}}) + return _zeros(tensortype, inds) +end diff --git a/NDTensors/src/arraytensor/blocksparsearray.jl b/NDTensors/src/arraystorage/blocksparsearray/storage/contract.jl similarity index 100% rename from NDTensors/src/arraytensor/blocksparsearray.jl rename to NDTensors/src/arraystorage/blocksparsearray/storage/contract.jl diff --git a/NDTensors/src/arraystorage/combiner/storage/contract.jl b/NDTensors/src/arraystorage/combiner/storage/contract.jl new file mode 100644 index 0000000000..0cc65460d9 --- /dev/null +++ b/NDTensors/src/arraystorage/combiner/storage/contract.jl @@ -0,0 +1 @@ +promote_rule(::Type{<:Combiner}, arraytype::Type{<:MatrixOrArrayStorage}) = arraytype diff --git a/NDTensors/src/arraystorage/combiner/tensor/contract.jl b/NDTensors/src/arraystorage/combiner/tensor/contract.jl new file mode 100644 index 0000000000..46e728daff --- /dev/null +++ b/NDTensors/src/arraystorage/combiner/tensor/contract.jl @@ -0,0 +1,80 @@ +function contraction_output( + tensor1::MatrixOrArrayStorageTensor, tensor2::CombinerTensor, indsR +) + tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) + return NDTensors.similar(tensortypeR, indsR) +end + +function contract!!( + output_tensor::ArrayStorageTensor, + output_tensor_labels, + combiner_tensor::CombinerTensor, + combiner_tensor_labels, + tensor::ArrayStorageTensor, + tensor_labels, +) + if ndims(combiner_tensor) ≤ 1 + # Empty combiner, acts as multiplying by 1 + output_tensor = permutedims!!( + output_tensor, tensor, getperm(output_tensor_labels, tensor_labels) + ) + return output_tensor + end + if is_index_replacement(tensor, tensor_labels, combiner_tensor, combiner_tensor_labels) + ui = setdiff(combiner_tensor_labels, tensor_labels)[] + newind = inds(combiner_tensor)[findfirst(==(ui), combiner_tensor_labels)] + cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels) + output_tensor_storage = copy(storage(tensor)) + output_tensor_inds = setindex(inds(tensor), newind, cpos2) + return NDTensors.tensor(output_tensor_storage, output_tensor_inds) + end + is_combining_contraction = is_combining( + tensor, tensor_labels, combiner_tensor, combiner_tensor_labels + ) + if is_combining_contraction + Alabels, Blabels = tensor_labels, combiner_tensor_labels + final_labels = contract_labels(Blabels, Alabels) + final_labels_n = contract_labels(combiner_tensor_labels, tensor_labels) + output_tensor_inds = inds(output_tensor) + if final_labels != final_labels_n + perm = getperm(final_labels_n, final_labels) + output_tensor_inds = permute(inds(output_tensor), perm) + output_tensor_labels = permute(output_tensor_labels, perm) + end + cpos1, output_tensor_cpos = intersect_positions( + combiner_tensor_labels, output_tensor_labels + ) + labels_comb = deleteat(combiner_tensor_labels, cpos1) + output_tensor_vl = [output_tensor_labels...] + for (ii, li) in enumerate(labels_comb) + insert!(output_tensor_vl, output_tensor_cpos + ii, li) + end + deleteat!(output_tensor_vl, output_tensor_cpos) + labels_perm = tuple(output_tensor_vl...) + perm = getperm(labels_perm, tensor_labels) + # TODO: Add a `reshape` for `ArrayStorageTensor`. + ## tensorp = reshape(output_tensor, NDTensors.permute(inds(tensor), perm)) + tensorp_inds = permute(inds(tensor), perm) + tensorp = NDTensors.tensor( + reshape(storage(output_tensor), dims(tensorp_inds)), tensorp_inds + ) + permutedims!(tensorp, tensor, perm) + # TODO: Add a `reshape` for `ArrayStorageTensor`. 
+ ## reshape(tensorp, output_tensor_inds) + return NDTensors.tensor( + reshape(storage(tensorp), dims(output_tensor_inds)), output_tensor_inds + ) + else # Uncombining + cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels) + output_tensor_storage = copy(storage(tensor)) + indsC = deleteat(inds(combiner_tensor), cpos1) + output_tensor_inds = insertat(inds(tensor), indsC, cpos2) + # TODO: Add a `reshape` for `ArrayStorageTensor`. + return NDTensors.tensor( + reshape(output_tensor_storage, dims(output_tensor_inds)), output_tensor_inds + ) + end + return invalid_combiner_contraction_error( + tensor, tensor_labels, combiner_tensor, combiner_tensor_labels + ) +end diff --git a/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl new file mode 100644 index 0000000000..e7011896a1 --- /dev/null +++ b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl @@ -0,0 +1,142 @@ +function promote_rule(storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:Diag}) + return promote_type(storagetype1, datatype(storagetype2)) +end + +function contraction_output_type( + tensortype1::Type{<:DiagTensor}, tensortype2::Type{<:ArrayStorageTensor}, indsR +) + return similartype(promote_type(tensortype1, tensortype2), indsR) +end + +function contraction_output_type( + tensortype1::Type{<:ArrayStorageTensor}, tensortype2::Type{<:DiagTensor}, indsR +) + return contraction_output_type(tensortype2, tensortype1, indsR) +end + +# TODO: Modernize this function, rewrite in terms of `Array` and `DiagonalArray`. +function contract!( + C::ArrayStorageTensor{ElC,NC}, + Clabels, + A::DiagTensor{ElA,NA}, + Alabels, + B::ArrayStorageTensor{ElB,NB}, + Blabels, + α::Number=one(ElC), + β::Number=zero(ElC); + convert_to_dense::Bool=true, +) where {ElA,NA,ElB,NB,ElC,NC} + #@timeit_debug timer "diag-dense contract!" begin + if all(i -> i < 0, Blabels) + # If all of B is contracted + # TODO: can also check NC+NB==NA + min_dim = min(minimum(dims(A)), minimum(dims(B))) + if length(Clabels) == 0 + # all indices are summed over, just add the product of the diagonal + # elements of A and B + # Assumes C starts set to 0 + c₁ = zero(ElC) + for i in 1:min_dim + c₁ += getdiagindex(A, i) * getdiagindex(B, i) + end + setdiagindex!(C, α * c₁ + β * getdiagindex(C, 1), 1) + else + # not all indices are summed over, set the diagonals of the result + # to the product of the diagonals of A and B + # TODO: should we make this return a Diag storage? + for i in 1:min_dim + setdiagindex!( + C, α * getdiagindex(A, i) * getdiagindex(B, i) + β * getdiagindex(C, i), i + ) + end + end + else + # Most general contraction + if convert_to_dense + # TODO: Define a conversion from `DiagonalArray` to `Array`. + contract!(C, Clabels, to_arraystorage(dense(A)), Alabels, B, Blabels, α, β) + else + if !isone(α) || !iszero(β) + error( + "`contract!(::ArrayStorageTensor, ::DiagTensor, ::ArrayStorageTensor, α, β; convert_to_dense = false)` with `α ≠ 1` or `β ≠ 0` is not currently supported. 
You can call it with `convert_to_dense = true` instead.", + ) + end + astarts = zeros(Int, length(Alabels)) + bstart = 0 + cstart = 0 + b_cstride = 0 + nbu = 0 + for ib in 1:length(Blabels) + ia = findfirst(==(Blabels[ib]), Alabels) + if !isnothing(ia) + b_cstride += stride(B, ib) + bstart += astarts[ia] * stride(B, ib) + else + nbu += 1 + end + end + + c_cstride = 0 + for ic in 1:length(Clabels) + ia = findfirst(==(Clabels[ic]), Alabels) + if !isnothing(ia) + c_cstride += stride(C, ic) + cstart += astarts[ia] * stride(C, ic) + end + end + + # strides of the uncontracted dimensions of + # B + bustride = zeros(Int, nbu) + custride = zeros(Int, nbu) + # size of the uncontracted dimensions of + # B, to be used in CartesianIndices + busize = zeros(Int, nbu) + n = 1 + for ib in 1:length(Blabels) + if Blabels[ib] > 0 + bustride[n] = stride(B, ib) + busize[n] = size(B, ib) + ic = findfirst(==(Blabels[ib]), Clabels) + custride[n] = stride(C, ic) + n += 1 + end + end + + boffset_orig = 1 - sum(strides(B)) + coffset_orig = 1 - sum(strides(C)) + cartesian_inds = CartesianIndices(Tuple(busize)) + for inds in cartesian_inds + boffset = boffset_orig + coffset = coffset_orig + for i in 1:nbu + ii = inds[i] + boffset += ii * bustride[i] + coffset += ii * custride[i] + end + c = zero(ElC) + for j in 1:diaglength(A) + # With α == 0 && β == 1 + C[cstart + j * c_cstride + coffset] += + getdiagindex(A, j) * B[bstart + j * b_cstride + boffset] + # XXX: not sure if this is correct + #C[cstart+j*c_cstride+coffset] += α * getdiagindex(A, j)* B[bstart+j*b_cstride+boffset] + β * C[cstart+j*c_cstride+coffset] + end + end + end + end + #end # @timeit +end + +function contract!( + C::ArrayStorageTensor, + Clabels, + A::ArrayStorageTensor, + Alabels, + B::DiagTensor, + Blabels, + α::Number=one(eltype(C)), + β::Number=zero(eltype(C)), +) + return contract!(C, Clabels, B, Blabels, A, Alabels, α, β) +end diff --git a/NDTensors/src/arraytensor/arraytensor.jl b/NDTensors/src/arraytensor/arraytensor.jl deleted file mode 100644 index 13607d240e..0000000000 --- a/NDTensors/src/arraytensor/arraytensor.jl +++ /dev/null @@ -1,116 +0,0 @@ -# Used for dispatch to distinguish from Tensors wrapping TensorStorage. -# Remove once TensorStorage is removed. -const ArrayStorage{T,N} = Union{ - Array{T,N}, - ReshapedArray{T,N}, - SubArray{T,N}, - PermutedDimsArray{T,N}, - StridedView{T,N}, - BlockSparseArray{T,N}, -} -const MatrixStorage{T} = Union{ - ArrayStorage{T,2}, - Transpose{T}, - Adjoint{T}, - Symmetric{T}, - Hermitian{T}, - UpperTriangular{T}, - LowerTriangular{T}, - UnitUpperTriangular{T}, - UnitLowerTriangular{T}, - Diagonal{T}, -} -const MatrixOrArrayStorage{T} = Union{MatrixStorage{T},ArrayStorage{T}} - -const ArrayStorageTensor{T,N,S,I} = Tensor{T,N,S,I} where {S<:ArrayStorage{T,N}} -const MatrixStorageTensor{T,S,I} = Tensor{T,2,S,I} where {S<:MatrixStorage{T}} -const MatrixOrArrayStorageTensor{T,S,I} = - Tensor{T,N,S,I} where {N,S<:MatrixOrArrayStorage{T}} - -function Tensor(storage::MatrixOrArrayStorageTensor, inds::Tuple) - return Tensor(NeverAlias(), storage, inds) -end - -function Tensor(as::AliasStyle, storage::MatrixOrArrayStorage, inds::Tuple) - return Tensor{eltype(storage),length(inds),typeof(storage),typeof(inds)}( - as, storage, inds - ) -end - -function getindex(tensor::MatrixOrArrayStorageTensor, I::Integer...) - return storage(tensor)[I...] -end - -function setindex!(tensor::MatrixOrArrayStorageTensor, v, I::Integer...) - storage(tensor)[I...] 
= v - return tensor -end - -# TODO: Just call `contraction_output(storage(tensor1), storage(tensor2), indsR)` -function contraction_output( - tensor1::MatrixOrArrayStorageTensor, tensor2::MatrixOrArrayStorageTensor, indsR -) - tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) - return NDTensors.similar(tensortypeR, indsR) -end - -function contract!( - tensorR::MatrixOrArrayStorageTensor, - labelsR, - tensor1::MatrixOrArrayStorageTensor, - labels1, - tensor2::MatrixOrArrayStorageTensor, - labels2, -) - contract!(storage(tensorR), labelsR, storage(tensor1), labels1, storage(tensor2), labels2) - return tensorR -end - -function permutedims!( - output_tensor::MatrixOrArrayStorageTensor, - tensor::MatrixOrArrayStorageTensor, - perm, - f::Function, -) - permutedims!(storage(output_tensor), storage(tensor), perm, f) - return output_tensor -end - -# Linear algebra (matrix algebra) -function Base.adjoint(tens::MatrixStorageTensor) - return tensor(adjoint(storage(tens)), reverse(inds(tens))) -end - -function LinearAlgebra.mul!( - C::MatrixStorageTensor, A::MatrixStorageTensor, B::MatrixStorageTensor -) - mul!(storage(C), storage(A), storage(B)) - return C -end - -function LinearAlgebra.svd(tens::MatrixStorageTensor) - F = svd(storage(tens)) - U, S, V = F.U, F.S, F.Vt - i, j = inds(tens) - # TODO: Make this more general with a `similar_ind` function, - # so the dimension can be determined from the length of `S`. - min_ij = dim(i) ≤ dim(j) ? i : j - α = sim(min_ij) # similar_ind(i, space(S)) - β = sim(min_ij) # similar_ind(i, space(S)) - Utensor = tensor(U, (i, α)) - # TODO: Remove conversion to `Diagonal` to make more general, or make a generic `Diagonal` concept that works for `BlockSparseArray`. - # Used for now to avoid introducing wrapper types. - Stensor = tensor(Diagonal(S), (α, β)) - Vtensor = tensor(V, (β, j)) - return Utensor, Stensor, Vtensor, Spectrum(nothing, 0.0) -end - -array(tensor::MatrixOrArrayStorageTensor) = storage(tensor) - -# Combiner -function contraction_output( - tensor1::MatrixOrArrayStorageTensor, tensor2::CombinerTensor, indsR -) - tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) - return NDTensors.similar(tensortypeR, indsR) -end diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 931f8b6011..70bf5c7972 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -34,7 +34,7 @@ per row/column, otherwise it fails. This assumption makes it so the result can be computed from the dense svds of seperate blocks. """ -function svd(T::BlockSparseMatrix{ElT}; kwargs...) where {ElT} +function svd(T::Tensor{ElT,2,<:BlockSparse}; kwargs...) where {ElT} alg::String = get(kwargs, :alg, "divide_and_conquer") min_blockdim::Int = get(kwargs, :min_blockdim, 0) truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) @@ -207,7 +207,8 @@ _eigen_eltypes(T::Hermitian{ElT,<:BlockSparseMatrix{ElT}}) where {ElT} = real(El _eigen_eltypes(T::BlockSparseMatrix{ElT}) where {ElT} = complex(ElT), complex(ElT) function eigen( - T::Union{Hermitian{ElT,<:BlockSparseMatrix{ElT}},BlockSparseMatrix{ElT}}; kwargs... 
+ T::Union{Hermitian{ElT,<:Tensor{ElT,2,<:BlockSparse}},Tensor{ElT,2,<:BlockSparse}}; + kwargs..., ) where {ElT<:Union{Real,Complex}} truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) diff --git a/NDTensors/test/arraytensor/array.jl b/NDTensors/test/arraytensor/array.jl index 39809d0db3..bbe8a82500 100644 --- a/NDTensors/test/arraytensor/array.jl +++ b/NDTensors/test/arraytensor/array.jl @@ -35,7 +35,11 @@ using NDTensors: storage, storagetype @test Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1)) U, S, V = svd(T1) - @test U * S * V ≈ T1 + + # TODO: Should this work? Currently broken. + @test_broken U * S * V ≈ T1 + # TODO: Should this require labels, or use existing labels? + @test contract(contract(U, (1, -1), S, (-1, 2)), (1, -1), V, (2, -1)) ≈ T1 T12 = contract(T1, (1, -1), T2, (-1, 2)) @test T12 ≈ T1 * T2 diff --git a/src/itensor.jl b/src/itensor.jl index 95e5ac9df7..e86d180f0e 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -125,6 +125,11 @@ ITensor(is, st::TensorStorage)::ITensor = ITensor(NeverAlias(), st, is) itensor(T::ITensor) = T ITensor(T::ITensor) = copy(T) +# TODO: Delete once `TensorStorage` is removed. +function NDTensors.to_arraystorage(x::ITensor) + return itensor(NDTensors.to_arraystorage(tensor(x))) +end + """ itensor(args...; kwargs...) diff --git a/src/mps/abstractmps.jl b/src/mps/abstractmps.jl index e1028e90d8..a6309e7d2f 100644 --- a/src/mps/abstractmps.jl +++ b/src/mps/abstractmps.jl @@ -16,6 +16,11 @@ size(m::AbstractMPS) = size(data(m)) ndims(m::AbstractMPS) = ndims(data(m)) +# TODO: Delete once `TensorStorage` is removed. +function NDTensors.to_arraystorage(x::AbstractMPS) + return NDTensors.to_arraystorage.(x) +end + function promote_itensor_eltype(m::Vector{ITensor}) T = isassigned(m, 1) ? eltype(m[1]) : Number for n in 2:length(m) diff --git a/test/ITensorLegacyMPS/base/test_arraystorage.jl b/test/ITensorLegacyMPS/base/test_arraystorage.jl new file mode 100644 index 0000000000..70fb698f18 --- /dev/null +++ b/test/ITensorLegacyMPS/base/test_arraystorage.jl @@ -0,0 +1,23 @@ +using ITensors +using Test + +@testset "Test ArrayStorage DMRG" begin + n = 4 + conserve_qns = false + s = siteinds("S=1/2", n; conserve_qns) + heisenberg_opsum = function (n) + os = OpSum() + for j in 1:(n - 1) + os += "Sz", j, "Sz", j + 1 + os += 0.5, "S+", j, "S-", j + 1 + os += 0.5, "S-", j, "S+", j + 1 + end + return os + end + H = MPO(heisenberg_opsum(n), s) + ψ = randomMPS(s, j -> isodd(j) ? "↑" : "↓"; linkdims=4) + dmrg_kwargs = (; nsweeps=2, cutoff=[1e-4, 1e-12], maxdim=10, outputlevel=0) + e1, ψ1 = dmrg(NDTensors.to_arraystorage.((H, ψ))...; dmrg_kwargs...) + e2, ψ2 = dmrg(H, ψ; dmrg_kwargs...) + @test e1 ≈ e2 +end
+end
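
A minimal usage sketch of the array-storage path exercised by this patch, following NDTensors/test/arraytensor/array.jl above. This is illustrative only, not part of the patch; it assumes `tensor` and `contract` are available via `using NDTensors`, and qualifies `to_arraystorage`, `array`, and `data` since they are not exported:

using NDTensors

# Wrap ordinary Julia arrays as tensors backed by `Array` storage.
T1 = tensor(randn(3, 4), (3, 4))
T2 = tensor(randn(4, 5), (4, 5))

# Contract over the shared index, labeled -1 on both tensors,
# using the label convention from the tests above.
T12 = contract(T1, (1, -1), T2, (-1, 2))
@assert T12 ≈ T1 * T2

# Convert a tensor with legacy `Dense` storage to the new storage format,
# as `NDTensors.to_arraystorage` does via `reshape(data(x), size(x))`.
D = tensor(Dense(randn(12)), (3, 4))
A = NDTensors.to_arraystorage(D)
@assert NDTensors.array(A) == reshape(NDTensors.data(D), size(D))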