From 9b7f469c5da279f650ca797e0140fb2b19f3933c Mon Sep 17 00:00:00 2001 From: mtfishman Date: Fri, 27 Oct 2023 09:06:39 -0400 Subject: [PATCH 01/36] [NDTensors] Add more operations for Array storage --- .../arraystorage/storage/arraystorage.jl | 25 ++++ .../arraystorage/storage/combiner.jl | 1 + .../arraystorage/storage/contract.jl} | 12 -- .../arraystorage/storage/permutedims.jl | 8 ++ .../arraystorage/tensor/arraystorage.jl | 27 ++++ .../arraystorage/tensor/combiner.jl | 6 + .../arraystorage/tensor/contract.jl | 19 +++ .../arraystorage/tensor/indexing.jl | 8 ++ .../arraystorage/arraystorage/tensor/mul.jl | 6 + .../arraystorage/tensor/permutedims.jl | 9 ++ .../arraystorage/arraystorage/tensor/svd.jl | 16 +++ .../blocksparsearray/storage/contract.jl} | 0 NDTensors/src/arraytensor/arraytensor.jl | 116 ------------------ 13 files changed, 125 insertions(+), 128 deletions(-) create mode 100644 NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/storage/combiner.jl rename NDTensors/src/{arraytensor/array.jl => arraystorage/arraystorage/storage/contract.jl} (82%) create mode 100644 NDTensors/src/arraystorage/arraystorage/storage/permutedims.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/contract.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/indexing.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/mul.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/permutedims.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/svd.jl rename NDTensors/src/{arraytensor/blocksparsearray.jl => arraystorage/blocksparsearray/storage/contract.jl} (100%) delete mode 100644 NDTensors/src/arraytensor/arraytensor.jl diff --git a/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl b/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl new file mode 100644 index 0000000000..e6f33d725e --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl @@ -0,0 +1,25 @@ +# Used for dispatch to distinguish from Tensors wrapping TensorStorage. +# Remove once TensorStorage is removed. 
+const ArrayStorage{T,N} = Union{ + Array{T,N}, + ReshapedArray{T,N}, + SubArray{T,N}, + PermutedDimsArray{T,N}, + StridedView{T,N}, + BlockSparseArray{T,N}, +} + +const MatrixStorage{T} = Union{ + ArrayStorage{T,2}, + Transpose{T}, + Adjoint{T}, + Symmetric{T}, + Hermitian{T}, + UpperTriangular{T}, + LowerTriangular{T}, + UnitUpperTriangular{T}, + UnitLowerTriangular{T}, + Diagonal{T}, +} + +const MatrixOrArrayStorage{T} = Union{MatrixStorage{T},ArrayStorage{T}} diff --git a/NDTensors/src/arraystorage/arraystorage/storage/combiner.jl b/NDTensors/src/arraystorage/arraystorage/storage/combiner.jl new file mode 100644 index 0000000000..0cc65460d9 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/storage/combiner.jl @@ -0,0 +1 @@ +promote_rule(::Type{<:Combiner}, arraytype::Type{<:MatrixOrArrayStorage}) = arraytype diff --git a/NDTensors/src/arraytensor/array.jl b/NDTensors/src/arraystorage/arraystorage/storage/contract.jl similarity index 82% rename from NDTensors/src/arraytensor/array.jl rename to NDTensors/src/arraystorage/arraystorage/storage/contract.jl index 997204394d..b88628b326 100644 --- a/NDTensors/src/arraytensor/array.jl +++ b/NDTensors/src/arraystorage/arraystorage/storage/contract.jl @@ -1,6 +1,3 @@ -# Combiner -promote_rule(::Type{<:Combiner}, arraytype::Type{<:MatrixOrArrayStorage}) = arraytype - # Generic AbstractArray code function contract( array1::MatrixOrArrayStorage, @@ -57,12 +54,3 @@ function contract!( _contract!(arrayR, array1, array2, props) return arrayR end - -function permutedims!( - output_array::MatrixOrArrayStorage, array::MatrixOrArrayStorage, perm, f::Function -) - output_array = permutedims!!( - leaf_parenttype(output_array), output_array, leaf_parenttype(array), array, perm, f - ) - return output_array -end diff --git a/NDTensors/src/arraystorage/arraystorage/storage/permutedims.jl b/NDTensors/src/arraystorage/arraystorage/storage/permutedims.jl new file mode 100644 index 0000000000..c0536483cf --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/storage/permutedims.jl @@ -0,0 +1,8 @@ +function permutedims!( + output_array::MatrixOrArrayStorage, array::MatrixOrArrayStorage, perm, f::Function +) + output_array = permutedims!!( + leaf_parenttype(output_array), output_array, leaf_parenttype(array), array, perm, f + ) + return output_array +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl b/NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl new file mode 100644 index 0000000000..72fbc1d535 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl @@ -0,0 +1,27 @@ +const ArrayStorageTensor{T,N,S,I} = Tensor{T,N,S,I} where {S<:ArrayStorage{T,N}} +const MatrixStorageTensor{T,S,I} = Tensor{T,2,S,I} where {S<:MatrixStorage{T}} +const MatrixOrArrayStorageTensor{T,S,I} = + Tensor{T,N,S,I} where {N,S<:MatrixOrArrayStorage{T}} + +function Tensor(storage::MatrixOrArrayStorageTensor, inds::Tuple) + return Tensor(NeverAlias(), storage, inds) +end + +function Tensor(as::AliasStyle, storage::MatrixOrArrayStorage, inds::Tuple) + return Tensor{eltype(storage),length(inds),typeof(storage),typeof(inds)}( + as, storage, inds + ) +end + +array(tensor::MatrixOrArrayStorageTensor) = storage(tensor) + +# Linear algebra (matrix algebra) +function Base.adjoint(tens::MatrixStorageTensor) + return tensor(adjoint(storage(tens)), reverse(inds(tens))) +end + +# Conversion from a tensor with TensorStorage storage +# to AbstractArray storage, +function to_arraytensor(x::DenseTensor) + return 
tensor(reshape(data(x), size(x)), inds(x)) +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl b/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl new file mode 100644 index 0000000000..65c45c565e --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl @@ -0,0 +1,6 @@ +function contraction_output( + tensor1::MatrixOrArrayStorageTensor, tensor2::CombinerTensor, indsR +) + tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) + return NDTensors.similar(tensortypeR, indsR) +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl b/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl new file mode 100644 index 0000000000..80ab295b65 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl @@ -0,0 +1,19 @@ +# TODO: Just call `contraction_output(storage(tensor1), storage(tensor2), indsR)` +function contraction_output( + tensor1::MatrixOrArrayStorageTensor, tensor2::MatrixOrArrayStorageTensor, indsR +) + tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) + return NDTensors.similar(tensortypeR, indsR) +end + +function contract!( + tensorR::MatrixOrArrayStorageTensor, + labelsR, + tensor1::MatrixOrArrayStorageTensor, + labels1, + tensor2::MatrixOrArrayStorageTensor, + labels2, +) + contract!(storage(tensorR), labelsR, storage(tensor1), labels1, storage(tensor2), labels2) + return tensorR +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/indexing.jl b/NDTensors/src/arraystorage/arraystorage/tensor/indexing.jl new file mode 100644 index 0000000000..b5efbf81a1 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/indexing.jl @@ -0,0 +1,8 @@ +function getindex(tensor::MatrixOrArrayStorageTensor, I::Integer...) + return storage(tensor)[I...] +end + +function setindex!(tensor::MatrixOrArrayStorageTensor, v, I::Integer...) + storage(tensor)[I...] = v + return tensor +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/mul.jl b/NDTensors/src/arraystorage/arraystorage/tensor/mul.jl new file mode 100644 index 0000000000..58593e58e1 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/mul.jl @@ -0,0 +1,6 @@ +function LinearAlgebra.mul!( + C::MatrixStorageTensor, A::MatrixStorageTensor, B::MatrixStorageTensor +) + mul!(storage(C), storage(A), storage(B)) + return C +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/permutedims.jl b/NDTensors/src/arraystorage/arraystorage/tensor/permutedims.jl new file mode 100644 index 0000000000..0d3df8c547 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/permutedims.jl @@ -0,0 +1,9 @@ +function permutedims!( + output_tensor::MatrixOrArrayStorageTensor, + tensor::MatrixOrArrayStorageTensor, + perm, + f::Function, +) + permutedims!(storage(output_tensor), storage(tensor), perm, f) + return output_tensor +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl new file mode 100644 index 0000000000..3e128d0abd --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl @@ -0,0 +1,16 @@ +function LinearAlgebra.svd(tens::MatrixStorageTensor) + F = svd(storage(tens)) + U, S, V = F.U, F.S, F.Vt + i, j = inds(tens) + # TODO: Make this more general with a `similar_ind` function, + # so the dimension can be determined from the length of `S`. + min_ij = dim(i) ≤ dim(j) ? 
i : j + α = sim(min_ij) # similar_ind(i, space(S)) + β = sim(min_ij) # similar_ind(i, space(S)) + Utensor = tensor(U, (i, α)) + # TODO: Remove conversion to `Diagonal` to make more general, or make a generic `Diagonal` concept that works for `BlockSparseArray`. + # Used for now to avoid introducing wrapper types. + Stensor = tensor(Diagonal(S), (α, β)) + Vtensor = tensor(V, (β, j)) + return Utensor, Stensor, Vtensor, Spectrum(nothing, 0.0) +end diff --git a/NDTensors/src/arraytensor/blocksparsearray.jl b/NDTensors/src/arraystorage/blocksparsearray/storage/contract.jl similarity index 100% rename from NDTensors/src/arraytensor/blocksparsearray.jl rename to NDTensors/src/arraystorage/blocksparsearray/storage/contract.jl diff --git a/NDTensors/src/arraytensor/arraytensor.jl b/NDTensors/src/arraytensor/arraytensor.jl deleted file mode 100644 index 13607d240e..0000000000 --- a/NDTensors/src/arraytensor/arraytensor.jl +++ /dev/null @@ -1,116 +0,0 @@ -# Used for dispatch to distinguish from Tensors wrapping TensorStorage. -# Remove once TensorStorage is removed. -const ArrayStorage{T,N} = Union{ - Array{T,N}, - ReshapedArray{T,N}, - SubArray{T,N}, - PermutedDimsArray{T,N}, - StridedView{T,N}, - BlockSparseArray{T,N}, -} -const MatrixStorage{T} = Union{ - ArrayStorage{T,2}, - Transpose{T}, - Adjoint{T}, - Symmetric{T}, - Hermitian{T}, - UpperTriangular{T}, - LowerTriangular{T}, - UnitUpperTriangular{T}, - UnitLowerTriangular{T}, - Diagonal{T}, -} -const MatrixOrArrayStorage{T} = Union{MatrixStorage{T},ArrayStorage{T}} - -const ArrayStorageTensor{T,N,S,I} = Tensor{T,N,S,I} where {S<:ArrayStorage{T,N}} -const MatrixStorageTensor{T,S,I} = Tensor{T,2,S,I} where {S<:MatrixStorage{T}} -const MatrixOrArrayStorageTensor{T,S,I} = - Tensor{T,N,S,I} where {N,S<:MatrixOrArrayStorage{T}} - -function Tensor(storage::MatrixOrArrayStorageTensor, inds::Tuple) - return Tensor(NeverAlias(), storage, inds) -end - -function Tensor(as::AliasStyle, storage::MatrixOrArrayStorage, inds::Tuple) - return Tensor{eltype(storage),length(inds),typeof(storage),typeof(inds)}( - as, storage, inds - ) -end - -function getindex(tensor::MatrixOrArrayStorageTensor, I::Integer...) - return storage(tensor)[I...] -end - -function setindex!(tensor::MatrixOrArrayStorageTensor, v, I::Integer...) - storage(tensor)[I...] 
= v - return tensor -end - -# TODO: Just call `contraction_output(storage(tensor1), storage(tensor2), indsR)` -function contraction_output( - tensor1::MatrixOrArrayStorageTensor, tensor2::MatrixOrArrayStorageTensor, indsR -) - tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) - return NDTensors.similar(tensortypeR, indsR) -end - -function contract!( - tensorR::MatrixOrArrayStorageTensor, - labelsR, - tensor1::MatrixOrArrayStorageTensor, - labels1, - tensor2::MatrixOrArrayStorageTensor, - labels2, -) - contract!(storage(tensorR), labelsR, storage(tensor1), labels1, storage(tensor2), labels2) - return tensorR -end - -function permutedims!( - output_tensor::MatrixOrArrayStorageTensor, - tensor::MatrixOrArrayStorageTensor, - perm, - f::Function, -) - permutedims!(storage(output_tensor), storage(tensor), perm, f) - return output_tensor -end - -# Linear algebra (matrix algebra) -function Base.adjoint(tens::MatrixStorageTensor) - return tensor(adjoint(storage(tens)), reverse(inds(tens))) -end - -function LinearAlgebra.mul!( - C::MatrixStorageTensor, A::MatrixStorageTensor, B::MatrixStorageTensor -) - mul!(storage(C), storage(A), storage(B)) - return C -end - -function LinearAlgebra.svd(tens::MatrixStorageTensor) - F = svd(storage(tens)) - U, S, V = F.U, F.S, F.Vt - i, j = inds(tens) - # TODO: Make this more general with a `similar_ind` function, - # so the dimension can be determined from the length of `S`. - min_ij = dim(i) ≤ dim(j) ? i : j - α = sim(min_ij) # similar_ind(i, space(S)) - β = sim(min_ij) # similar_ind(i, space(S)) - Utensor = tensor(U, (i, α)) - # TODO: Remove conversion to `Diagonal` to make more general, or make a generic `Diagonal` concept that works for `BlockSparseArray`. - # Used for now to avoid introducing wrapper types. - Stensor = tensor(Diagonal(S), (α, β)) - Vtensor = tensor(V, (β, j)) - return Utensor, Stensor, Vtensor, Spectrum(nothing, 0.0) -end - -array(tensor::MatrixOrArrayStorageTensor) = storage(tensor) - -# Combiner -function contraction_output( - tensor1::MatrixOrArrayStorageTensor, tensor2::CombinerTensor, indsR -) - tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) - return NDTensors.similar(tensortypeR, indsR) -end From cbd030764115e61347a0a3d2793dc9d5c11e335e Mon Sep 17 00:00:00 2001 From: mtfishman Date: Fri, 27 Oct 2023 16:08:07 -0400 Subject: [PATCH 02/36] Get more Array storage functionality working --- NDTensors/src/NDTensors.jl | 22 ++- .../arraystorage/storage/arraystorage.jl | 5 + .../arraystorage/arraystorage/storage/conj.jl | 2 + .../arraystorage/tensor/combiner.jl | 68 ++++++++ .../arraystorage/arraystorage/tensor/qr.jl | 10 ++ .../arraystorage/arraystorage/tensor/svd.jl | 155 ++++++++++++++++-- NDTensors/src/blocksparse/linearalgebra.jl | 2 +- src/itensor.jl | 5 + src/mps/abstractmps.jl | 5 + 9 files changed, 254 insertions(+), 20 deletions(-) create mode 100644 NDTensors/src/arraystorage/arraystorage/storage/conj.jl create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/qr.jl diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index 29501e2cac..e3cd18949a 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -131,11 +131,25 @@ include("empty/adapt.jl") ##################################### # Array Tensor (experimental) -# TODO: Move to `Experimental` module. +# TODO: Move to `Experimental` module? 
# -include("arraytensor/arraytensor.jl") -include("arraytensor/array.jl") -include("arraytensor/blocksparsearray.jl") +include("arraystorage/arraystorage/storage/arraystorage.jl") +include("arraystorage/arraystorage/storage/conj.jl") +include("arraystorage/arraystorage/storage/permutedims.jl") +include("arraystorage/arraystorage/storage/contract.jl") +include("arraystorage/arraystorage/storage/combiner.jl") + +include("arraystorage/arraystorage/tensor/arraystorage.jl") +include("arraystorage/arraystorage/tensor/indexing.jl") +include("arraystorage/arraystorage/tensor/permutedims.jl") +include("arraystorage/arraystorage/tensor/mul.jl") +include("arraystorage/arraystorage/tensor/contract.jl") +include("arraystorage/arraystorage/tensor/qr.jl") +include("arraystorage/arraystorage/tensor/svd.jl") +include("arraystorage/arraystorage/tensor/combiner.jl") + +# BlockSparseArray storage +include("arraystorage/blocksparsearray/storage/contract.jl") ##################################### # Deprecations diff --git a/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl b/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl index e6f33d725e..996494b68a 100644 --- a/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl +++ b/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl @@ -23,3 +23,8 @@ const MatrixStorage{T} = Union{ } const MatrixOrArrayStorage{T} = Union{MatrixStorage{T},ArrayStorage{T}} + +# TODO: Delete once `Dense` is removed. +function to_arraystorage(x::DenseTensor) + return tensor(reshape(data(x), size(x)), inds(x)) +end diff --git a/NDTensors/src/arraystorage/arraystorage/storage/conj.jl b/NDTensors/src/arraystorage/arraystorage/storage/conj.jl new file mode 100644 index 0000000000..51c6a39985 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/storage/conj.jl @@ -0,0 +1,2 @@ +conj(as::AliasStyle, A::AbstractArray) = conj(A) +conj(as::AllowAlias, A::Array{<:Real}) = A diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl b/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl index 65c45c565e..324f7dbf15 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl @@ -4,3 +4,71 @@ function contraction_output( tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) return NDTensors.similar(tensortypeR, indsR) end + +function contract!!( + output_tensor::ArrayStorageTensor, + output_tensor_labels, + combiner_tensor::CombinerTensor, + combiner_tensor_labels, + tensor::ArrayStorageTensor, + tensor_labels, +) + if ndims(combiner_tensor) ≤ 1 + # Empty combiner, acts as multiplying by 1 + output_tensor = permutedims!!( + output_tensor, tensor, getperm(output_tensor_labels, tensor_labels) + ) + return output_tensor + end + if is_index_replacement(tensor, tensor_labels, combiner_tensor, combiner_tensor_labels) + ui = setdiff(combiner_tensor_labels, tensor_labels)[] + newind = inds(combiner_tensor)[findfirst(==(ui), combiner_tensor_labels)] + cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels) + output_tensor_storage = copy(storage(tensor)) + output_tensor_inds = setindex(inds(tensor), newind, cpos2) + return NDTensors.tensor(output_tensor_storage, output_tensor_inds) + end + is_combining_contraction = is_combining( + tensor, tensor_labels, combiner_tensor, combiner_tensor_labels + ) + if is_combining_contraction + Alabels, Blabels = tensor_labels, combiner_tensor_labels + final_labels = 
contract_labels(Blabels, Alabels) + final_labels_n = contract_labels(combiner_tensor_labels, tensor_labels) + output_tensor_inds = inds(output_tensor) + if final_labels != final_labels_n + perm = getperm(final_labels_n, final_labels) + output_tensor_inds = permute(inds(output_tensor), perm) + output_tensor_labels = permute(output_tensor_labels, perm) + end + cpos1, output_tensor_cpos = intersect_positions( + combiner_tensor_labels, output_tensor_labels + ) + labels_comb = deleteat(combiner_tensor_labels, cpos1) + output_tensor_vl = [output_tensor_labels...] + for (ii, li) in enumerate(labels_comb) + insert!(output_tensor_vl, output_tensor_cpos + ii, li) + end + deleteat!(output_tensor_vl, output_tensor_cpos) + labels_perm = tuple(output_tensor_vl...) + perm = getperm(labels_perm, tensor_labels) + # TODO: Add a `reshape` for `ArrayStorageTensor`. + ## tensorp = reshape(output_tensor, NDTensors.permute(inds(tensor), perm)) + tensorp_inds = permute(inds(tensor), perm) + tensorp = NDTensors.tensor(reshape(storage(output_tensor), dims(tensorp_inds)), tensorp_inds) + permutedims!(tensorp, tensor, perm) + # TODO: Add a `reshape` for `ArrayStorageTensor`. + ## reshape(tensorp, output_tensor_inds) + return NDTensors.tensor(reshape(storage(tensorp), dims(output_tensor_inds)), output_tensor_inds) + else # Uncombining + cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels) + output_tensor_storage = copy(storage(tensor)) + indsC = deleteat(inds(combiner_tensor), cpos1) + output_tensor_inds = insertat(inds(tensor), indsC, cpos2) + # TODO: Add a `reshape` for `ArrayStorageTensor`. + return NDTensors.tensor(reshape(output_tensor_storage, dims(output_tensor_inds)), output_tensor_inds) + end + return invalid_combiner_contraction_error( + tensor, tensor_labels, combiner_tensor, combiner_tensor_labels + ) +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl b/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl new file mode 100644 index 0000000000..04ae4501fc --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl @@ -0,0 +1,10 @@ +function qr(A::ArrayStorageTensor) + Q, R = qr(storage(A)) + Q = convert(typeof(R), Q) + i, j = inds(A) + q = size(A, 1) < size(A, 2) ? i : j + q = sim(q) + Qₜ = tensor(Q, (i, q)) + Rₜ = tensor(R, (dag(q), j)) + return Qₜ, Rₜ +end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl index 3e128d0abd..2d1e42a6ea 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl @@ -1,16 +1,141 @@ -function LinearAlgebra.svd(tens::MatrixStorageTensor) - F = svd(storage(tens)) - U, S, V = F.U, F.S, F.Vt - i, j = inds(tens) - # TODO: Make this more general with a `similar_ind` function, - # so the dimension can be determined from the length of `S`. - min_ij = dim(i) ≤ dim(j) ? i : j - α = sim(min_ij) # similar_ind(i, space(S)) - β = sim(min_ij) # similar_ind(i, space(S)) - Utensor = tensor(U, (i, α)) - # TODO: Remove conversion to `Diagonal` to make more general, or make a generic `Diagonal` concept that works for `BlockSparseArray`. - # Used for now to avoid introducing wrapper types. - Stensor = tensor(Diagonal(S), (α, β)) - Vtensor = tensor(V, (β, j)) - return Utensor, Stensor, Vtensor, Spectrum(nothing, 0.0) +# TODO: Rewrite this function to be more modern: +# 1. List keyword arguments in function signature. +# 2. Remove `Dense` and `Diag`. +# 3. 
Output `Spectrum` as a keyword argument that gets overwritten. +# 4. Dispatch on `alg`. +# 5. Remove keyword argument deprecations. +# 6. Make this into two layers, one that handles indices and one that works with `Matrix`. +""" + svd(T::DenseTensor{<:Number,2}; kwargs...) + +svd of an order-2 DenseTensor +""" +function svd(T::ArrayStorageTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} + truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) + + # + # Keyword argument deprecations + # + use_absolute_cutoff = false + if haskey(kwargs, :absoluteCutoff) + @warn "In svd, keyword argument absoluteCutoff is deprecated in favor of use_absolute_cutoff" + use_absolute_cutoff = get(kwargs, :absoluteCutoff, use_absolute_cutoff) + end + + use_relative_cutoff = true + if haskey(kwargs, :doRelCutoff) + @warn "In svd, keyword argument doRelCutoff is deprecated in favor of use_relative_cutoff" + use_relative_cutoff = get(kwargs, :doRelCutoff, use_relative_cutoff) + end + + if haskey(kwargs, :fastsvd) || haskey(kwargs, :fastSVD) + error( + "In svd, fastsvd/fastSVD keyword arguments are removed in favor of alg, see documentation for more details.", + ) + end + + maxdim::Int = get(kwargs, :maxdim, minimum(dims(T))) + mindim::Int = get(kwargs, :mindim, 1) + cutoff = get(kwargs, :cutoff, 0.0) + use_absolute_cutoff::Bool = get(kwargs, :use_absolute_cutoff, use_absolute_cutoff) + use_relative_cutoff::Bool = get(kwargs, :use_relative_cutoff, use_relative_cutoff) + alg::String = get(kwargs, :alg, "divide_and_conquer") + + #@timeit_debug timer "dense svd" begin + if alg == "divide_and_conquer" + MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.DivideAndConquer()) + if isnothing(MUSV) + # If "divide_and_conquer" fails, try "qr_iteration" + alg = "qr_iteration" + MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.QRIteration()) + if isnothing(MUSV) + # If "qr_iteration" fails, try "recursive" + alg = "recursive" + MUSV = svd_recursive(matrix(T)) + end + end + elseif alg == "qr_iteration" + MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.QRIteration()) + if isnothing(MUSV) + # If "qr_iteration" fails, try "recursive" + alg = "recursive" + MUSV = svd_recursive(matrix(T)) + end + elseif alg == "recursive" + MUSV = svd_recursive(matrix(T)) + elseif alg == "QRAlgorithm" || alg == "JacobiAlgorithm" + MUSV = svd_catch_error(matrix(T); alg=alg) + else + error( + "svd algorithm $alg is not currently supported. Please see the documentation for currently supported algorithms.", + ) + end + if isnothing(MUSV) + if any(isnan, T) + println("SVD failed, the matrix you were trying to SVD contains NaNs.") + else + println(lapack_svd_error_message(alg)) + end + return nothing + end + MU, MS, MV = MUSV + conj!(MV) + #end # @timeit_debug + + P = MS .^ 2 + if truncate + P, truncerr, _ = truncate!!( + P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs... + ) + else + truncerr = 0.0 + end + spec = Spectrum(P, truncerr) + dS = length(P) + if dS < length(MS) + MU = MU[:, 1:dS] + # Fails on some GPU backends like Metal. 
+ # resize!(MS, dS) + MS = MS[1:dS] + MV = MV[:, 1:dS] + end + + # Make the new indices to go onto U and V + u = eltype(IndsT)(dS) + v = eltype(IndsT)(dS) + Uinds = IndsT((ind(T, 1), u)) + Sinds = IndsT((u, v)) + Vinds = IndsT((ind(T, 2), v)) + U = tensor(MU, Uinds) + S = tensor(Diag(MS), Sinds) + V = tensor(MV, Vinds) + return U, S, V, spec end + +## function svd( +## tens::ArrayStorageTensor; +## alg, +## which_decomp, +## tags, +## mindim, +## cutoff, +## eigen_perturbation, +## normalize, +## maxdim, +## ) +## error("Not implemented") +## F = svd(storage(tens)) +## U, S, V = F.U, F.S, F.Vt +## i, j = inds(tens) +## # TODO: Make this more general with a `similar_ind` function, +## # so the dimension can be determined from the length of `S`. +## min_ij = dim(i) ≤ dim(j) ? i : j +## α = sim(min_ij) # similar_ind(i, space(S)) +## β = sim(min_ij) # similar_ind(i, space(S)) +## Utensor = tensor(U, (i, α)) +## # TODO: Remove conversion to `Diagonal` to make more general, or make a generic `Diagonal` concept that works for `BlockSparseArray`. +## # Used for now to avoid introducing wrapper types. +## Stensor = tensor(Diagonal(S), (α, β)) +## Vtensor = tensor(V, (β, j)) +## return Utensor, Stensor, Vtensor, Spectrum(nothing, 0.0) +## end diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index d4a64c9085..21ca66ccc8 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -32,7 +32,7 @@ per row/column, otherwise it fails. This assumption makes it so the result can be computed from the dense svds of seperate blocks. """ -function svd(T::BlockSparseMatrix{ElT}; kwargs...) where {ElT} +function svd(T::Tensor{ElT,2,<:BlockSparse}; kwargs...) where {ElT} alg::String = get(kwargs, :alg, "divide_and_conquer") min_blockdim::Int = get(kwargs, :min_blockdim, 0) truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) diff --git a/src/itensor.jl b/src/itensor.jl index a56c219aad..0c6cbba04f 100644 --- a/src/itensor.jl +++ b/src/itensor.jl @@ -125,6 +125,11 @@ ITensor(is, st::TensorStorage)::ITensor = ITensor(NeverAlias(), st, is) itensor(T::ITensor) = T ITensor(T::ITensor) = copy(T) +# TODO: Delete once `TensorStorage` is removed. +function NDTensors.to_arraystorage(x::ITensor) + return itensor(NDTensors.to_arraystorage(tensor(x))) +end + """ itensor(args...; kwargs...) diff --git a/src/mps/abstractmps.jl b/src/mps/abstractmps.jl index e1028e90d8..a6309e7d2f 100644 --- a/src/mps/abstractmps.jl +++ b/src/mps/abstractmps.jl @@ -16,6 +16,11 @@ size(m::AbstractMPS) = size(data(m)) ndims(m::AbstractMPS) = ndims(data(m)) +# TODO: Delete once `TensorStorage` is removed. +function NDTensors.to_arraystorage(x::AbstractMPS) + return NDTensors.to_arraystorage.(x) +end + function promote_itensor_eltype(m::Vector{ITensor}) T = isassigned(m, 1) ? 
eltype(m[1]) : Number for n in 2:length(m) From 2a6b8da165fd44169e598ca4b940ed8f48ac5cfe Mon Sep 17 00:00:00 2001 From: mtfishman Date: Fri, 27 Oct 2023 16:28:20 -0400 Subject: [PATCH 03/36] Format --- .../src/arraystorage/arraystorage/tensor/combiner.jl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl b/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl index 324f7dbf15..46e728daff 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl @@ -55,18 +55,24 @@ function contract!!( # TODO: Add a `reshape` for `ArrayStorageTensor`. ## tensorp = reshape(output_tensor, NDTensors.permute(inds(tensor), perm)) tensorp_inds = permute(inds(tensor), perm) - tensorp = NDTensors.tensor(reshape(storage(output_tensor), dims(tensorp_inds)), tensorp_inds) + tensorp = NDTensors.tensor( + reshape(storage(output_tensor), dims(tensorp_inds)), tensorp_inds + ) permutedims!(tensorp, tensor, perm) # TODO: Add a `reshape` for `ArrayStorageTensor`. ## reshape(tensorp, output_tensor_inds) - return NDTensors.tensor(reshape(storage(tensorp), dims(output_tensor_inds)), output_tensor_inds) + return NDTensors.tensor( + reshape(storage(tensorp), dims(output_tensor_inds)), output_tensor_inds + ) else # Uncombining cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels) output_tensor_storage = copy(storage(tensor)) indsC = deleteat(inds(combiner_tensor), cpos1) output_tensor_inds = insertat(inds(tensor), indsC, cpos2) # TODO: Add a `reshape` for `ArrayStorageTensor`. - return NDTensors.tensor(reshape(output_tensor_storage, dims(output_tensor_inds)), output_tensor_inds) + return NDTensors.tensor( + reshape(output_tensor_storage, dims(output_tensor_inds)), output_tensor_inds + ) end return invalid_combiner_contraction_error( tensor, tensor_labels, combiner_tensor, combiner_tensor_labels From 460082d56626f42af758a03de05a15d1d9dc7b39 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Fri, 27 Oct 2023 16:40:57 -0400 Subject: [PATCH 04/36] New similar definition --- NDTensors/src/abstractarray/similar.jl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/NDTensors/src/abstractarray/similar.jl b/NDTensors/src/abstractarray/similar.jl index 4918e5150c..93bb1bf7aa 100644 --- a/NDTensors/src/abstractarray/similar.jl +++ b/NDTensors/src/abstractarray/similar.jl @@ -77,6 +77,11 @@ function similar(array::AbstractArray, eltype::Type, dims::Tuple) return NDTensors.similar(similartype(typeof(array), eltype), dims) end +# NDTensors.similar +function similar(array::AbstractArray, eltype::Type, dims::Int) + return NDTensors.similar(similartype(typeof(array), eltype), dims) +end + # NDTensors.similar similar(array::AbstractArray, dims::Tuple) = NDTensors.similar(typeof(array), dims) From ffe3f66fa7dd31d1e8f561991771fc2cea97d660 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 11:21:17 -0400 Subject: [PATCH 05/36] Define contraction of ArrayStorage with Diag --- NDTensors/src/NDTensors.jl | 4 + NDTensors/src/abstractarray/fill.jl | 4 +- .../arraystorage/storage/contract.jl | 16 +- .../arraystorage/tensor/contract.jl | 11 +- .../arraystorage/arraystorage/tensor/zeros.jl | 3 + .../diagonalarray/tensor/contract.jl | 144 ++++++++++++++++++ 6 files changed, 170 insertions(+), 12 deletions(-) create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl create mode 100644 NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl 
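For orientation, a minimal sketch of the kind of call this patch enables, following the `tensor`/`contract` style used in the NDTensors tests later in this series; the sizes and index labels here are illustrative assumptions, not taken from the patch itself:

using NDTensors

# An Array-storage tensor and a Diag-storage tensor sharing one index.
A = NDTensors.tensor(randn(2, 3), (2, 3))
D = NDTensors.tensor(NDTensors.Diag(randn(2)), (2, 2))

# Contracting over the shared (negative) label exercises the new
# `contract!` methods for the (Diag, ArrayStorage) storage combination
# defined in the diffs below.
C = NDTensors.contract(D, (1, -1), A, (-1, 2))
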
diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index c7ace46322..26ff06aab5 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -141,6 +141,7 @@ include("arraystorage/arraystorage/storage/contract.jl") include("arraystorage/arraystorage/storage/combiner.jl") include("arraystorage/arraystorage/tensor/arraystorage.jl") +include("arraystorage/arraystorage/tensor/zeros.jl") include("arraystorage/arraystorage/tensor/indexing.jl") include("arraystorage/arraystorage/tensor/permutedims.jl") include("arraystorage/arraystorage/tensor/mul.jl") @@ -149,6 +150,9 @@ include("arraystorage/arraystorage/tensor/qr.jl") include("arraystorage/arraystorage/tensor/svd.jl") include("arraystorage/arraystorage/tensor/combiner.jl") +# DiagonalArray storage +include("arraystorage/diagonalarray/tensor/contract.jl") + # BlockSparseArray storage include("arraystorage/blocksparsearray/storage/contract.jl") diff --git a/NDTensors/src/abstractarray/fill.jl b/NDTensors/src/abstractarray/fill.jl index 146459a4ce..eab9b830a8 100644 --- a/NDTensors/src/abstractarray/fill.jl +++ b/NDTensors/src/abstractarray/fill.jl @@ -8,10 +8,10 @@ function generic_randn( return randn!(rng, data) end -function generic_zeros(arraytype::Type{<:AbstractArray}, dim::Integer=0) +function generic_zeros(arraytype::Type{<:AbstractArray}, dims...) arraytype_specified = set_unspecified_parameters( leaf_parenttype(arraytype), DefaultParameters() ) ElT = eltype(arraytype_specified) - return fill!(similar(arraytype_specified, dim), zero(ElT)) + return fill!(similar(arraytype_specified, dims...), zero(ElT)) end diff --git a/NDTensors/src/arraystorage/arraystorage/storage/contract.jl b/NDTensors/src/arraystorage/arraystorage/storage/contract.jl index b88628b326..0770325f9b 100644 --- a/NDTensors/src/arraystorage/arraystorage/storage/contract.jl +++ b/NDTensors/src/arraystorage/arraystorage/storage/contract.jl @@ -40,17 +40,21 @@ function contraction_output( end # Required interface for specific AbstractArray types +# TODO: Define `default_α` and `default_β`. +# TODO: Define this as a `ttgt` or `matricize` backend. function contract!( - arrayR::MatrixOrArrayStorage, - labelsR, + array_dest::MatrixOrArrayStorage, + labels_dest, array1::MatrixOrArrayStorage, labels1, array2::MatrixOrArrayStorage, labels2, + α=one(eltype(array_dest)), + β=zero(eltype(array_dest)); ) - props = ContractionProperties(labels1, labels2, labelsR) - compute_contraction_properties!(props, array1, array2, arrayR) + props = ContractionProperties(labels1, labels2, labels_dest) + compute_contraction_properties!(props, array1, array2, array_dest) # TODO: Change this to just `contract!`, or maybe `contract_ttgt!`? - _contract!(arrayR, array1, array2, props) - return arrayR + _contract!(array_dest, array1, array2, props, α, β) + return array_dest end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl b/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl index 80ab295b65..d652d31815 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl @@ -6,14 +6,17 @@ function contraction_output( return NDTensors.similar(tensortypeR, indsR) end +# TODO: Define `default_α` and `default_β`. 
function contract!( - tensorR::MatrixOrArrayStorageTensor, - labelsR, + tensor_dest::MatrixOrArrayStorageTensor, + labels_dest, tensor1::MatrixOrArrayStorageTensor, labels1, tensor2::MatrixOrArrayStorageTensor, labels2, + α=one(eltype(tensor_dest)), + β=zero(eltype(tensor_dest)); ) - contract!(storage(tensorR), labelsR, storage(tensor1), labels1, storage(tensor2), labels2) - return tensorR + contract!(storage(tensor_dest), labels_dest, storage(tensor1), labels1, storage(tensor2), labels2, α, β) + return tensor_dest end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl b/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl new file mode 100644 index 0000000000..8ffb744618 --- /dev/null +++ b/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl @@ -0,0 +1,3 @@ +function zeros(tensortype::Type{<:ArrayStorageTensor}, inds) + return tensor(generic_zeros(storagetype(tensortype), dims(inds)), inds) +end diff --git a/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl new file mode 100644 index 0000000000..3f07c71ab9 --- /dev/null +++ b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl @@ -0,0 +1,144 @@ +function promote_rule( + storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:Diag} +) + return promote_type(storagetype1, datatype(storagetype2)) +end + +function contraction_output_type( + tensortype1::Type{<:DiagTensor}, tensortype2::Type{<:ArrayStorageTensor}, indsR +) + return similartype(promote_type(tensortype1, tensortype2), indsR) +end + +function contraction_output_type( + tensortype1::Type{<:ArrayStorageTensor}, tensortype2::Type{<:DiagTensor}, indsR +) + return contraction_output_type(tensortype2, tensortype1, indsR) +end + +# TODO: Modernize this function, rewrite in terms of `Array` and `DiagonalArray`. +function contract!( + C::ArrayStorageTensor{ElC,NC}, + Clabels, + A::DiagTensor{ElA,NA}, + Alabels, + B::ArrayStorageTensor{ElB,NB}, + Blabels, + α::Number=one(ElC), + β::Number=zero(ElC); + convert_to_dense::Bool=true, +) where {ElA,NA,ElB,NB,ElC,NC} + #@timeit_debug timer "diag-dense contract!" begin + if all(i -> i < 0, Blabels) + # If all of B is contracted + # TODO: can also check NC+NB==NA + min_dim = min(minimum(dims(A)), minimum(dims(B))) + if length(Clabels) == 0 + # all indices are summed over, just add the product of the diagonal + # elements of A and B + # Assumes C starts set to 0 + c₁ = zero(ElC) + for i in 1:min_dim + c₁ += getdiagindex(A, i) * getdiagindex(B, i) + end + setdiagindex!(C, α * c₁ + β * getdiagindex(C, 1), 1) + else + # not all indices are summed over, set the diagonals of the result + # to the product of the diagonals of A and B + # TODO: should we make this return a Diag storage? + for i in 1:min_dim + setdiagindex!( + C, α * getdiagindex(A, i) * getdiagindex(B, i) + β * getdiagindex(C, i), i + ) + end + end + else + # Most general contraction + if convert_to_dense + # TODO: Define a conversion from `DiagonalArray` to `Array`. + contract!(C, Clabels, to_arraystorage(dense(A)), Alabels, B, Blabels, α, β) + else + if !isone(α) || !iszero(β) + error( + "`contract!(::ArrayStorageTensor, ::DiagTensor, ::ArrayStorageTensor, α, β; convert_to_dense = false)` with `α ≠ 1` or `β ≠ 0` is not currently supported. 
You can call it with `convert_to_dense = true` instead.", + ) + end + astarts = zeros(Int, length(Alabels)) + bstart = 0 + cstart = 0 + b_cstride = 0 + nbu = 0 + for ib in 1:length(Blabels) + ia = findfirst(==(Blabels[ib]), Alabels) + if !isnothing(ia) + b_cstride += stride(B, ib) + bstart += astarts[ia] * stride(B, ib) + else + nbu += 1 + end + end + + c_cstride = 0 + for ic in 1:length(Clabels) + ia = findfirst(==(Clabels[ic]), Alabels) + if !isnothing(ia) + c_cstride += stride(C, ic) + cstart += astarts[ia] * stride(C, ic) + end + end + + # strides of the uncontracted dimensions of + # B + bustride = zeros(Int, nbu) + custride = zeros(Int, nbu) + # size of the uncontracted dimensions of + # B, to be used in CartesianIndices + busize = zeros(Int, nbu) + n = 1 + for ib in 1:length(Blabels) + if Blabels[ib] > 0 + bustride[n] = stride(B, ib) + busize[n] = size(B, ib) + ic = findfirst(==(Blabels[ib]), Clabels) + custride[n] = stride(C, ic) + n += 1 + end + end + + boffset_orig = 1 - sum(strides(B)) + coffset_orig = 1 - sum(strides(C)) + cartesian_inds = CartesianIndices(Tuple(busize)) + for inds in cartesian_inds + boffset = boffset_orig + coffset = coffset_orig + for i in 1:nbu + ii = inds[i] + boffset += ii * bustride[i] + coffset += ii * custride[i] + end + c = zero(ElC) + for j in 1:diaglength(A) + # With α == 0 && β == 1 + C[cstart + j * c_cstride + coffset] += + getdiagindex(A, j) * B[bstart + j * b_cstride + boffset] + # XXX: not sure if this is correct + #C[cstart+j*c_cstride+coffset] += α * getdiagindex(A, j)* B[bstart+j*b_cstride+boffset] + β * C[cstart+j*c_cstride+coffset] + end + end + end + end + #end # @timeit +end + +function contract!( + C::ArrayStorageTensor, + Clabels, + A::ArrayStorageTensor, + Alabels, + B::DiagTensor, + Blabels, + α::Number=one(eltype(C)), + β::Number=zero(eltype(C)), +) + return contract!(C, Clabels, B, Blabels, A, Alabels, α, β) +end From 9c571bcc82b017c95cc667bd487d2222a38c36ae Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Mon, 30 Oct 2023 11:24:20 -0400 Subject: [PATCH 06/36] Format Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- .../src/arraystorage/arraystorage/tensor/contract.jl | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl b/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl index d652d31815..309893bc31 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/contract.jl @@ -17,6 +17,15 @@ function contract!( α=one(eltype(tensor_dest)), β=zero(eltype(tensor_dest)); ) - contract!(storage(tensor_dest), labels_dest, storage(tensor1), labels1, storage(tensor2), labels2, α, β) + contract!( + storage(tensor_dest), + labels_dest, + storage(tensor1), + labels1, + storage(tensor2), + labels2, + α, + β, + ) return tensor_dest end From cc3136c5c347cb176223751168198fcede3aa8d5 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Mon, 30 Oct 2023 11:24:30 -0400 Subject: [PATCH 07/36] Format Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl index 3f07c71ab9..e7011896a1 100644 --- a/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl +++ 
b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl @@ -1,6 +1,4 @@ -function promote_rule( - storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:Diag} -) +function promote_rule(storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:Diag}) return promote_type(storagetype1, datatype(storagetype2)) end From 74d722d7396d85896bdd985c61de79eaf754fcee Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 11:30:14 -0400 Subject: [PATCH 08/36] Reorganize Combiner code --- NDTensors/src/NDTensors.jl | 8 +++++--- .../storage/combiner.jl => combiner/storage/contract.jl} | 0 .../tensor/combiner.jl => combiner/tensor/contract.jl} | 0 3 files changed, 5 insertions(+), 3 deletions(-) rename NDTensors/src/arraystorage/{arraystorage/storage/combiner.jl => combiner/storage/contract.jl} (100%) rename NDTensors/src/arraystorage/{arraystorage/tensor/combiner.jl => combiner/tensor/contract.jl} (100%) diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index 26ff06aab5..a3619a7542 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -132,13 +132,11 @@ include("empty/adapt.jl") ##################################### # Array Tensor (experimental) -# TODO: Move to `Experimental` module? # include("arraystorage/arraystorage/storage/arraystorage.jl") include("arraystorage/arraystorage/storage/conj.jl") include("arraystorage/arraystorage/storage/permutedims.jl") include("arraystorage/arraystorage/storage/contract.jl") -include("arraystorage/arraystorage/storage/combiner.jl") include("arraystorage/arraystorage/tensor/arraystorage.jl") include("arraystorage/arraystorage/tensor/zeros.jl") @@ -148,7 +146,6 @@ include("arraystorage/arraystorage/tensor/mul.jl") include("arraystorage/arraystorage/tensor/contract.jl") include("arraystorage/arraystorage/tensor/qr.jl") include("arraystorage/arraystorage/tensor/svd.jl") -include("arraystorage/arraystorage/tensor/combiner.jl") # DiagonalArray storage include("arraystorage/diagonalarray/tensor/contract.jl") @@ -156,6 +153,11 @@ include("arraystorage/diagonalarray/tensor/contract.jl") # BlockSparseArray storage include("arraystorage/blocksparsearray/storage/contract.jl") +# Combiner storage +include("arraystorage/combiner/storage/contract.jl") + +include("arraystorage/combiner/tensor/contract.jl") + ##################################### # Deprecations # diff --git a/NDTensors/src/arraystorage/arraystorage/storage/combiner.jl b/NDTensors/src/arraystorage/combiner/storage/contract.jl similarity index 100% rename from NDTensors/src/arraystorage/arraystorage/storage/combiner.jl rename to NDTensors/src/arraystorage/combiner/storage/contract.jl diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl b/NDTensors/src/arraystorage/combiner/tensor/contract.jl similarity index 100% rename from NDTensors/src/arraystorage/arraystorage/tensor/combiner.jl rename to NDTensors/src/arraystorage/combiner/tensor/contract.jl From a40e4a3d31bfc8cac96f2799ac59af46987544ba Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 11:40:37 -0400 Subject: [PATCH 09/36] Get eigen working with Array storage --- NDTensors/src/NDTensors.jl | 1 + .../arraystorage/arraystorage/tensor/eigen.jl | 71 +++++++++++++++++++ .../arraystorage/arraystorage/tensor/svd.jl | 12 ++-- NDTensors/src/blocksparse/linearalgebra.jl | 2 +- 4 files changed, 79 insertions(+), 7 deletions(-) create mode 100644 NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index 
a3619a7542..446b3eedb2 100644
--- a/NDTensors/src/NDTensors.jl
+++ b/NDTensors/src/NDTensors.jl
@@ -145,6 +145,7 @@ include("arraystorage/arraystorage/tensor/permutedims.jl")
 include("arraystorage/arraystorage/tensor/mul.jl")
 include("arraystorage/arraystorage/tensor/contract.jl")
 include("arraystorage/arraystorage/tensor/qr.jl")
+include("arraystorage/arraystorage/tensor/eigen.jl")
 include("arraystorage/arraystorage/tensor/svd.jl")
 
 # DiagonalArray storage
diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl
new file mode 100644
index 0000000000..bab339f849
--- /dev/null
+++ b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl
@@ -0,0 +1,71 @@
+# TODO: Rewrite this function to be more modern:
+# 1. List keyword arguments in function signature.
+# 2. Output `Spectrum` as a keyword argument that gets overwritten.
+# 3. Dispatch on `alg`.
+# 4. Remove keyword argument deprecations.
+# 5. Make this into two layers, one that handles indices and one that works with `Matrix`.
+# 6. Use `eltype` instead of `where`.
+function eigen(
+  T::Hermitian{ElT,<:ArrayStorageTensor{ElT,2,IndsT}}; kwargs...
+) where {ElT<:Union{Real,Complex},IndsT}
+  # Keyword argument deprecations
+  use_absolute_cutoff = false
+  if haskey(kwargs, :absoluteCutoff)
+    @warn "In eigen, keyword argument absoluteCutoff is deprecated in favor of use_absolute_cutoff"
+    use_absolute_cutoff = get(kwargs, :absoluteCutoff, use_absolute_cutoff)
+  end
+  use_relative_cutoff = true
+  if haskey(kwargs, :doRelCutoff)
+    @warn "In eigen, keyword argument doRelCutoff is deprecated in favor of use_relative_cutoff"
+    use_relative_cutoff = get(kwargs, :doRelCutoff, use_relative_cutoff)
+  end
+
+  truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff)
+  maxdim::Int = get(kwargs, :maxdim, minimum(dims(T)))
+  mindim::Int = get(kwargs, :mindim, 1)
+  cutoff::Union{Nothing,Float64} = get(kwargs, :cutoff, 0.0)
+  use_absolute_cutoff::Bool = get(kwargs, :use_absolute_cutoff, use_absolute_cutoff)
+  use_relative_cutoff::Bool = get(kwargs, :use_relative_cutoff, use_relative_cutoff)
+
+  matrixT = matrix(T)
+  ## TODO Here I am calling parent to ensure that the correct `any` function
+  ## is invoked for non-cpu matrices
+  if any(!isfinite, parent(matrixT))
+    throw(
+      ArgumentError(
+        "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs"
+      ),
+    )
+  end
+
+  DM, VM = eigen(matrixT)
+
+  # Sort by largest to smallest eigenvalues
+  # TODO: Replace `cpu` with `leaf_parenttype` dispatch.
+  p = sortperm(cpu(DM); rev=true, by=abs)
+  DM = DM[p]
+  VM = VM[:, p]
+
+  if truncate
+    DM, truncerr, _ = truncate!!(
+      DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs...
+    )
+    dD = length(DM)
+    if dD < size(VM, 2)
+      VM = VM[:, 1:dD]
+    end
+  else
+    dD = length(DM)
+    truncerr = 0.0
+  end
+  spec = Spectrum(DM, truncerr)
+
+  # Make the new indices to go onto V
+  l = eltype(IndsT)(dD)
+  r = eltype(IndsT)(dD)
+  Vinds = IndsT((dag(ind(T, 2)), dag(r)))
+  Dinds = IndsT((l, dag(r)))
+  V = tensor(VM, Vinds)
+  D = tensor(Diag(DM), Dinds)
+  return D, V, spec
+end
diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl
index 2d1e42a6ea..ae256bbc08 100644
--- a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl
+++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl
@@ -1,12 +1,12 @@
 # TODO: Rewrite this function to be more modern:
 # 1. List keyword arguments in function signature.
-# 2. 
Remove `Dense` and `Diag`. -# 3. Output `Spectrum` as a keyword argument that gets overwritten. -# 4. Dispatch on `alg`. -# 5. Remove keyword argument deprecations. -# 6. Make this into two layers, one that handles indices and one that works with `Matrix`. +# 2. Output `Spectrum` as a keyword argument that gets overwritten. +# 3. Dispatch on `alg`. +# 4. Remove keyword argument deprecations. +# 5. Make this into two layers, one that handles indices and one that works with `Matrix`. +# 6. Use `eltype` instead of `where`. """ - svd(T::DenseTensor{<:Number,2}; kwargs...) + svd(T::ArrayStorageTensor{<:Number,2}; kwargs...) svd of an order-2 DenseTensor """ diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index b973603475..7301f071ca 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -207,7 +207,7 @@ _eigen_eltypes(T::Hermitian{ElT,<:BlockSparseMatrix{ElT}}) where {ElT} = real(El _eigen_eltypes(T::BlockSparseMatrix{ElT}) where {ElT} = complex(ElT), complex(ElT) function eigen( - T::Union{Hermitian{ElT,<:BlockSparseMatrix{ElT}},BlockSparseMatrix{ElT}}; kwargs... + T::Union{Hermitian{ElT,<:Tensor{ElT,2,<:BlockSparse}},Tensor{ElT,2,<:BlockSparse}}; kwargs... ) where {ElT<:Union{Real,Complex}} truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) From 7c0509b8cd1fc3b0b62f65e62358563bd1bb77ba Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 11:49:56 -0400 Subject: [PATCH 10/36] Improve eigen code a bit --- .../arraystorage/arraystorage/tensor/eigen.jl | 59 ++++++++++--------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl index bab339f849..97580e2b29 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl @@ -1,31 +1,28 @@ # TODO: Rewrite this function to be more modern: # 1. List keyword arguments in function signature. # 2. Output `Spectrum` as a keyword argument that gets overwritten. -# 3. Dispatch on `alg`. -# 4. Remove keyword argument deprecations. -# 5. Make this into two layers, one that handles indices and one that works with `Matrix`. -# 6. Use `eltype` instead of `where`. +# 3. Make this into two layers, one that handles indices and one that works with `AbstractMatrix`. function eigen( - T::Hermitian{ElT,<:ArrayStorageTensor{ElT,2,IndsT}}; kwargs... 
-) where {ElT<:Union{Real,Complex},IndsT}
-  # Keyword argument deprecations
-  use_absolute_cutoff = false
-  if haskey(kwargs, :absoluteCutoff)
-    @warn "In eigen, keyword argument absoluteCutoff is deprecated in favor of use_absolute_cutoff"
-    use_absolute_cutoff = get(kwargs, :absoluteCutoff, use_absolute_cutoff)
-  end
-  use_relative_cutoff = true
-  if haskey(kwargs, :doRelCutoff)
-    @warn "In eigen, keyword argument doRelCutoff is deprecated in favor of use_relative_cutoff"
-    use_relative_cutoff = get(kwargs, :doRelCutoff, use_relative_cutoff)
-  end
-
-  truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff)
-  maxdim::Int = get(kwargs, :maxdim, minimum(dims(T)))
-  mindim::Int = get(kwargs, :mindim, 1)
-  cutoff::Union{Nothing,Float64} = get(kwargs, :cutoff, 0.0)
-  use_absolute_cutoff::Bool = get(kwargs, :use_absolute_cutoff, use_absolute_cutoff)
-  use_relative_cutoff::Bool = get(kwargs, :use_relative_cutoff, use_relative_cutoff)
+  T::Hermitian{<:Any,<:ArrayStorageTensor};
+  use_absolute_cutoff=false,
+  use_relative_cutoff=true,
+  maxdim=nothing,
+  mindim=1,
+  cutoff=nothing,
+  # These are getting passed erroneously.
+  # TODO: Make sure they don't get passed down
+  # to here.
+  ishermitian=nothing,
+  which_decomp=nothing,
+  tags=nothing,
+  eigen_perturbation=nothing,
+  ortho=nothing,
+  normalize=nothing,
+  svd_alg=nothing,
+)
+  truncate = !isnothing(maxdim) || !isnothing(cutoff)
+  maxdim = isnothing(maxdim) ? minimum(dims(T)) : maxdim
+  cutoff = isnothing(cutoff) ? zero(eltype(T)) : cutoff
 
   matrixT = matrix(T)
   ## TODO Here I am calling parent to ensure that the correct `any` function
   ## is invoked for non-cpu matrices
   if any(!isfinite, parent(matrixT))
     throw(
       ArgumentError(
         "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs"
       ),
     )
   end
 
   DM, VM = eigen(matrixT)
 
   # Sort by largest to smallest eigenvalues
   # TODO: Replace `cpu` with `leaf_parenttype` dispatch.
   p = sortperm(cpu(DM); rev=true, by=abs)
   DM = DM[p]
   VM = VM[:, p]
 
   if truncate
     DM, truncerr, _ = truncate!!(
-      DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs...
+      DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
     )
     dD = length(DM)
     if dD < size(VM, 2)
       VM = VM[:, 1:dD]
     end
   else
     dD = length(DM)
     truncerr = 0.0
   end
   spec = Spectrum(DM, truncerr)
 
   # Make the new indices to go onto V
-  l = eltype(IndsT)(dD)
-  r = eltype(IndsT)(dD)
-  Vinds = IndsT((dag(ind(T, 2)), dag(r)))
-  Dinds = IndsT((l, dag(r)))
+  # TODO: Put in a separate function, such as
+  # `rewrap_inds` or something like that.
+  indstype = typeof(inds(T))
+  l = eltype(indstype)(dD)
+  r = eltype(indstype)(dD)
+  Vinds = indstype((dag(ind(T, 2)), dag(r)))
+  Dinds = indstype((l, dag(r)))
   V = tensor(VM, Vinds)
+  # TODO: Replace with `DiagonalArray`.
   D = tensor(Diag(DM), Dinds)
   return D, V, spec
 end

From 6febc37bb6be06ff2383f6696b1886277abf42f1 Mon Sep 17 00:00:00 2001
From: Matt Fishman
Date: Mon, 30 Oct 2023 11:50:53 -0400
Subject: [PATCH 11/36] Format

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
---
 NDTensors/src/blocksparse/linearalgebra.jl | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl
index 7301f071ca..70bf5c7972 100644
--- a/NDTensors/src/blocksparse/linearalgebra.jl
+++ b/NDTensors/src/blocksparse/linearalgebra.jl
@@ -207,7 +207,8 @@ _eigen_eltypes(T::Hermitian{ElT,<:BlockSparseMatrix{ElT}}) where {ElT} = real(El
 _eigen_eltypes(T::BlockSparseMatrix{ElT}) where {ElT} = complex(ElT), complex(ElT)
 
 function eigen(
-  T::Union{Hermitian{ElT,<:Tensor{ElT,2,<:BlockSparse}},Tensor{ElT,2,<:BlockSparse}}; kwargs... 
+ T::Union{Hermitian{ElT,<:Tensor{ElT,2,<:BlockSparse}},Tensor{ElT,2,<:BlockSparse}}; + kwargs..., ) where {ElT<:Union{Real,Complex}} truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) From 2208569e0ee6dca0e998dd8d82fb675ad92fcc8d Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 13:33:05 -0400 Subject: [PATCH 12/36] Fix tests --- .../arraystorage/arraystorage/tensor/eigen.jl | 10 +- .../arraystorage/arraystorage/tensor/svd.jl | 106 ++++++------------ .../arraystorage/arraystorage/tensor/zeros.jl | 12 +- NDTensors/test/arraytensor/array.jl | 6 +- .../base/test_arraystorage.jl | 31 +++++ 5 files changed, 87 insertions(+), 78 deletions(-) create mode 100644 test/ITensorLegacyMPS/base/test_arraystorage.jl diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl index 97580e2b29..d3147dcce6 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl @@ -4,24 +4,26 @@ # 3. Make this into two layers, one that handles indices and one that works with `AbstractMatrix`. function eigen( T::Hermitian{<:Any,<:ArrayStorageTensor}; - use_absolute_cutoff=false, - use_relative_cutoff=true, maxdim=nothing, mindim=1, cutoff=nothing, + use_absolute_cutoff=false, + use_relative_cutoff=true, # These are getting passed erroneously. # TODO: Make sure they don't get passed down # to here. - ishermitian=nothing, which_decomp=nothing, tags=nothing, eigen_perturbation=nothing, - ortho=nothing, normalize=nothing, + ishermitian=nothing, + ortho=nothing, svd_alg=nothing, ) truncate = !isnothing(maxdim) || !isnothing(cutoff) + # TODO: Define `default_maxdim(T)`. maxdim = isnothing(maxdim) ? minimum(dims(T)) : maxdim + # TODO: Define `default_cutoff(T)`. cutoff = isnothing(cutoff) ? zero(eltype(T)) : cutoff matrixT = matrix(T) diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl index ae256bbc08..f00543959e 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl @@ -1,47 +1,35 @@ # TODO: Rewrite this function to be more modern: -# 1. List keyword arguments in function signature. -# 2. Output `Spectrum` as a keyword argument that gets overwritten. -# 3. Dispatch on `alg`. -# 4. Remove keyword argument deprecations. -# 5. Make this into two layers, one that handles indices and one that works with `Matrix`. -# 6. Use `eltype` instead of `where`. +# 1. Output `Spectrum` as a keyword argument that gets overwritten. +# 2. Dispatch on `alg`. +# 3. Make this into two layers, one that handles indices and one that works with `Matrix`. """ svd(T::ArrayStorageTensor{<:Number,2}; kwargs...) svd of an order-2 DenseTensor """ -function svd(T::ArrayStorageTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} - truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) +function svd( + T::ArrayStorageTensor; + maxdim=nothing, + mindim=1, + cutoff=nothing, + alg="divide_and_conquer", # TODO: Define `default_alg(T)` + use_absolute_cutoff=false, + use_relative_cutoff=true, + # These are getting passed erroneously. + # TODO: Make sure they don't get passed down + # to here. + which_decomp=nothing, + tags=nothing, + eigen_perturbation=nothing, + normalize=nothing, +) + truncate = !isnothing(maxdim) || !isnothing(cutoff) + # TODO: Define `default_maxdim(T)`. + maxdim = isnothing(maxdim) ? 
minimum(dims(T)) : maxdim + # TODO: Define `default_cutoff(T)`. + cutoff = isnothing(cutoff) ? zero(eltype(T)) : cutoff - # - # Keyword argument deprecations - # - use_absolute_cutoff = false - if haskey(kwargs, :absoluteCutoff) - @warn "In svd, keyword argument absoluteCutoff is deprecated in favor of use_absolute_cutoff" - use_absolute_cutoff = get(kwargs, :absoluteCutoff, use_absolute_cutoff) - end - - use_relative_cutoff = true - if haskey(kwargs, :doRelCutoff) - @warn "In svd, keyword argument doRelCutoff is deprecated in favor of use_relative_cutoff" - use_relative_cutoff = get(kwargs, :doRelCutoff, use_relative_cutoff) - end - - if haskey(kwargs, :fastsvd) || haskey(kwargs, :fastSVD) - error( - "In svd, fastsvd/fastSVD keyword arguments are removed in favor of alg, see documentation for more details.", - ) - end - - maxdim::Int = get(kwargs, :maxdim, minimum(dims(T))) - mindim::Int = get(kwargs, :mindim, 1) - cutoff = get(kwargs, :cutoff, 0.0) - use_absolute_cutoff::Bool = get(kwargs, :use_absolute_cutoff, use_absolute_cutoff) - use_relative_cutoff::Bool = get(kwargs, :use_relative_cutoff, use_relative_cutoff) - alg::String = get(kwargs, :alg, "divide_and_conquer") - - #@timeit_debug timer "dense svd" begin + # TODO: Dispatch on `Algorithm(alg)`. if alg == "divide_and_conquer" MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.DivideAndConquer()) if isnothing(MUSV) @@ -80,12 +68,11 @@ function svd(T::ArrayStorageTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} end MU, MS, MV = MUSV conj!(MV) - #end # @timeit_debug P = MS .^ 2 if truncate P, truncerr, _ = truncate!!( - P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs... + P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff ) else truncerr = 0.0 @@ -101,41 +88,16 @@ function svd(T::ArrayStorageTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} end # Make the new indices to go onto U and V - u = eltype(IndsT)(dS) - v = eltype(IndsT)(dS) - Uinds = IndsT((ind(T, 1), u)) - Sinds = IndsT((u, v)) - Vinds = IndsT((ind(T, 2), v)) + # TODO: Put in a separate function, such as + # `rewrap_inds` or something like that. + indstype = typeof(inds(T)) + u = eltype(indstype)(dS) + v = eltype(indstype)(dS) + Uinds = indstype((ind(T, 1), u)) + Sinds = indstype((u, v)) + Vinds = indstype((ind(T, 2), v)) U = tensor(MU, Uinds) S = tensor(Diag(MS), Sinds) V = tensor(MV, Vinds) return U, S, V, spec end - -## function svd( -## tens::ArrayStorageTensor; -## alg, -## which_decomp, -## tags, -## mindim, -## cutoff, -## eigen_perturbation, -## normalize, -## maxdim, -## ) -## error("Not implemented") -## F = svd(storage(tens)) -## U, S, V = F.U, F.S, F.Vt -## i, j = inds(tens) -## # TODO: Make this more general with a `similar_ind` function, -## # so the dimension can be determined from the length of `S`. -## min_ij = dim(i) ≤ dim(j) ? i : j -## α = sim(min_ij) # similar_ind(i, space(S)) -## β = sim(min_ij) # similar_ind(i, space(S)) -## Utensor = tensor(U, (i, α)) -## # TODO: Remove conversion to `Diagonal` to make more general, or make a generic `Diagonal` concept that works for `BlockSparseArray`. -## # Used for now to avoid introducing wrapper types. 
-## Stensor = tensor(Diagonal(S), (α, β)) -## Vtensor = tensor(V, (β, j)) -## return Utensor, Stensor, Vtensor, Spectrum(nothing, 0.0) -## end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl b/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl index 8ffb744618..66759892e4 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl @@ -1,3 +1,13 @@ -function zeros(tensortype::Type{<:ArrayStorageTensor}, inds) +function _zeros(tensortype::Type{<:ArrayStorageTensor}, inds) return tensor(generic_zeros(storagetype(tensortype), dims(inds)), inds) end + +# To resolve ambiguity error with `Base.zeros`. +function zeros(tensortype::Type{<:ArrayStorageTensor}, inds) + return _zeros(tensortype, inds) +end + +# To resolve ambiguity error with `Base.zeros`. +function zeros(tensortype::Type{<:ArrayStorageTensor}, inds::Tuple{Vararg{Integer}}) + return _zeros(tensortype, inds) +end diff --git a/NDTensors/test/arraytensor/array.jl b/NDTensors/test/arraytensor/array.jl index 39809d0db3..bbe8a82500 100644 --- a/NDTensors/test/arraytensor/array.jl +++ b/NDTensors/test/arraytensor/array.jl @@ -35,7 +35,11 @@ using NDTensors: storage, storagetype @test Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1)) U, S, V = svd(T1) - @test U * S * V ≈ T1 + + # TODO: Should this work? Currently broken. + @test_broken U * S * V ≈ T1 + # TODO: Should this require labels, or use existing labels? + @test contract(contract(U, (1, -1), S, (-1, 2)), (1, -1), V, (2, -1)) ≈ T1 T12 = contract(T1, (1, -1), T2, (-1, 2)) @test T12 ≈ T1 * T2 diff --git a/test/ITensorLegacyMPS/base/test_arraystorage.jl b/test/ITensorLegacyMPS/base/test_arraystorage.jl new file mode 100644 index 0000000000..3d4bad4cd4 --- /dev/null +++ b/test/ITensorLegacyMPS/base/test_arraystorage.jl @@ -0,0 +1,31 @@ +using ITensors +using NDTensors +using Test + +@testset "Test ArrayStorage DMRG" begin + n = 4 + conserve_qns = false + s = siteinds("S=1/2", n; conserve_qns) + heisenberg_opsum = function (n) + os = OpSum() + for j in 1:(n - 1) + os += "Sz", j, "Sz", j + 1 + os += 0.5, "S+", j, "S-", j + 1 + os += 0.5, "S-", j, "S+", j + 1 + end + return os + end + H = MPO(heisenberg_opsum(n), s) + ψ = randomMPS(s, j -> isodd(j) ? "↑" : "↓"; linkdims=4) + dmrg_kwargs = (; + nsweeps=2, + cutoff=[1e-4, 1e-12], + maxdim=10, + outputlevel=0, + ) + e1, ψ1 = dmrg(NDTensors.to_arraystorage.((H, ψ))...; dmrg_kwargs...) + e2, ψ2 = dmrg(H, ψ; dmrg_kwargs...) + @test e1 ≈ e2 +end + + From 45e8b87c454e3a6547d8129ba51ec84dd2ecd621 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Mon, 30 Oct 2023 13:37:45 -0400 Subject: [PATCH 13/36] Format Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- test/ITensorLegacyMPS/base/test_arraystorage.jl | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/test/ITensorLegacyMPS/base/test_arraystorage.jl b/test/ITensorLegacyMPS/base/test_arraystorage.jl index 3d4bad4cd4..26a0f82abd 100644 --- a/test/ITensorLegacyMPS/base/test_arraystorage.jl +++ b/test/ITensorLegacyMPS/base/test_arraystorage.jl @@ -17,12 +17,7 @@ using Test end H = MPO(heisenberg_opsum(n), s) ψ = randomMPS(s, j -> isodd(j) ? "↑" : "↓"; linkdims=4) - dmrg_kwargs = (; - nsweeps=2, - cutoff=[1e-4, 1e-12], - maxdim=10, - outputlevel=0, - ) + dmrg_kwargs = (; nsweeps=2, cutoff=[1e-4, 1e-12], maxdim=10, outputlevel=0) e1, ψ1 = dmrg(NDTensors.to_arraystorage.((H, ψ))...; dmrg_kwargs...) e2, ψ2 = dmrg(H, ψ; dmrg_kwargs...) 
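   # The ArrayStorage-backed run (e1) should reproduce the legacy
   # TensorStorage result (e2).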
@test e1 ≈ e2 From 2801f7a6921a89490a747ff31c276d2634558cd4 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Mon, 30 Oct 2023 13:37:57 -0400 Subject: [PATCH 14/36] Format Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- test/ITensorLegacyMPS/base/test_arraystorage.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/test/ITensorLegacyMPS/base/test_arraystorage.jl b/test/ITensorLegacyMPS/base/test_arraystorage.jl index 26a0f82abd..3da97e2e2b 100644 --- a/test/ITensorLegacyMPS/base/test_arraystorage.jl +++ b/test/ITensorLegacyMPS/base/test_arraystorage.jl @@ -22,5 +22,3 @@ using Test e2, ψ2 = dmrg(H, ψ; dmrg_kwargs...) @test e1 ≈ e2 end - - From a07f8081cd60080afaa47579a97da561ec4a43ad Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 14:45:22 -0400 Subject: [PATCH 15/36] Fix tests --- .../src/arraystorage/arraystorage/tensor/arraystorage.jl | 7 +------ test/ITensorLegacyMPS/base/test_arraystorage.jl | 1 - 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl b/NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl index 72fbc1d535..9419d51e05 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/arraystorage.jl @@ -16,12 +16,7 @@ end array(tensor::MatrixOrArrayStorageTensor) = storage(tensor) # Linear algebra (matrix algebra) +# TODO: Remove `Base.`? Is it imported? function Base.adjoint(tens::MatrixStorageTensor) return tensor(adjoint(storage(tens)), reverse(inds(tens))) end - -# Conversion from a tensor with TensorStorage storage -# to AbstractArray storage, -function to_arraytensor(x::DenseTensor) - return tensor(reshape(data(x), size(x)), inds(x)) -end diff --git a/test/ITensorLegacyMPS/base/test_arraystorage.jl b/test/ITensorLegacyMPS/base/test_arraystorage.jl index 3d4bad4cd4..0795cc31fb 100644 --- a/test/ITensorLegacyMPS/base/test_arraystorage.jl +++ b/test/ITensorLegacyMPS/base/test_arraystorage.jl @@ -1,5 +1,4 @@ using ITensors -using NDTensors using Test @testset "Test ArrayStorage DMRG" begin From 62339baf57564297e4aea573b59d7f968542dce3 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 16:24:52 -0400 Subject: [PATCH 16/36] [NDTensors] Add DiagonalArrays submodule --- .../BlockSparseArrays/src/blocksparsearray.jl | 1 - .../src/BlockSparseArrays/src/sparsearray.jl | 13 +-- NDTensors/src/DiagonalArrays/README.md | 49 ++++++++ .../src/DiagonalArrays/examples/README.jl | 42 +++++++ .../src/DiagonalArrays/src/DiagonalArrays.jl | 105 ++++++++++++++++++ NDTensors/src/DiagonalArrays/src/diagview.jl | 54 +++++++++ NDTensors/src/DiagonalArrays/test/runtests.jl | 12 ++ NDTensors/src/NDTensors.jl | 2 + 8 files changed, 266 insertions(+), 12 deletions(-) create mode 100644 NDTensors/src/DiagonalArrays/README.md create mode 100644 NDTensors/src/DiagonalArrays/examples/README.jl create mode 100644 NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl create mode 100644 NDTensors/src/DiagonalArrays/src/diagview.jl create mode 100644 NDTensors/src/DiagonalArrays/test/runtests.jl diff --git a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl index 572ed929e2..b14e28ac34 100644 --- a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl +++ b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl @@ -65,7 +65,6 @@ function BlockArrays.viewblock(block_arr::BlockSparseArray, block) # TODO: Make this `Zeros`? 
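  # (presumably a lazy `FillArrays.Zeros`-style block, rather than an
  # allocated array of zeros)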
## zero = zeros(eltype(block_arr), block_size) return block_arr.blocks[blks...] # Fails because zero isn't defined - ## return get_nonzero(block_arr.blocks, blks, zero) end function Base.getindex(block_arr::BlockSparseArray{T,N}, bi::BlockIndex{N}) where {T,N} diff --git a/NDTensors/src/BlockSparseArrays/src/sparsearray.jl b/NDTensors/src/BlockSparseArrays/src/sparsearray.jl index a3f5b6dcbd..e140377c7f 100644 --- a/NDTensors/src/BlockSparseArrays/src/sparsearray.jl +++ b/NDTensors/src/BlockSparseArrays/src/sparsearray.jl @@ -1,6 +1,7 @@ +# TODO: Define a constructor with a default `zero`. struct SparseArray{T,N,Zero} <: AbstractArray{T,N} data::Dictionary{CartesianIndex{N},T} - dims::NTuple{N,Int64} + dims::NTuple{N,Int} zero::Zero end @@ -20,13 +21,3 @@ end function Base.getindex(a::SparseArray{T,N}, I::Vararg{Int,N}) where {T,N} return getindex(a, CartesianIndex(I)) end - -## # `getindex` but uses a default if the value is -## # structurally zero. -## function get_nonzero(a::SparseArray{T,N}, I::CartesianIndex{N}, zero) where {T,N} -## @boundscheck checkbounds(a, I) -## return get(a.data, I, zero) -## end -## function get_nonzero(a::SparseArray{T,N}, I::NTuple{N,Int}, zero) where {T,N} -## return get_nonzero(a, CartesianIndex(I), zero) -## end diff --git a/NDTensors/src/DiagonalArrays/README.md b/NDTensors/src/DiagonalArrays/README.md new file mode 100644 index 0000000000..33996cf52d --- /dev/null +++ b/NDTensors/src/DiagonalArrays/README.md @@ -0,0 +1,49 @@ +# DiagonalArrays.jl + +A Julia `DiagonalArray` type. + +````julia +using NDTensors.DiagonalArrays: + DiagonalArray, + densearray, + diagview, + diaglength, + getdiagindex, + setdiagindex!, + setdiag!, + diagcopyto! + +d = DiagonalArray([1., 2, 3], 3, 4, 5) +@show d[1, 1, 1] == 1 +@show d[2, 2, 2] == 2 +@show d[1, 2, 1] == 0 + +d[2, 2, 2] = 22 +@show d[2, 2, 2] == 22 + +@show diaglength(d) == 3 +@show densearray(d) == d +@show getdiagindex(d, 2) == d[2, 2, 2] + +setdiagindex!(d, 222, 2) +@show d[2, 2, 2] == 222 + +a = randn(3, 4, 5) +new_diag = randn(3) +setdiag!(a, new_diag) +diagcopyto!(d, a) + +@show diagview(a) == new_diag +@show diagview(d) == new_diag +```` + +You can generate this README with: +```julia +using Literate +Literate.markdown("examples/README.jl", "."; flavor=Literate.CommonMarkFlavor()) +``` + +--- + +*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).* + diff --git a/NDTensors/src/DiagonalArrays/examples/README.jl b/NDTensors/src/DiagonalArrays/examples/README.jl new file mode 100644 index 0000000000..b4903051fe --- /dev/null +++ b/NDTensors/src/DiagonalArrays/examples/README.jl @@ -0,0 +1,42 @@ +# # DiagonalArrays.jl +# +# A Julia `DiagonalArray` type. + +using NDTensors.DiagonalArrays: + DiagonalArray, + densearray, + diagview, + diaglength, + getdiagindex, + setdiagindex!, + setdiag!, + diagcopyto! 
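+
+# Only the diagonal is stored; off-diagonal reads are synthesized on the
+# fly by the array's `zero` functor (`DefaultZero` unless one is passed
+# to the constructor).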
+ +d = DiagonalArray([1., 2, 3], 3, 4, 5) +@show d[1, 1, 1] == 1 +@show d[2, 2, 2] == 2 +@show d[1, 2, 1] == 0 + +d[2, 2, 2] = 22 +@show d[2, 2, 2] == 22 + +@show diaglength(d) == 3 +@show densearray(d) == d +@show getdiagindex(d, 2) == d[2, 2, 2] + +setdiagindex!(d, 222, 2) +@show d[2, 2, 2] == 222 + +a = randn(3, 4, 5) +new_diag = randn(3) +setdiag!(a, new_diag) +diagcopyto!(d, a) + +@show diagview(a) == new_diag +@show diagview(d) == new_diag + +# You can generate this README with: +# ```julia +# using Literate +# Literate.markdown("examples/README.jl", "."; flavor=Literate.CommonMarkFlavor()) +# ``` diff --git a/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl b/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl new file mode 100644 index 0000000000..0c1187975c --- /dev/null +++ b/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl @@ -0,0 +1,105 @@ +module DiagonalArrays + +using LinearAlgebra + +export DiagonalArray + +include("diagview.jl") + +struct DefaultZero end + +function (::DefaultZero)( + eltype::Type, I::CartesianIndex +) + return zero(eltype) +end + +struct DiagonalArray{T,N,Diag<:AbstractVector{T},Zero} <: AbstractArray{T,N} + diag::Diag + dims::NTuple{N,Int} + zero::Zero +end + +function DiagonalArray{T,N}(diag::AbstractVector{T}, d::Tuple{Vararg{Int,N}}, zero=DefaultZero()) where {T,N} + return DiagonalArray{T,N,typeof(diag),typeof(zero)}(diag, d, zero) +end + +function DiagonalArray{T,N}(diag::AbstractVector, d::Tuple{Vararg{Int,N}}, zero=DefaultZero()) where {T,N} + return DiagonalArray{T,N}(T.(diag), d, zero) +end + +function DiagonalArray{T,N}(diag::AbstractVector, d::Vararg{Int,N}) where {T,N} + return DiagonalArray{T,N}(diag, d) +end + +function DiagonalArray{T}(diag::AbstractVector, d::Tuple{Vararg{Int,N}}, zero=DefaultZero()) where {T,N} + return DiagonalArray{T,N}(diag, d, zero) +end + +function DiagonalArray{T}(diag::AbstractVector, d::Vararg{Int,N}) where {T,N} + return DiagonalArray{T,N}(diag, d) +end + +function DiagonalArray(diag::AbstractVector{T}, d::Tuple{Vararg{Int,N}}) where {T,N} + return DiagonalArray{T,N}(diag, d) +end + +function DiagonalArray(diag::AbstractVector{T}, d::Vararg{Int,N}) where {T,N} + return DiagonalArray{T,N}(diag, d) +end + +# undef +function DiagonalArray{T,N}(::UndefInitializer, d::Tuple{Vararg{Int,N}}) where {T,N} + return DiagonalArray{T,N}(Vector{T}(undef, minimum(d)), d) +end + +function DiagonalArray{T,N}(::UndefInitializer, d::Vararg{Int,N}) where {T,N} + return DiagonalArray{T,N}(undef, d) +end + +function DiagonalArray{T}(::UndefInitializer, d::Tuple{Vararg{Int,N}}) where {T,N} + return DiagonalArray{T,N}(undef, d) +end + +function DiagonalArray{T}(::UndefInitializer, d::Vararg{Int,N}) where {T,N} + return DiagonalArray{T,N}(undef, d) +end + +Base.size(a::DiagonalArray) = a.dims + +diagview(a::DiagonalArray) = a.diag +LinearAlgebra.diag(a::DiagonalArray) = copy(diagview(a)) + +function Base.getindex(a::DiagonalArray{T,N}, I::CartesianIndex{N}) where {T,N} + i = diagindex(a, I) + isnothing(i) && return a.zero(T, I) + return getdiagindex(a, i) +end + +function Base.getindex(a::DiagonalArray{T,N}, I::Vararg{Int,N}) where {T,N} + return getindex(a, CartesianIndex(I)) +end + +function Base.setindex!(a::DiagonalArray{T,N}, v, I::CartesianIndex{N}) where {T,N} + i = diagindex(a, I) + isnothing(i) && return error("Can't set off-diagonal element of DiagonalArray") + setdiagindex!(a, v, i) + return a +end + +function Base.setindex!(a::DiagonalArray{T,N}, v, I::Vararg{Int,N}) where {T,N} + a[CartesianIndex(I)] = v + return a +end 
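+
+# Indexing in brief (an illustrative sketch, not additional API):
+#
+#   a = DiagonalArray([1.0, 2.0], 2, 3)
+#   a[2, 2]       # 2.0, read from `a.diag`
+#   a[1, 2]       # 0.0, computed by `a.zero(Float64, CartesianIndex(1, 2))`
+#   a[2, 2] = 22  # writes into `a.diag`
+#   a[1, 2] = 3   # errors: off-diagonal elements aren't stored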
+ +# Make dense. +function densearray(a::DiagonalArray) + # TODO: Check this works on GPU. + # TODO: Make use of `a.zero`? + d = similar(diagview(a), size(a)) + fill!(d, zero(eltype(a))) + diagcopyto!(d, a) + return d +end + +end diff --git a/NDTensors/src/DiagonalArrays/src/diagview.jl b/NDTensors/src/DiagonalArrays/src/diagview.jl new file mode 100644 index 0000000000..1c062cfc56 --- /dev/null +++ b/NDTensors/src/DiagonalArrays/src/diagview.jl @@ -0,0 +1,54 @@ +# Convert to an offset along the diagonal. +# Otherwise, return `nothing`. +function diagindex(a::AbstractArray{T,N}, I::CartesianIndex{N}) where {T,N} + !allequal(Tuple(I)) && return nothing + return first(Tuple(I)) +end + +function diagindex(a::AbstractArray{T,N}, I::Vararg{Int,N}) where {T,N} + return diagindex(a, CartesianIndex(I)) +end + +function getdiagindex(a::AbstractArray, i::Integer) + return diagview(a)[i] +end + +function setdiagindex!(a::AbstractArray, v, i::Integer) + diagview(a)[i] = v + return a +end + +function setdiag!(a::AbstractArray, v) + copyto!(diagview(a), v) + return a +end + +function diaglength(a::AbstractArray) + # length(diagview(a)) + return minimum(size(a)) +end + +function diagstride(A::AbstractArray) + s = 1 + p = 1 + for i in 1:(ndims(A) - 1) + p *= size(A, i) + s += p + end + return s +end + +function diagindices(A::AbstractArray) + diaglength = minimum(size(A)) + maxdiag = LinearIndices(A)[CartesianIndex(ntuple(Returns(diaglength), ndims(A)))] + return 1:diagstride(A):maxdiag +end + +function diagview(A::AbstractArray) + return @view A[diagindices(A)] +end + +function diagcopyto!(dest::AbstractArray, src::AbstractArray) + copyto!(diagview(dest), diagview(src)) + return dest +end diff --git a/NDTensors/src/DiagonalArrays/test/runtests.jl b/NDTensors/src/DiagonalArrays/test/runtests.jl new file mode 100644 index 0000000000..96e51b5267 --- /dev/null +++ b/NDTensors/src/DiagonalArrays/test/runtests.jl @@ -0,0 +1,12 @@ +using Test +using NDTensors.DiagonalArrays + +@testset "Test NDTensors.DiagonalArrays" begin + @testset "README" begin + @test include( + joinpath( + pkgdir(DiagonalArrays), "src", "DiagonalArrays", "examples", "README.jl" + ), + ) isa Any + end +end diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index 446b3eedb2..296bfa4f99 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -20,6 +20,8 @@ using TupleTools include("SetParameters/src/SetParameters.jl") using .SetParameters +include("DiagonalArrays/src/DiagonalArrays.jl") +using .DiagonalArrays include("BlockSparseArrays/src/BlockSparseArrays.jl") using .BlockSparseArrays include("SmallVectors/src/SmallVectors.jl") From 9d4d7eb35d55f471e3f9f683499c6f62224c097b Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 16:39:27 -0400 Subject: [PATCH 17/36] Add tests --- NDTensors/test/DiagonalArrays.jl | 4 ++++ NDTensors/test/runtests.jl | 1 + 2 files changed, 5 insertions(+) create mode 100644 NDTensors/test/DiagonalArrays.jl diff --git a/NDTensors/test/DiagonalArrays.jl b/NDTensors/test/DiagonalArrays.jl new file mode 100644 index 0000000000..fa9b5a0c2b --- /dev/null +++ b/NDTensors/test/DiagonalArrays.jl @@ -0,0 +1,4 @@ +using Test +using NDTensors + +include(joinpath(pkgdir(NDTensors), "src", "DiagonalArrays", "test", "runtests.jl")) diff --git a/NDTensors/test/runtests.jl b/NDTensors/test/runtests.jl index 4e934a4c0d..9e5bba62f5 100644 --- a/NDTensors/test/runtests.jl +++ b/NDTensors/test/runtests.jl @@ -20,6 +20,7 @@ end @safetestset "NDTensors" begin @testset "$filename" for 
filename in [ "BlockSparseArrays.jl", + "DiagonalArrays.jl", "SetParameters.jl", "SmallVectors.jl", "SortedSets.jl", From b537e6a02c861a2cc370c2518f71598c28acc43b Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 16:54:42 -0400 Subject: [PATCH 18/36] Format --- NDTensors/src/DiagonalArrays/examples/README.jl | 2 +- .../src/DiagonalArrays/src/DiagonalArrays.jl | 16 ++++++++++------ NDTensors/src/DiagonalArrays/src/diagview.jl | 6 +++--- NDTensors/src/DiagonalArrays/test/runtests.jl | 4 +--- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/NDTensors/src/DiagonalArrays/examples/README.jl b/NDTensors/src/DiagonalArrays/examples/README.jl index b4903051fe..ede04d6c2e 100644 --- a/NDTensors/src/DiagonalArrays/examples/README.jl +++ b/NDTensors/src/DiagonalArrays/examples/README.jl @@ -12,7 +12,7 @@ using NDTensors.DiagonalArrays: setdiag!, diagcopyto! -d = DiagonalArray([1., 2, 3], 3, 4, 5) +d = DiagonalArray([1.0, 2, 3], 3, 4, 5) @show d[1, 1, 1] == 1 @show d[2, 2, 2] == 2 @show d[1, 2, 1] == 0 diff --git a/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl b/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl index 0c1187975c..6d4ceb8310 100644 --- a/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl +++ b/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl @@ -8,9 +8,7 @@ include("diagview.jl") struct DefaultZero end -function (::DefaultZero)( - eltype::Type, I::CartesianIndex -) +function (::DefaultZero)(eltype::Type, I::CartesianIndex) return zero(eltype) end @@ -20,11 +18,15 @@ struct DiagonalArray{T,N,Diag<:AbstractVector{T},Zero} <: AbstractArray{T,N} zero::Zero end -function DiagonalArray{T,N}(diag::AbstractVector{T}, d::Tuple{Vararg{Int,N}}, zero=DefaultZero()) where {T,N} +function DiagonalArray{T,N}( + diag::AbstractVector{T}, d::Tuple{Vararg{Int,N}}, zero=DefaultZero() +) where {T,N} return DiagonalArray{T,N,typeof(diag),typeof(zero)}(diag, d, zero) end -function DiagonalArray{T,N}(diag::AbstractVector, d::Tuple{Vararg{Int,N}}, zero=DefaultZero()) where {T,N} +function DiagonalArray{T,N}( + diag::AbstractVector, d::Tuple{Vararg{Int,N}}, zero=DefaultZero() +) where {T,N} return DiagonalArray{T,N}(T.(diag), d, zero) end @@ -32,7 +34,9 @@ function DiagonalArray{T,N}(diag::AbstractVector, d::Vararg{Int,N}) where {T,N} return DiagonalArray{T,N}(diag, d) end -function DiagonalArray{T}(diag::AbstractVector, d::Tuple{Vararg{Int,N}}, zero=DefaultZero()) where {T,N} +function DiagonalArray{T}( + diag::AbstractVector, d::Tuple{Vararg{Int,N}}, zero=DefaultZero() +) where {T,N} return DiagonalArray{T,N}(diag, d, zero) end diff --git a/NDTensors/src/DiagonalArrays/src/diagview.jl b/NDTensors/src/DiagonalArrays/src/diagview.jl index 1c062cfc56..21f7ac8a06 100644 --- a/NDTensors/src/DiagonalArrays/src/diagview.jl +++ b/NDTensors/src/DiagonalArrays/src/diagview.jl @@ -29,11 +29,11 @@ function diaglength(a::AbstractArray) end function diagstride(A::AbstractArray) - s = 1 - p = 1 + s = 1 + p = 1 for i in 1:(ndims(A) - 1) p *= size(A, i) - s += p + s += p end return s end diff --git a/NDTensors/src/DiagonalArrays/test/runtests.jl b/NDTensors/src/DiagonalArrays/test/runtests.jl index 96e51b5267..45d8b55e5c 100644 --- a/NDTensors/src/DiagonalArrays/test/runtests.jl +++ b/NDTensors/src/DiagonalArrays/test/runtests.jl @@ -4,9 +4,7 @@ using NDTensors.DiagonalArrays @testset "Test NDTensors.DiagonalArrays" begin @testset "README" begin @test include( - joinpath( - pkgdir(DiagonalArrays), "src", "DiagonalArrays", "examples", "README.jl" - ), + joinpath(pkgdir(DiagonalArrays), "src", 
"DiagonalArrays", "examples", "README.jl") ) isa Any end end From ba24b1c0104da380f9acc57bf694ca555597b69b Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 16:57:53 -0400 Subject: [PATCH 19/36] Load Compat for allequal --- NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl b/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl index 6d4ceb8310..45319ad8ce 100644 --- a/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl +++ b/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl @@ -1,5 +1,6 @@ module DiagonalArrays +using Compat # allequal using LinearAlgebra export DiagonalArray From e865d275d05943cf3b3260dd7051219eff9e6ae5 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 17:51:34 -0400 Subject: [PATCH 20/36] [NDTensors] DiagonalArray tensor operations --- .../src/DiagonalArrays/src/DiagonalArrays.jl | 24 ++++++- .../arraystorage/storage/arraystorage.jl | 1 + .../arraystorage/arraystorage/tensor/eigen.jl | 3 +- .../arraystorage/arraystorage/tensor/svd.jl | 2 +- .../diagonalarray/tensor/contract.jl | 64 ++++++++++++------- src/tensor_operations/matrix_decomposition.jl | 4 ++ 6 files changed, 72 insertions(+), 26 deletions(-) diff --git a/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl b/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl index 45319ad8ce..8e965062e9 100644 --- a/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl +++ b/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl @@ -3,7 +3,7 @@ module DiagonalArrays using Compat # allequal using LinearAlgebra -export DiagonalArray +export DiagonalArray, DiagonalMatrix, DiagonalVector include("diagview.jl") @@ -19,6 +19,9 @@ struct DiagonalArray{T,N,Diag<:AbstractVector{T},Zero} <: AbstractArray{T,N} zero::Zero end +const DiagonalVector{T,Diag,Zero} = DiagonalArray{T,1,Diag,Zero} +const DiagonalMatrix{T,Diag,Zero} = DiagonalArray{T,2,Diag,Zero} + function DiagonalArray{T,N}( diag::AbstractVector{T}, d::Tuple{Vararg{Int,N}}, zero=DefaultZero() ) where {T,N} @@ -53,6 +56,25 @@ function DiagonalArray(diag::AbstractVector{T}, d::Vararg{Int,N}) where {T,N} return DiagonalArray{T,N}(diag, d) end +default_size(diag::AbstractVector, n) = ntuple(Returns(length(diag)), n) + +# Infer size from diagonal +function DiagonalArray{T,N}(diag::AbstractVector) where {T,N} + return DiagonalArray{T,N}(diag, default_size(diag, N)) +end + +function DiagonalArray{<:Any,N}(diag::AbstractVector{T}) where {T,N} + return DiagonalArray{T,N}(diag) +end + +function DiagonalMatrix(diag::AbstractVector) + return DiagonalArray{<:Any,2}(diag) +end + +function DiagonalVector(diag::AbstractVector) + return DiagonalArray{<:Any,1}(diag) +end + # undef function DiagonalArray{T,N}(::UndefInitializer, d::Tuple{Vararg{Int,N}}) where {T,N} return DiagonalArray{T,N}(Vector{T}(undef, minimum(d)), d) diff --git a/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl b/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl index 996494b68a..559a7364a8 100644 --- a/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl +++ b/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl @@ -6,6 +6,7 @@ const ArrayStorage{T,N} = Union{ SubArray{T,N}, PermutedDimsArray{T,N}, StridedView{T,N}, + DiagonalArray{T,N}, BlockSparseArray{T,N}, } diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl index d3147dcce6..9bcb1713c3 100644 --- 
a/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl @@ -68,7 +68,6 @@ function eigen( Vinds = indstype((dag(ind(T, 2)), dag(r))) Dinds = indstype((l, dag(r))) V = tensor(VM, Vinds) - # TODO: Replace with `DiagonalArray`. - D = tensor(Diag(DM), Dinds) + D = tensor(DiagonalMatrix(DM), Dinds) return D, V, spec end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl index f00543959e..ae1f6f22a8 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl @@ -97,7 +97,7 @@ function svd( Sinds = indstype((u, v)) Vinds = indstype((ind(T, 2), v)) U = tensor(MU, Uinds) - S = tensor(Diag(MS), Sinds) + S = tensor(DiagonalMatrix(MS), Sinds) V = tensor(MV, Vinds) return U, S, V, spec end diff --git a/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl index e7011896a1..b1834a618e 100644 --- a/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl +++ b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl @@ -1,30 +1,49 @@ -function promote_rule(storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:Diag}) - return promote_type(storagetype1, datatype(storagetype2)) +# TODO: Move to a different file. +parenttype(::Type{<:DiagonalArray{<:Any,<:Any,P}}) where {P} = P + +# TODO: Move to a different file. +function promote_rule(storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:DiagonalArray}) + # TODO: Replace with `unwrap_type` once + # https://github.com/ITensor/ITensors.jl/pull/1220 + # is merged. + return promote_type(storagetype1, leaf_parenttype(storagetype2)) end -function contraction_output_type( - tensortype1::Type{<:DiagTensor}, tensortype2::Type{<:ArrayStorageTensor}, indsR -) - return similartype(promote_type(tensortype1, tensortype2), indsR) +# The output must be initialized as zero since it is sparse, cannot be undefined +# Overspecifying types to fix ambiguity error. +function contraction_output(T1::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, T2::ArrayStorageTensor, indsR) where {T,N} + return zero_contraction_output(T1, T2, indsR) end +contraction_output(T1::ArrayStorageTensor, T2::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, indsR) where {T,N} = contraction_output(T2, T1, indsR) -function contraction_output_type( - tensortype1::Type{<:ArrayStorageTensor}, tensortype2::Type{<:DiagTensor}, indsR -) - return contraction_output_type(tensortype2, tensortype1, indsR) +# Overspecifying types to fix ambiguity error. +function contraction_output(tensor1::Tensor{T1,N1,<:DiagonalArray{T1,N1,<:AbstractVector{T1}}}, tensor2::Tensor{T2,N2,<:DiagonalArray{T2,N2,<:AbstractVector{T2}}}, indsR) where {T1,N1,T2,N2} + return zero_contraction_output(tensor1, tensor2, indsR) end +## function contraction_output_type( +## tensortype1::Type{<:Tensor{<:Any,<:Any,<:DiagonalArray}}, tensortype2::Type{<:ArrayStorageTensor}, indsR +## ) +## return similartype(promote_type(tensortype1, tensortype2), indsR) +## end + +## function contraction_output_type( +## tensortype1::Type{<:ArrayStorageTensor}, tensortype2::Type{<:DiagTensor}, indsR +## ) +## return contraction_output_type(tensortype2, tensortype1, indsR) +## end + # TODO: Modernize this function, rewrite in terms of `Array` and `DiagonalArray`. 
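 # Label convention used below: matching negative labels are contracted
 # (summed over), while positive labels survive into the output `C`.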
function contract!( C::ArrayStorageTensor{ElC,NC}, Clabels, - A::DiagTensor{ElA,NA}, + A::Tensor{ElA,NA,<:DiagonalArray{ElA,NA}}, Alabels, B::ArrayStorageTensor{ElB,NB}, Blabels, α::Number=one(ElC), β::Number=zero(ElC); - convert_to_dense::Bool=true, + convert_to_dense::Bool=false, ) where {ElA,NA,ElB,NB,ElC,NC} #@timeit_debug timer "diag-dense contract!" begin if all(i -> i < 0, Blabels) @@ -37,24 +56,24 @@ function contract!( # Assumes C starts set to 0 c₁ = zero(ElC) for i in 1:min_dim - c₁ += getdiagindex(A, i) * getdiagindex(B, i) + c₁ += DiagonalArrays.getdiagindex(A, i) * DiagonalArrays.getdiagindex(B, i) end - setdiagindex!(C, α * c₁ + β * getdiagindex(C, 1), 1) + DiagonalArrays.setdiagindex!(C, α * c₁ + β * DiagonalArrays.getdiagindex(C, 1), 1) else # not all indices are summed over, set the diagonals of the result # to the product of the diagonals of A and B # TODO: should we make this return a Diag storage? for i in 1:min_dim - setdiagindex!( - C, α * getdiagindex(A, i) * getdiagindex(B, i) + β * getdiagindex(C, i), i + DiagonalArrays.setdiagindex!( + C, α * DiagonalArrays.getdiagindex(A, i) * DiagonalArrays.getdiagindex(B, i) + β * DiagonalArrays.getdiagindex(C, i), i ) end end else # Most general contraction if convert_to_dense - # TODO: Define a conversion from `DiagonalArray` to `Array`. - contract!(C, Clabels, to_arraystorage(dense(A)), Alabels, B, Blabels, α, β) + # TODO: Define `densearray(::Tensor)`. + contract!(C, Clabels, tensor(DiagonalArrays.densearray(storage(A)), inds(A)), Alabels, B, Blabels, α, β) else if !isone(α) || !iszero(β) error( @@ -115,10 +134,10 @@ function contract!( coffset += ii * custride[i] end c = zero(ElC) - for j in 1:diaglength(A) + for j in 1:DiagonalArrays.diaglength(A) # With α == 0 && β == 1 C[cstart + j * c_cstride + coffset] += - getdiagindex(A, j) * B[bstart + j * b_cstride + boffset] + DiagonalArrays.getdiagindex(A, j) * B[bstart + j * b_cstride + boffset] # XXX: not sure if this is correct #C[cstart+j*c_cstride+coffset] += α * getdiagindex(A, j)* B[bstart+j*b_cstride+boffset] + β * C[cstart+j*c_cstride+coffset] end @@ -128,15 +147,16 @@ function contract!( #end # @timeit end +# Overspecifying types to fix ambiguity error. function contract!( C::ArrayStorageTensor, Clabels, A::ArrayStorageTensor, Alabels, - B::DiagTensor, + B::Tensor{TB,NB,<:DiagonalArray{TB,NB}}, Blabels, α::Number=one(eltype(C)), β::Number=zero(eltype(C)), -) +) where {TB,NB} return contract!(C, Clabels, B, Blabels, A, Alabels, α, β) end diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 9da68a2c00..770d98e947 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -556,11 +556,15 @@ function factorize_svd( kwargs..., ) leftdir, rightdir = -dir, -dir + @show A USV = svd(A, Linds...; leftdir, rightdir, alg=svd_alg, kwargs...) 
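   # `svd` returns `nothing` when the underlying factorization failed;
   # propagate that so the caller can handle it.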
if isnothing(USV) return nothing end U, S, V, spec, u, v = USV + @show U + @show S + @show V if ortho == "left" L, R = U, S * V elseif ortho == "right" From ff0a504a8ce3b78a4d7d98112671e377a89c73ec Mon Sep 17 00:00:00 2001 From: mtfishman Date: Mon, 30 Oct 2023 20:38:54 -0400 Subject: [PATCH 21/36] Format --- .../diagonalarray/tensor/contract.jl | 36 +++++++++++++++---- 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl index b1834a618e..34db640ace 100644 --- a/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl +++ b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl @@ -2,7 +2,9 @@ parenttype(::Type{<:DiagonalArray{<:Any,<:Any,P}}) where {P} = P # TODO: Move to a different file. -function promote_rule(storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:DiagonalArray}) +function promote_rule( + storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:DiagonalArray} +) # TODO: Replace with `unwrap_type` once # https://github.com/ITensor/ITensors.jl/pull/1220 # is merged. @@ -11,13 +13,23 @@ end # The output must be initialized as zero since it is sparse, cannot be undefined # Overspecifying types to fix ambiguity error. -function contraction_output(T1::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, T2::ArrayStorageTensor, indsR) where {T,N} +function contraction_output( + T1::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, T2::ArrayStorageTensor, indsR +) where {T,N} return zero_contraction_output(T1, T2, indsR) end -contraction_output(T1::ArrayStorageTensor, T2::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, indsR) where {T,N} = contraction_output(T2, T1, indsR) +function contraction_output( + T1::ArrayStorageTensor, T2::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, indsR +) where {T,N} + return contraction_output(T2, T1, indsR) +end # Overspecifying types to fix ambiguity error. -function contraction_output(tensor1::Tensor{T1,N1,<:DiagonalArray{T1,N1,<:AbstractVector{T1}}}, tensor2::Tensor{T2,N2,<:DiagonalArray{T2,N2,<:AbstractVector{T2}}}, indsR) where {T1,N1,T2,N2} +function contraction_output( + tensor1::Tensor{T1,N1,<:DiagonalArray{T1,N1,<:AbstractVector{T1}}}, + tensor2::Tensor{T2,N2,<:DiagonalArray{T2,N2,<:AbstractVector{T2}}}, + indsR, +) where {T1,N1,T2,N2} return zero_contraction_output(tensor1, tensor2, indsR) end @@ -65,7 +77,10 @@ function contract!( # TODO: should we make this return a Diag storage? for i in 1:min_dim DiagonalArrays.setdiagindex!( - C, α * DiagonalArrays.getdiagindex(A, i) * DiagonalArrays.getdiagindex(B, i) + β * DiagonalArrays.getdiagindex(C, i), i + C, + α * DiagonalArrays.getdiagindex(A, i) * DiagonalArrays.getdiagindex(B, i) + + β * DiagonalArrays.getdiagindex(C, i), + i, ) end end @@ -73,7 +88,16 @@ function contract!( # Most general contraction if convert_to_dense # TODO: Define `densearray(::Tensor)`. 
- contract!(C, Clabels, tensor(DiagonalArrays.densearray(storage(A)), inds(A)), Alabels, B, Blabels, α, β) + contract!( + C, + Clabels, + tensor(DiagonalArrays.densearray(storage(A)), inds(A)), + Alabels, + B, + Blabels, + α, + β, + ) else if !isone(α) || !iszero(β) error( From d71408949c26237d8b12a86184dcf2e8fb228343 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 11:46:32 -0400 Subject: [PATCH 22/36] Get more DiagonalArray tensor operations working, start improving kwargs --- NDTensors/src/DiagonalArrays/README.md | 28 ++- .../src/DiagonalArrays/examples/README.jl | 24 +-- .../src/DiagonalArrays/src/DiagonalArrays.jl | 2 +- NDTensors/src/DiagonalArrays/src/diagview.jl | 46 ++++- NDTensors/src/NDTensors.jl | 2 + .../arraystorage/storage/arraystorage.jl | 5 + .../arraystorage/storage/contract.jl | 5 +- .../arraystorage/arraystorage/tensor/svd.jl | 29 +-- .../arraystorage/arraystorage/tensor/zeros.jl | 5 + .../diagonalarray/storage/contract.jl | 172 ++++++++++++++++++ .../diagonalarray/tensor/contract.jl | 169 +---------------- NDTensors/test/arraytensor/diagonalarray.jl | 23 +++ src/tensor_operations/matrix_decomposition.jl | 18 +- 13 files changed, 302 insertions(+), 226 deletions(-) create mode 100644 NDTensors/src/arraystorage/diagonalarray/storage/contract.jl create mode 100644 NDTensors/test/arraytensor/diagonalarray.jl diff --git a/NDTensors/src/DiagonalArrays/README.md b/NDTensors/src/DiagonalArrays/README.md index 33996cf52d..1b9355b0a9 100644 --- a/NDTensors/src/DiagonalArrays/README.md +++ b/NDTensors/src/DiagonalArrays/README.md @@ -5,15 +5,11 @@ A Julia `DiagonalArray` type. ````julia using NDTensors.DiagonalArrays: DiagonalArray, - densearray, - diagview, - diaglength, - getdiagindex, - setdiagindex!, - setdiag!, - diagcopyto! - -d = DiagonalArray([1., 2, 3], 3, 4, 5) + DiagIndex, + DiagIndices, + densearray + +d = DiagonalArray([1.0, 2, 3], 3, 4, 5) @show d[1, 1, 1] == 1 @show d[2, 2, 2] == 2 @show d[1, 2, 1] == 0 @@ -21,20 +17,20 @@ d = DiagonalArray([1., 2, 3], 3, 4, 5) d[2, 2, 2] = 22 @show d[2, 2, 2] == 22 -@show diaglength(d) == 3 +@show length(d[DiagIndices()]) == 3 @show densearray(d) == d -@show getdiagindex(d, 2) == d[2, 2, 2] +@show d[DiagIndex(2)] == d[2, 2, 2] -setdiagindex!(d, 222, 2) +d[DiagIndex(2)] = 222 @show d[2, 2, 2] == 222 a = randn(3, 4, 5) new_diag = randn(3) -setdiag!(a, new_diag) -diagcopyto!(d, a) +a[DiagIndices()] = new_diag +d[DiagIndices()] = a[DiagIndices()] -@show diagview(a) == new_diag -@show diagview(d) == new_diag +@show a[DiagIndices()] == new_diag +@show d[DiagIndices()] == new_diag ```` You can generate this README with: diff --git a/NDTensors/src/DiagonalArrays/examples/README.jl b/NDTensors/src/DiagonalArrays/examples/README.jl index ede04d6c2e..5fe06d3f23 100644 --- a/NDTensors/src/DiagonalArrays/examples/README.jl +++ b/NDTensors/src/DiagonalArrays/examples/README.jl @@ -4,13 +4,9 @@ using NDTensors.DiagonalArrays: DiagonalArray, - densearray, - diagview, - diaglength, - getdiagindex, - setdiagindex!, - setdiag!, - diagcopyto! 
+ DiagIndex, + DiagIndices, + densearray d = DiagonalArray([1.0, 2, 3], 3, 4, 5) @show d[1, 1, 1] == 1 @@ -20,20 +16,20 @@ d = DiagonalArray([1.0, 2, 3], 3, 4, 5) d[2, 2, 2] = 22 @show d[2, 2, 2] == 22 -@show diaglength(d) == 3 +@show length(d[DiagIndices()]) == 3 @show densearray(d) == d -@show getdiagindex(d, 2) == d[2, 2, 2] +@show d[DiagIndex(2)] == d[2, 2, 2] -setdiagindex!(d, 222, 2) +d[DiagIndex(2)] = 222 @show d[2, 2, 2] == 222 a = randn(3, 4, 5) new_diag = randn(3) -setdiag!(a, new_diag) -diagcopyto!(d, a) +a[DiagIndices()] = new_diag +d[DiagIndices()] = a[DiagIndices()] -@show diagview(a) == new_diag -@show diagview(d) == new_diag +@show a[DiagIndices()] == new_diag +@show d[DiagIndices()] == new_diag # You can generate this README with: # ```julia diff --git a/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl b/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl index 8e965062e9..c8bb9411d1 100644 --- a/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl +++ b/NDTensors/src/DiagonalArrays/src/DiagonalArrays.jl @@ -3,7 +3,7 @@ module DiagonalArrays using Compat # allequal using LinearAlgebra -export DiagonalArray, DiagonalMatrix, DiagonalVector +export DiagonalArray, DiagonalMatrix, DiagonalVector, DiagIndex, DiagIndices, densearray include("diagview.jl") diff --git a/NDTensors/src/DiagonalArrays/src/diagview.jl b/NDTensors/src/DiagonalArrays/src/diagview.jl index 21f7ac8a06..b3e2547362 100644 --- a/NDTensors/src/DiagonalArrays/src/diagview.jl +++ b/NDTensors/src/DiagonalArrays/src/diagview.jl @@ -18,6 +18,19 @@ function setdiagindex!(a::AbstractArray, v, i::Integer) return a end +struct DiagIndex + I::Int +end + +function Base.getindex(a::AbstractArray, i::DiagIndex) + return getdiagindex(a, i.I) +end + +function Base.setindex!(a::AbstractArray, v, i::DiagIndex) + setdiagindex!(a, v, i.I) + return a +end + function setdiag!(a::AbstractArray, v) copyto!(diagview(a), v) return a @@ -28,27 +41,42 @@ function diaglength(a::AbstractArray) return minimum(size(a)) end -function diagstride(A::AbstractArray) +function diagstride(a::AbstractArray) s = 1 p = 1 - for i in 1:(ndims(A) - 1) - p *= size(A, i) + for i in 1:(ndims(a) - 1) + p *= size(a, i) s += p end return s end -function diagindices(A::AbstractArray) - diaglength = minimum(size(A)) - maxdiag = LinearIndices(A)[CartesianIndex(ntuple(Returns(diaglength), ndims(A)))] - return 1:diagstride(A):maxdiag +function diagindices(a::AbstractArray) + diaglength = minimum(size(a)) + maxdiag = LinearIndices(a)[CartesianIndex(ntuple(Returns(diaglength), ndims(a)))] + return 1:diagstride(a):maxdiag end -function diagview(A::AbstractArray) - return @view A[diagindices(A)] +function diagindices(a::AbstractArray{<:Any,0}) + return Base.OneTo(1) +end + +function diagview(a::AbstractArray) + return @view a[diagindices(a)] end function diagcopyto!(dest::AbstractArray, src::AbstractArray) copyto!(diagview(dest), diagview(src)) return dest end + +struct DiagIndices end + +function Base.getindex(a::AbstractArray, ::DiagIndices) + return diagview(a) +end + +function Base.setindex!(a::AbstractArray, v, ::DiagIndices) + setdiag!(a, v) + return a +end diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index 296bfa4f99..2a96afeb3e 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -151,6 +151,8 @@ include("arraystorage/arraystorage/tensor/eigen.jl") include("arraystorage/arraystorage/tensor/svd.jl") # DiagonalArray storage +include("arraystorage/diagonalarray/storage/contract.jl") + 
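+# (The tensor-level `contract.jl` below now only allocates contraction
+# outputs; the storage-level file above carries the `contract!` kernels.)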
include("arraystorage/diagonalarray/tensor/contract.jl") # BlockSparseArray storage diff --git a/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl b/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl index 559a7364a8..52f2beeed5 100644 --- a/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl +++ b/NDTensors/src/arraystorage/arraystorage/storage/arraystorage.jl @@ -29,3 +29,8 @@ const MatrixOrArrayStorage{T} = Union{MatrixStorage{T},ArrayStorage{T}} function to_arraystorage(x::DenseTensor) return tensor(reshape(data(x), size(x)), inds(x)) end + +# TODO: Delete once `Diag` is removed. +function to_arraystorage(x::DiagTensor) + return tensor(DiagonalArray(data(x), size(x)), inds(x)) +end diff --git a/NDTensors/src/arraystorage/arraystorage/storage/contract.jl b/NDTensors/src/arraystorage/arraystorage/storage/contract.jl index 0770325f9b..34c3daaebb 100644 --- a/NDTensors/src/arraystorage/arraystorage/storage/contract.jl +++ b/NDTensors/src/arraystorage/arraystorage/storage/contract.jl @@ -4,10 +4,11 @@ function contract( labels1, array2::MatrixOrArrayStorage, labels2, - labelsR=contract_labels(labels1, labels2), + labelsR=contract_labels(labels1, labels2); + kwargs... ) output_array = contraction_output(array1, labels1, array2, labels2, labelsR) - contract!(output_array, labelsR, array1, labels1, array2, labels2) + contract!(output_array, labelsR, array1, labels1, array2, labels2; kwargs...) return output_array end diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl index ae1f6f22a8..e19a12dca2 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl @@ -1,3 +1,10 @@ +default_maxdim(a) = minimum(size(a)) +default_mindim(a) = true +default_cutoff(a) = zero(eltype(a)) +default_svd_alg(a) = "divide_and_conquer" +default_use_absolute_cutoff(a) = false +default_use_relative_cutoff(a) = true + # TODO: Rewrite this function to be more modern: # 1. Output `Spectrum` as a keyword argument that gets overwritten. # 2. Dispatch on `alg`. @@ -9,25 +16,23 @@ svd of an order-2 DenseTensor """ function svd( T::ArrayStorageTensor; + mindim=default_mindim(T), maxdim=nothing, - mindim=1, cutoff=nothing, - alg="divide_and_conquer", # TODO: Define `default_alg(T)` - use_absolute_cutoff=false, - use_relative_cutoff=true, + alg=default_svd_alg(T), + use_absolute_cutoff=default_use_absolute_cutoff(T), + use_relative_cutoff=default_use_relative_cutoff(T), # These are getting passed erroneously. # TODO: Make sure they don't get passed down # to here. - which_decomp=nothing, - tags=nothing, - eigen_perturbation=nothing, - normalize=nothing, + ## which_decomp=nothing, + ## tags=nothing, + ## eigen_perturbation=nothing, + ## normalize=nothing, ) truncate = !isnothing(maxdim) || !isnothing(cutoff) - # TODO: Define `default_maxdim(T)`. - maxdim = isnothing(maxdim) ? minimum(dims(T)) : maxdim - # TODO: Define `default_cutoff(T)`. - cutoff = isnothing(cutoff) ? zero(eltype(T)) : cutoff + maxdim = isnothing(maxdim) ? default_maxdim(T) : maxdim + cutoff = isnothing(cutoff) ? default_cutoff(T) : cutoff # TODO: Dispatch on `Algorithm(alg)`. 
if alg == "divide_and_conquer" diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl b/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl index 66759892e4..c990e5db30 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/zeros.jl @@ -11,3 +11,8 @@ end function zeros(tensortype::Type{<:ArrayStorageTensor}, inds::Tuple{Vararg{Integer}}) return _zeros(tensortype, inds) end + +# To resolve ambiguity error with `Base.zeros`. +function zeros(tensortype::Type{<:ArrayStorageTensor}, inds::Tuple{}) + return _zeros(tensortype, inds) +end diff --git a/NDTensors/src/arraystorage/diagonalarray/storage/contract.jl b/NDTensors/src/arraystorage/diagonalarray/storage/contract.jl new file mode 100644 index 0000000000..a5c5f824eb --- /dev/null +++ b/NDTensors/src/arraystorage/diagonalarray/storage/contract.jl @@ -0,0 +1,172 @@ +# TODO: Move to a different file. +parenttype(::Type{<:DiagonalArray{<:Any,<:Any,P}}) where {P} = P + +# TODO: Move to a different file. +function promote_rule( + storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:DiagonalArray} +) + # TODO: Replace with `unwrap_type` once + # https://github.com/ITensor/ITensors.jl/pull/1220 + # is merged. + return promote_type(storagetype1, leaf_parenttype(storagetype2)) +end + +# TODO: Move to a different file. +function promote_rule( + storagetype1::Type{<:DiagonalArray}, storagetype2::Type{<:DiagonalArray} +) + return error("Not implemented yet") +end + +function contraction_output_type( + arraytype1::Type{<:DiagonalArray}, arraytype2::Type{<:DiagonalArray}, inds +) + return error("Not implemented yet") +end + +default_convert_to_dense() = true + +# TODO: Modernize this function, rewrite in terms of `Array` and `DiagonalArray`. +# TODO: Move to `storage`. +function contract!( + C::ArrayStorage, + Clabels, + A::DiagonalArray, + Alabels, + B::ArrayStorage, + Blabels, + α::Number=one(eltype(C)), + β::Number=zero(eltype(C)); + convert_to_dense=default_convert_to_dense(), +) + if convert_to_dense + contract_dense!(C, Clabels, A, Alabels, B, Blabels, α, β) + return C + end + if !isone(α) || !iszero(β) + error( + "`contract!(::ArrayStorageTensor, ::DiagTensor, ::ArrayStorageTensor, α, β; convert_to_dense = false)` with `α ≠ 1` or `β ≠ 0` is not currently supported. 
You can call it with `convert_to_dense = true` instead.", + ) + end + astarts = zeros(Int, length(Alabels)) + bstart = 0 + cstart = 0 + b_cstride = 0 + nbu = 0 + for ib in 1:length(Blabels) + ia = findfirst(==(Blabels[ib]), Alabels) + if !isnothing(ia) + b_cstride += stride(B, ib) + bstart += astarts[ia] * stride(B, ib) + else + nbu += 1 + end + end + c_cstride = 0 + for ic in 1:length(Clabels) + ia = findfirst(==(Clabels[ic]), Alabels) + if !isnothing(ia) + c_cstride += stride(C, ic) + cstart += astarts[ia] * stride(C, ic) + end + end + # strides of the uncontracted dimensions of + # B + bustride = zeros(Int, nbu) + custride = zeros(Int, nbu) + # size of the uncontracted dimensions of + # B, to be used in CartesianIndices + busize = zeros(Int, nbu) + n = 1 + for ib in 1:length(Blabels) + if Blabels[ib] > 0 + bustride[n] = stride(B, ib) + busize[n] = size(B, ib) + ic = findfirst(==(Blabels[ib]), Clabels) + custride[n] = stride(C, ic) + n += 1 + end + end + boffset_orig = 1 - sum(strides(B)) + coffset_orig = 1 - sum(strides(C)) + cartesian_inds = CartesianIndices(Tuple(busize)) + for inds in cartesian_inds + boffset = boffset_orig + coffset = coffset_orig + for i in 1:nbu + ii = inds[i] + boffset += ii * bustride[i] + coffset += ii * custride[i] + end + c = zero(eltype(C)) + for j in 1:DiagonalArrays.diaglength(A) + # With α == 0 && β == 1 + C[cstart + j * c_cstride + coffset] += + A[DiagIndex(j)] * B[bstart + j * b_cstride + boffset] + # XXX: not sure if this is correct + #C[cstart+j*c_cstride+coffset] += α * A[DiagIndex(j)] * B[bstart+j*b_cstride+boffset] + β * C[cstart+j*c_cstride+coffset] + end + end +end + +function contract!( + C::ArrayStorage{<:Any,0}, + Clabels, + A::DiagonalArray, + Alabels, + B::ArrayStorage, + Blabels, + α::Number=one(eltype(C)), + β::Number=zero(eltype(C)); + convert_to_dense=nothing, +) + # If all of B is contracted + # TODO: can also check NC+NB==NA + min_dim = min(minimum(size(A)), minimum(size(B))) + if length(Clabels) == 0 + # all indices are summed over, just add the product of the diagonal + # elements of A and B + # Assumes C starts set to 0 + c₁ = zero(eltype(C)) + for i in 1:min_dim + c₁ += A[DiagIndex(i)] * B[DiagIndex(i)] + end + C[DiagIndex(1)] = α * c₁ + β * C[DiagIndex(1)] + else + # not all indices are summed over, set the diagonals of the result + # to the product of the diagonals of A and B + # TODO: should we make this return a Diag storage? + for i in 1:min_dim + C[DiagIndex(i)] = α * A[DiagIndex(i)] * B[DiagIndex(i)] + β * C[DiagIndex(i)] + end + end + return C +end + +function contract_dense!( + C::ArrayStorage, + Clabels, + A::DiagonalArray, + Alabels, + B::ArrayStorage, + Blabels, + α::Number=one(eltype(C)), + β::Number=zero(eltype(C)), +) + return contract!(C, Clabels, densearray(A), Alabels, B, Blabels, α, β) +end + +# Overspecifying types to fix ambiguity error. +function contract!( + C::ArrayStorage, + Clabels, + A::ArrayStorage, + Alabels, + B::DiagonalArray, + Blabels, + α::Number=one(eltype(C)), + β::Number=zero(eltype(C)); + convert_to_dense=default_convert_to_dense(), +) + return contract!(C, Clabels, B, Blabels, A, Alabels, α, β; convert_to_dense) +end diff --git a/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl index 34db640ace..18fe5a1cff 100644 --- a/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl +++ b/NDTensors/src/arraystorage/diagonalarray/tensor/contract.jl @@ -1,23 +1,13 @@ -# TODO: Move to a different file. 
-parenttype(::Type{<:DiagonalArray{<:Any,<:Any,P}}) where {P} = P - -# TODO: Move to a different file. -function promote_rule( - storagetype1::Type{<:ArrayStorage}, storagetype2::Type{<:DiagonalArray} -) - # TODO: Replace with `unwrap_type` once - # https://github.com/ITensor/ITensors.jl/pull/1220 - # is merged. - return promote_type(storagetype1, leaf_parenttype(storagetype2)) -end - # The output must be initialized as zero since it is sparse, cannot be undefined # Overspecifying types to fix ambiguity error. +# TODO: Rewrite in terms of `DiagonalArray` and `Array`, not `Tensor`. function contraction_output( T1::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, T2::ArrayStorageTensor, indsR ) where {T,N} return zero_contraction_output(T1, T2, indsR) end + +# TODO: Rewrite in terms of `DiagonalArray` and `Array`, not `Tensor`. function contraction_output( T1::ArrayStorageTensor, T2::Tensor{T,N,<:DiagonalArray{T,N,<:AbstractVector{T}}}, indsR ) where {T,N} @@ -25,6 +15,7 @@ function contraction_output( end # Overspecifying types to fix ambiguity error. +# TODO: Rewrite in terms of `DiagonalArray`, not `Tensor`. function contraction_output( tensor1::Tensor{T1,N1,<:DiagonalArray{T1,N1,<:AbstractVector{T1}}}, tensor2::Tensor{T2,N2,<:DiagonalArray{T2,N2,<:AbstractVector{T2}}}, @@ -32,155 +23,3 @@ function contraction_output( ) where {T1,N1,T2,N2} return zero_contraction_output(tensor1, tensor2, indsR) end - -## function contraction_output_type( -## tensortype1::Type{<:Tensor{<:Any,<:Any,<:DiagonalArray}}, tensortype2::Type{<:ArrayStorageTensor}, indsR -## ) -## return similartype(promote_type(tensortype1, tensortype2), indsR) -## end - -## function contraction_output_type( -## tensortype1::Type{<:ArrayStorageTensor}, tensortype2::Type{<:DiagTensor}, indsR -## ) -## return contraction_output_type(tensortype2, tensortype1, indsR) -## end - -# TODO: Modernize this function, rewrite in terms of `Array` and `DiagonalArray`. -function contract!( - C::ArrayStorageTensor{ElC,NC}, - Clabels, - A::Tensor{ElA,NA,<:DiagonalArray{ElA,NA}}, - Alabels, - B::ArrayStorageTensor{ElB,NB}, - Blabels, - α::Number=one(ElC), - β::Number=zero(ElC); - convert_to_dense::Bool=false, -) where {ElA,NA,ElB,NB,ElC,NC} - #@timeit_debug timer "diag-dense contract!" begin - if all(i -> i < 0, Blabels) - # If all of B is contracted - # TODO: can also check NC+NB==NA - min_dim = min(minimum(dims(A)), minimum(dims(B))) - if length(Clabels) == 0 - # all indices are summed over, just add the product of the diagonal - # elements of A and B - # Assumes C starts set to 0 - c₁ = zero(ElC) - for i in 1:min_dim - c₁ += DiagonalArrays.getdiagindex(A, i) * DiagonalArrays.getdiagindex(B, i) - end - DiagonalArrays.setdiagindex!(C, α * c₁ + β * DiagonalArrays.getdiagindex(C, 1), 1) - else - # not all indices are summed over, set the diagonals of the result - # to the product of the diagonals of A and B - # TODO: should we make this return a Diag storage? - for i in 1:min_dim - DiagonalArrays.setdiagindex!( - C, - α * DiagonalArrays.getdiagindex(A, i) * DiagonalArrays.getdiagindex(B, i) + - β * DiagonalArrays.getdiagindex(C, i), - i, - ) - end - end - else - # Most general contraction - if convert_to_dense - # TODO: Define `densearray(::Tensor)`. 
- contract!( - C, - Clabels, - tensor(DiagonalArrays.densearray(storage(A)), inds(A)), - Alabels, - B, - Blabels, - α, - β, - ) - else - if !isone(α) || !iszero(β) - error( - "`contract!(::ArrayStorageTensor, ::DiagTensor, ::ArrayStorageTensor, α, β; convert_to_dense = false)` with `α ≠ 1` or `β ≠ 0` is not currently supported. You can call it with `convert_to_dense = true` instead.", - ) - end - astarts = zeros(Int, length(Alabels)) - bstart = 0 - cstart = 0 - b_cstride = 0 - nbu = 0 - for ib in 1:length(Blabels) - ia = findfirst(==(Blabels[ib]), Alabels) - if !isnothing(ia) - b_cstride += stride(B, ib) - bstart += astarts[ia] * stride(B, ib) - else - nbu += 1 - end - end - - c_cstride = 0 - for ic in 1:length(Clabels) - ia = findfirst(==(Clabels[ic]), Alabels) - if !isnothing(ia) - c_cstride += stride(C, ic) - cstart += astarts[ia] * stride(C, ic) - end - end - - # strides of the uncontracted dimensions of - # B - bustride = zeros(Int, nbu) - custride = zeros(Int, nbu) - # size of the uncontracted dimensions of - # B, to be used in CartesianIndices - busize = zeros(Int, nbu) - n = 1 - for ib in 1:length(Blabels) - if Blabels[ib] > 0 - bustride[n] = stride(B, ib) - busize[n] = size(B, ib) - ic = findfirst(==(Blabels[ib]), Clabels) - custride[n] = stride(C, ic) - n += 1 - end - end - - boffset_orig = 1 - sum(strides(B)) - coffset_orig = 1 - sum(strides(C)) - cartesian_inds = CartesianIndices(Tuple(busize)) - for inds in cartesian_inds - boffset = boffset_orig - coffset = coffset_orig - for i in 1:nbu - ii = inds[i] - boffset += ii * bustride[i] - coffset += ii * custride[i] - end - c = zero(ElC) - for j in 1:DiagonalArrays.diaglength(A) - # With α == 0 && β == 1 - C[cstart + j * c_cstride + coffset] += - DiagonalArrays.getdiagindex(A, j) * B[bstart + j * b_cstride + boffset] - # XXX: not sure if this is correct - #C[cstart+j*c_cstride+coffset] += α * getdiagindex(A, j)* B[bstart+j*b_cstride+boffset] + β * C[cstart+j*c_cstride+coffset] - end - end - end - end - #end # @timeit -end - -# Overspecifying types to fix ambiguity error. 
-function contract!( - C::ArrayStorageTensor, - Clabels, - A::ArrayStorageTensor, - Alabels, - B::Tensor{TB,NB,<:DiagonalArray{TB,NB}}, - Blabels, - α::Number=one(eltype(C)), - β::Number=zero(eltype(C)), -) where {TB,NB} - return contract!(C, Clabels, B, Blabels, A, Alabels, α, β) -end diff --git a/NDTensors/test/arraytensor/diagonalarray.jl b/NDTensors/test/arraytensor/diagonalarray.jl new file mode 100644 index 0000000000..2e12a742aa --- /dev/null +++ b/NDTensors/test/arraytensor/diagonalarray.jl @@ -0,0 +1,23 @@ +using NDTensors +using NDTensors.DiagonalArrays +using LinearAlgebra +using Test + +using NDTensors: storage, storagetype + +@testset "Tensor wrapping DiagonalArray" begin + D = DiagonalArray(randn(3), 3, 4, 5) + Dᵈ = densearray(D) + A = randn(3, 4, 5) + + for convert_to_dense in (true, false) + @test contract(D, (-1, -2, -3), A, (-1, -2, -3); convert_to_dense) ≈ contract(Dᵈ, (-1, -2, -3), A, (-1, -2, -3)) + @test contract(D, (-1, -2, 1), A, (-1, -2, 2); convert_to_dense) ≈ contract(Dᵈ, (-1, -2, 1), A, (-1, -2, 2)) + end + + # Tensor tests + Dᵗ = tensor(D, size(D)) + Dᵈᵗ = tensor(Dᵈ, size(D)) + Aᵗ = tensor(A, size(A)) + @test contract(Dᵗ, (-1, -2, -3), Aᵗ, (-1, -2, -3)) ≈ contract(Dᵈᵗ, (-1, -2, -3), Aᵗ, (-1, -2, -3)) +end diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 770d98e947..5740f49501 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -551,20 +551,24 @@ function factorize_svd( Linds...; (singular_values!)=nothing, ortho="left", - svd_alg="divide_and_conquer", + svd_alg=NDTensors.default_svd_alg(A), dir=ITensors.In, - kwargs..., + mindim=NDTensors.default_mindim(A), + maxdim=nothing, + cutoff=nothing, + tags=nothing, + # Passed erroneously + # TODO: Delete + which_decomp=nothing, + eigen_perturbation=nothing, + normalize=nothing, ) leftdir, rightdir = -dir, -dir - @show A - USV = svd(A, Linds...; leftdir, rightdir, alg=svd_alg, kwargs...) + USV = svd(A, Linds...; leftdir, rightdir, alg=svd_alg, mindim, maxdim, cutoff, tags) if isnothing(USV) return nothing end U, S, V, spec, u, v = USV - @show U - @show S - @show V if ortho == "left" L, R = U, S * V elseif ortho == "right" From f2d65d2997665b228fa376e8e29ac1e95d1ac032 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 11:56:24 -0400 Subject: [PATCH 23/36] More work on kwargs --- .../src/DiagonalArrays/examples/README.jl | 6 +-- NDTensors/src/NDTensors.jl | 1 + .../arraystorage/storage/contract.jl | 2 +- .../arraystorage/arraystorage/tensor/svd.jl | 14 ------ .../diagonalarray/storage/contract.jl | 2 +- NDTensors/src/default_kwargs.jl | 6 +++ NDTensors/test/arraytensor/diagonalarray.jl | 9 ++-- src/tensor_operations/matrix_decomposition.jl | 43 ++++++++++++++----- 8 files changed, 48 insertions(+), 35 deletions(-) create mode 100644 NDTensors/src/default_kwargs.jl diff --git a/NDTensors/src/DiagonalArrays/examples/README.jl b/NDTensors/src/DiagonalArrays/examples/README.jl index 5fe06d3f23..f5e621385e 100644 --- a/NDTensors/src/DiagonalArrays/examples/README.jl +++ b/NDTensors/src/DiagonalArrays/examples/README.jl @@ -2,11 +2,7 @@ # # A Julia `DiagonalArray` type. 
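+#
+# A sketch of the basic interface this example file exercises (summarizing the
+# usage below and in this patch's tests; not authoritative documentation):
+#
+#     d = DiagonalArray([1.0, 2, 3], 3, 4, 5)  # diagonal data, then dimensions
+#     d[1, 1, 1]     # == 1.0, an on-diagonal entry
+#     d[2, 1, 1]     # == 0.0, off-diagonal entries default to zero
+#     densearray(d)  # convert to an ordinary dense `Array`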
-using NDTensors.DiagonalArrays: - DiagonalArray, - DiagIndex, - DiagIndices, - densearray +using NDTensors.DiagonalArrays: DiagonalArray, DiagIndex, DiagIndices, densearray d = DiagonalArray([1.0, 2, 3], 3, 4, 5) @show d[1, 1, 1] == 1 diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl index 2a96afeb3e..44de4f244d 100644 --- a/NDTensors/src/NDTensors.jl +++ b/NDTensors/src/NDTensors.jl @@ -46,6 +46,7 @@ include("exports.jl") ##################################### # General functionality # +include("default_kwargs.jl") include("algorithm.jl") include("aliasstyle.jl") include("abstractarray/set_types.jl") diff --git a/NDTensors/src/arraystorage/arraystorage/storage/contract.jl b/NDTensors/src/arraystorage/arraystorage/storage/contract.jl index 34c3daaebb..39c9696b48 100644 --- a/NDTensors/src/arraystorage/arraystorage/storage/contract.jl +++ b/NDTensors/src/arraystorage/arraystorage/storage/contract.jl @@ -5,7 +5,7 @@ function contract( array2::MatrixOrArrayStorage, labels2, labelsR=contract_labels(labels1, labels2); - kwargs... + kwargs..., ) output_array = contraction_output(array1, labels1, array2, labels2, labelsR) contract!(output_array, labelsR, array1, labels1, array2, labels2; kwargs...) diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl index e19a12dca2..b224cd0893 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl @@ -1,10 +1,3 @@ -default_maxdim(a) = minimum(size(a)) -default_mindim(a) = true -default_cutoff(a) = zero(eltype(a)) -default_svd_alg(a) = "divide_and_conquer" -default_use_absolute_cutoff(a) = false -default_use_relative_cutoff(a) = true - # TODO: Rewrite this function to be more modern: # 1. Output `Spectrum` as a keyword argument that gets overwritten. # 2. Dispatch on `alg`. @@ -22,13 +15,6 @@ function svd( alg=default_svd_alg(T), use_absolute_cutoff=default_use_absolute_cutoff(T), use_relative_cutoff=default_use_relative_cutoff(T), - # These are getting passed erroneously. - # TODO: Make sure they don't get passed down - # to here. - ## which_decomp=nothing, - ## tags=nothing, - ## eigen_perturbation=nothing, - ## normalize=nothing, ) truncate = !isnothing(maxdim) || !isnothing(cutoff) maxdim = isnothing(maxdim) ? 
default_maxdim(T) : maxdim diff --git a/NDTensors/src/arraystorage/diagonalarray/storage/contract.jl b/NDTensors/src/arraystorage/diagonalarray/storage/contract.jl index a5c5f824eb..b86753ac20 100644 --- a/NDTensors/src/arraystorage/diagonalarray/storage/contract.jl +++ b/NDTensors/src/arraystorage/diagonalarray/storage/contract.jl @@ -131,7 +131,7 @@ function contract!( for i in 1:min_dim c₁ += A[DiagIndex(i)] * B[DiagIndex(i)] end - C[DiagIndex(1)] = α * c₁ + β * C[DiagIndex(1)] + C[DiagIndex(1)] = α * c₁ + β * C[DiagIndex(1)] else # not all indices are summed over, set the diagonals of the result # to the product of the diagonals of A and B diff --git a/NDTensors/src/default_kwargs.jl b/NDTensors/src/default_kwargs.jl new file mode 100644 index 0000000000..54561c9f9b --- /dev/null +++ b/NDTensors/src/default_kwargs.jl @@ -0,0 +1,6 @@ +default_maxdim(a) = minimum(size(a)) +default_mindim(a) = true +default_cutoff(a) = zero(eltype(a)) +default_svd_alg(a) = "divide_and_conquer" +default_use_absolute_cutoff(a) = false +default_use_relative_cutoff(a) = true diff --git a/NDTensors/test/arraytensor/diagonalarray.jl b/NDTensors/test/arraytensor/diagonalarray.jl index 2e12a742aa..70c0029dfc 100644 --- a/NDTensors/test/arraytensor/diagonalarray.jl +++ b/NDTensors/test/arraytensor/diagonalarray.jl @@ -11,13 +11,16 @@ using NDTensors: storage, storagetype A = randn(3, 4, 5) for convert_to_dense in (true, false) - @test contract(D, (-1, -2, -3), A, (-1, -2, -3); convert_to_dense) ≈ contract(Dᵈ, (-1, -2, -3), A, (-1, -2, -3)) - @test contract(D, (-1, -2, 1), A, (-1, -2, 2); convert_to_dense) ≈ contract(Dᵈ, (-1, -2, 1), A, (-1, -2, 2)) + @test contract(D, (-1, -2, -3), A, (-1, -2, -3); convert_to_dense) ≈ + contract(Dᵈ, (-1, -2, -3), A, (-1, -2, -3)) + @test contract(D, (-1, -2, 1), A, (-1, -2, 2); convert_to_dense) ≈ + contract(Dᵈ, (-1, -2, 1), A, (-1, -2, 2)) end # Tensor tests Dᵗ = tensor(D, size(D)) Dᵈᵗ = tensor(Dᵈ, size(D)) Aᵗ = tensor(A, size(A)) - @test contract(Dᵗ, (-1, -2, -3), Aᵗ, (-1, -2, -3)) ≈ contract(Dᵈᵗ, (-1, -2, -3), Aᵗ, (-1, -2, -3)) + @test contract(Dᵗ, (-1, -2, -3), Aᵗ, (-1, -2, -3)) ≈ + contract(Dᵈᵗ, (-1, -2, -3), Aᵗ, (-1, -2, -3)) end diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 5740f49501..ee75c70e8f 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -106,15 +106,23 @@ Utrunc2, Strunc2, Vtrunc2 = svd(A, i, k; cutoff=1e-10); See also: [`factorize`](@ref), [`eigen`](@ref) """ -function svd(A::ITensor, Linds...; leftdir=nothing, rightdir=nothing, kwargs...) - utags::TagSet = get(kwargs, :lefttags, get(kwargs, :utags, "Link,u")) - vtags::TagSet = get(kwargs, :righttags, get(kwargs, :vtags, "Link,v")) - - # Keyword argument deprecations - #if haskey(kwargs, :utags) || haskey(kwargs, :vtags) - # @warn "Keyword arguments `utags` and `vtags` are deprecated in favor of `leftags` and `righttags`." 
- #end - +function svd( + A::ITensor, + Linds...; + leftdir=nothing, + rightdir=nothing, + lefttags="Link,u", + righttags="Link,v", + mindim=NDTensors.default_mindim(A), + maxdim=nothing, + cutoff=nothing, + alg=NDTensors.default_svd_alg(A), + use_absolute_cutoff=NDTensors.default_use_absolute_cutoff(A), + use_relative_cutoff=NDTensors.default_use_relative_cutoff(A), + # Deprecated + utags=lefttags, + vtags=righttags, +) Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) @@ -142,7 +150,9 @@ function svd(A::ITensor, Linds...; leftdir=nothing, rightdir=nothing, kwargs...) AC = permute(AC, cL, cR) end - USVT = svd(tensor(AC); kwargs...) + USVT = svd( + tensor(AC); mindim, maxdim, cutoff, alg, use_absolute_cutoff, use_relative_cutoff + ) if isnothing(USVT) return nothing end @@ -564,7 +574,18 @@ function factorize_svd( normalize=nothing, ) leftdir, rightdir = -dir, -dir - USV = svd(A, Linds...; leftdir, rightdir, alg=svd_alg, mindim, maxdim, cutoff, tags) + USV = svd( + A, + Linds...; + leftdir, + rightdir, + alg=svd_alg, + mindim, + maxdim, + cutoff, + lefttags=tags, + righttags=tags, + ) if isnothing(USV) return nothing end From 38b32a695ff8664b3840385dc63b65fa734a3c40 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 12:03:49 -0400 Subject: [PATCH 24/36] Fix SVD --- NDTensors/src/linearalgebra/linearalgebra.jl | 44 ++++++-------------- 1 file changed, 13 insertions(+), 31 deletions(-) diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 20dec6b3a9..ae11b1eaff 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -93,36 +93,18 @@ end svd of an order-2 DenseTensor """ -function svd(T::DenseTensor{ElT,2,IndsT}; kwargs...) where {ElT,IndsT} - truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) - - # - # Keyword argument deprecations - # - use_absolute_cutoff = false - if haskey(kwargs, :absoluteCutoff) - @warn "In svd, keyword argument absoluteCutoff is deprecated in favor of use_absolute_cutoff" - use_absolute_cutoff = get(kwargs, :absoluteCutoff, use_absolute_cutoff) - end - - use_relative_cutoff = true - if haskey(kwargs, :doRelCutoff) - @warn "In svd, keyword argument doRelCutoff is deprecated in favor of use_relative_cutoff" - use_relative_cutoff = get(kwargs, :doRelCutoff, use_relative_cutoff) - end - - if haskey(kwargs, :fastsvd) || haskey(kwargs, :fastSVD) - error( - "In svd, fastsvd/fastSVD keyword arguments are removed in favor of alg, see documentation for more details.", - ) - end - - maxdim::Int = get(kwargs, :maxdim, minimum(dims(T))) - mindim::Int = get(kwargs, :mindim, 1) - cutoff = get(kwargs, :cutoff, 0.0) - use_absolute_cutoff::Bool = get(kwargs, :use_absolute_cutoff, use_absolute_cutoff) - use_relative_cutoff::Bool = get(kwargs, :use_relative_cutoff, use_relative_cutoff) - alg::String = get(kwargs, :alg, "divide_and_conquer") +function svd( + T::DenseTensor{ElT,2,IndsT}; + mindim=default_mindim(T), + maxdim=nothing, + cutoff=nothing, + alg=default_svd_alg(T), + use_absolute_cutoff=default_use_absolute_cutoff(T), + use_relative_cutoff=default_use_relative_cutoff(T), +) where {ElT,IndsT} + truncate = !isnothing(maxdim) || !isnothing(cutoff) + maxdim = isnothing(maxdim) ? default_maxdim(T) : maxdim + cutoff = isnothing(cutoff) ? default_cutoff(T) : cutoff #@timeit_debug timer "dense svd" begin if alg == "divide_and_conquer" @@ -168,7 +150,7 @@ function svd(T::DenseTensor{ElT,2,IndsT}; kwargs...) 
where {ElT,IndsT}
   P = MS .^ 2
   if truncate
     P, truncerr, _ = truncate!!(
-      P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs...
+      P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
     )
   else
     truncerr = 0.0
From 9c4ce2cdc6e11d8d69416a28a991d10333fc9e2c Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 31 Oct 2023 12:59:55 -0400
Subject: [PATCH 25/36] More kwarg refactoring

---
 NDTensors/src/blocksparse/linearalgebra.jl    | 25 ++--
 NDTensors/src/default_kwargs.jl               |  3 +
 NDTensors/src/linearalgebra/linearalgebra.jl  | 84 ++++--------
 NDTensors/src/truncate.jl                     | 53 +++-----
 src/tensor_operations/matrix_decomposition.jl | 126 +++++++++++------
 5 files changed, 147 insertions(+), 144 deletions(-)

diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl
index 70bf5c7972..ea02922a57 100644
--- a/NDTensors/src/blocksparse/linearalgebra.jl
+++ b/NDTensors/src/blocksparse/linearalgebra.jl
@@ -34,12 +34,19 @@ per row/column, otherwise it fails. This assumption makes it so the result can
 be computed from the dense svds of separate blocks.
 """
-function svd(T::Tensor{ElT,2,<:BlockSparse}; kwargs...) where {ElT}
-  alg::String = get(kwargs, :alg, "divide_and_conquer")
-  min_blockdim::Int = get(kwargs, :min_blockdim, 0)
-  truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff)
-
-  #@timeit_debug timer "block sparse svd" begin
+function svd(
+  T::Tensor{ElT,2,<:BlockSparse};
+  mindim=default_mindim(T),
+  maxdim=nothing,
+  cutoff=nothing,
+  alg=default_svd_alg(T),
+  use_absolute_cutoff=default_use_absolute_cutoff(T),
+  use_relative_cutoff=default_use_relative_cutoff(T),
+  min_blockdim=0,
+) where {ElT}
+  truncate = !isnothing(maxdim) || !isnothing(cutoff)
+  maxdim = isnothing(maxdim) ? default_maxdim(T) : maxdim
+  cutoff = isnothing(cutoff) ? default_cutoff(T) : cutoff
 
   Us = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T))
   Ss = Vector{DiagTensor{real(ElT),2}}(undef, nnzblocks(T))
@@ -50,7 +57,7 @@
 
   for (n, b) in enumerate(eachnzblock(T))
     blockT = blockview(T, b)
-    USVb = svd(blockT; alg=alg)
+    USVb = svd(blockT; alg)
     if isnothing(USVb)
       return nothing
     end
@@ -78,7 +85,9 @@
   dropblocks = Int[]
 
   if truncate
-    d, truncerr, docut = truncate!!(d; kwargs...)
+ d, truncerr, docut = truncate!!( + d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff + ) for n in 1:nnzblocks(T) blockdim = _truncated_blockdim( Ss[n], docut; min_blockdim, singular_values=true, truncate diff --git a/NDTensors/src/default_kwargs.jl b/NDTensors/src/default_kwargs.jl index 54561c9f9b..73d8756d84 100644 --- a/NDTensors/src/default_kwargs.jl +++ b/NDTensors/src/default_kwargs.jl @@ -1,3 +1,6 @@ +replace_nothing(::Nothing, replacement) = replacement +replace_nothing(value, replacement) = value + default_maxdim(a) = minimum(size(a)) default_mindim(a) = true default_cutoff(a) = zero(eltype(a)) diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index ae11b1eaff..8a01868f6f 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -95,18 +95,16 @@ svd of an order-2 DenseTensor """ function svd( T::DenseTensor{ElT,2,IndsT}; - mindim=default_mindim(T), + mindim=nothing, maxdim=nothing, cutoff=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, alg=default_svd_alg(T), - use_absolute_cutoff=default_use_absolute_cutoff(T), - use_relative_cutoff=default_use_relative_cutoff(T), + # Only used by BlockSparse svd + min_blockdim=0, ) where {ElT,IndsT} truncate = !isnothing(maxdim) || !isnothing(cutoff) - maxdim = isnothing(maxdim) ? default_maxdim(T) : maxdim - cutoff = isnothing(cutoff) ? default_cutoff(T) : cutoff - - #@timeit_debug timer "dense svd" begin if alg == "divide_and_conquer" MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.DivideAndConquer()) if isnothing(MUSV) @@ -178,27 +176,14 @@ function svd( end function eigen( - T::Hermitian{ElT,<:DenseTensor{ElT,2,IndsT}}; kwargs... + T::Hermitian{ElT,<:DenseTensor{ElT,2,IndsT}}; + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, ) where {ElT<:Union{Real,Complex},IndsT} - # Keyword argument deprecations - use_absolute_cutoff = false - if haskey(kwargs, :absoluteCutoff) - @warn "In svd, keyword argument absoluteCutoff is deprecated in favor of use_absolute_cutoff" - use_absolute_cutoff = get(kwargs, :absoluteCutoff, use_absolute_cutoff) - end - use_relative_cutoff = true - if haskey(kwargs, :doRelCutoff) - @warn "In svd, keyword argument doRelCutoff is deprecated in favor of use_relative_cutoff" - use_relative_cutoff = get(kwargs, :doRelCutoff, use_relative_cutoff) - end - - truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) - maxdim::Int = get(kwargs, :maxdim, minimum(dims(T))) - mindim::Int = get(kwargs, :mindim, 1) - cutoff::Union{Nothing,Float64} = get(kwargs, :cutoff, 0.0) - use_absolute_cutoff::Bool = get(kwargs, :use_absolute_cutoff, use_absolute_cutoff) - use_relative_cutoff::Bool = get(kwargs, :use_relative_cutoff, use_relative_cutoff) - + truncate = !isnothing(maxdim) || !isnothing(cutoff) matrixT = matrix(T) ## TODO Here I am calling parent to ensure that the correct `any` function ## is envoked for non-cpu matrices @@ -220,7 +205,7 @@ function eigen( if truncate DM, truncerr, _ = truncate!!( - DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs... + DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff ) dD = length(DM) if dD < size(VM, 2) @@ -298,27 +283,14 @@ random_orthog(::Type{ElT}, n::Int, m::Int) where {ElT<:Real} = random_unitary(El random_orthog(n::Int, m::Int) = random_orthog(Float64, n, m) function eigen( - T::DenseTensor{ElT,2,IndsT}; kwargs... 
+ T::DenseTensor{ElT,2,IndsT}; + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, ) where {ElT<:Union{Real,Complex},IndsT} - # Keyword argument deprecations - use_absolute_cutoff = false - if haskey(kwargs, :absoluteCutoff) - @warn "In svd, keyword argument absoluteCutoff is deprecated in favor of use_absolute_cutoff" - use_absolute_cutoff = get(kwargs, :absoluteCutoff, use_absolute_cutoff) - end - use_relative_cutoff = true - if haskey(kwargs, :doRelCutoff) - @warn "In svd, keyword argument doRelCutoff is deprecated in favor of use_relative_cutoff" - use_relative_cutoff = get(kwargs, :doRelCutoff, use_relative_cutoff) - end - - truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) - maxdim::Int = get(kwargs, :maxdim, minimum(dims(T))) - mindim::Int = get(kwargs, :mindim, 1) - cutoff::Float64 = get(kwargs, :cutoff, 0.0) - use_absolute_cutoff::Bool = get(kwargs, :use_absolute_cutoff, use_absolute_cutoff) - use_relative_cutoff::Bool = get(kwargs, :use_relative_cutoff, use_relative_cutoff) - + truncate = !isnothing(maxdim) || !isnothing(cutoff) matrixT = matrix(T) if any(!isfinite, matrixT) throw( @@ -337,7 +309,7 @@ function eigen( if truncate DM, truncerr, _ = truncate!!( - DM; maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff, kwargs... + DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff ) dD = length(DM) if dD < size(VM, 2) @@ -362,22 +334,22 @@ function eigen( end # NDTensors.qr -function qr(T::DenseTensor{<:Any,2}; positive=false, kwargs...) +function qr(T::DenseTensor{<:Any,2}; positive=false) qxf = positive ? qr_positive : qr - return qx(qxf, T; kwargs...) + return qx(qxf, T) end # NDTensors.ql -function ql(T::DenseTensor{<:Any,2}; positive=false, kwargs...) +function ql(T::DenseTensor{<:Any,2}; positive=false) qxf = positive ? ql_positive : ql - return qx(qxf, T; kwargs...) + return qx(qxf, T) end # # Generic function for qr and ql decomposition of dense matrix. # The X tensor = R or L. # -function qx(qx::Function, T::DenseTensor{<:Any,2}; kwargs...) +function qx(qx::Function, T::DenseTensor{<:Any,2}) QM, XM = qx(matrix(T)) # Be aware that if positive==false, then typeof(QM)=LinearAlgebra.QRCompactWYQ, not Matrix # It gets converted to matrix below. @@ -465,7 +437,7 @@ end # Lapack replaces A with Q & R carefully packed together. So here we just copy a # before letting lapack overwirte it. # -function ql(A::AbstractMatrix; kwargs...) +function ql(A::AbstractMatrix) Base.require_one_based_indexing(A) T = eltype(A) AA = similar(A, LinearAlgebra._qreltype(T), size(A)) @@ -475,7 +447,7 @@ function ql(A::AbstractMatrix; kwargs...) cutype = leaf_parenttype(AA) AA = NDTensors.cpu(AA) end - Q, L = ql!(AA; kwargs...) + Q, L = ql!(AA) if iscuda Q = adapt(cutype, Q) L = adapt(cutype, L) diff --git a/NDTensors/src/truncate.jl b/NDTensors/src/truncate.jl index 829c351127..f3aa35069a 100644 --- a/NDTensors/src/truncate.jl +++ b/NDTensors/src/truncate.jl @@ -17,31 +17,22 @@ function truncate!!(::Type{<:AbstractArray}, P::AbstractArray; kwargs...) end # CPU implementation. -function truncate!(P::AbstractVector{ElT}; cutoff=zero(eltype(P)), kwargs...) 
where {ElT} - if isnothing(cutoff) - cutoff = typemin(ElT) - end - - # Keyword argument deprecations - use_absolute_cutoff = false - if haskey(kwargs, :absoluteCutoff) - @warn "In truncate!, keyword argument absoluteCutoff is deprecated in favor of use_absolute_cutoff" - use_absolute_cutoff = get(kwargs, :absoluteCutoff, use_absolute_cutoff) - end - use_relative_cutoff = true - if haskey(kwargs, :doRelCutoff) - @warn "In truncate!, keyword argument doRelCutoff is deprecated in favor of use_relative_cutoff" - use_relative_cutoff = get(kwargs, :doRelCutoff, use_relative_cutoff) - end - - maxdim::Int = min(get(kwargs, :maxdim, length(P)), length(P)) - mindim::Int = max(get(kwargs, :mindim, 1), 1) - - use_absolute_cutoff::Bool = get(kwargs, :use_absolute_cutoff, use_absolute_cutoff) - use_relative_cutoff::Bool = get(kwargs, :use_relative_cutoff, use_relative_cutoff) +function truncate!( + P::AbstractVector; + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, +) + mindim = replace_nothing(maxdim, default_mindim(P)) + maxdim = replace_nothing(maxdim, length(P)) + cutoff = replace_nothing(cutoff, typemin(eltype(P))) + use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(P)) + use_relative_cutoff = replace_nothing(use_relative_cutoff, default_use_relative_cutoff(P)) origm = length(P) - docut = zero(ElT) + docut = zero(eltype(P)) #if P[1] <= 0.0 # P[1] = 0.0 @@ -51,7 +42,7 @@ function truncate!(P::AbstractVector{ElT}; cutoff=zero(eltype(P)), kwargs...) wh if origm == 1 docut = abs(P[1]) / 2 - return zero(ElT), docut + return zero(eltype(P)), docut end s = sign(P[1]) @@ -59,12 +50,12 @@ function truncate!(P::AbstractVector{ElT}; cutoff=zero(eltype(P)), kwargs...) wh #Zero out any negative weight for n in origm:-1:1 - (P[n] >= zero(ElT)) && break - P[n] = zero(ElT) + (P[n] >= zero(eltype(P))) && break + P[n] = zero(eltype(P)) end n = origm - truncerr = zero(ElT) + truncerr = zero(eltype(P)) while n > maxdim truncerr += P[n] n -= 1 @@ -78,10 +69,10 @@ function truncate!(P::AbstractVector{ElT}; cutoff=zero(eltype(P)), kwargs...) wh n -= 1 end else - scale = one(ElT) + scale = one(eltype(P)) if use_relative_cutoff scale = sum(P) - (scale == zero(ElT)) && (scale = one(ElT)) + (scale == zero(eltype(P))) && (scale = one(eltype(P))) end #Continue truncating until *sum* of discarded probability @@ -100,8 +91,8 @@ function truncate!(P::AbstractVector{ElT}; cutoff=zero(eltype(P)), kwargs...) 
where {ElT}
   if n < origm
     docut = (P[n] + P[n + 1]) / 2
-    if abs(P[n] - P[n + 1]) < ElT(1e-3) * P[n]
-      docut += ElT(1e-3) * P[n]
+    if abs(P[n] - P[n + 1]) < eltype(P)(1e-3) * P[n]
+      docut += eltype(P)(1e-3) * P[n]
     end
   end
 
diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl
index ee75c70e8f..ee55b708e7 100644
--- a/src/tensor_operations/matrix_decomposition.jl
+++ b/src/tensor_operations/matrix_decomposition.jl
@@ -119,6 +119,7 @@ function svd(
   alg=NDTensors.default_svd_alg(A),
   use_absolute_cutoff=NDTensors.default_use_absolute_cutoff(A),
   use_relative_cutoff=NDTensors.default_use_relative_cutoff(A),
+  min_blockdim=0,
   # Deprecated
   utags=lefttags,
   vtags=righttags,
@@ -151,7 +152,14 @@
   end
 
   USVT = svd(
-    tensor(AC); mindim, maxdim, cutoff, alg, use_absolute_cutoff, use_relative_cutoff
+    tensor(AC);
+    mindim,
+    maxdim,
+    cutoff,
+    alg,
+    use_absolute_cutoff,
+    use_relative_cutoff,
+    min_blockdim,
   )
   if isnothing(USVT)
     return nothing
@@ -173,10 +180,10 @@
   U = UC * dag(CL)
   V = VC * dag(CR)
 
-  settags!(U, utags, u)
-  settags!(S, utags, u)
-  settags!(S, vtags, v)
-  settags!(V, vtags, v)
+  U = settags(U, utags, u)
+  S = settags(S, utags, u)
+  S = settags(S, vtags, v)
+  V = settags(V, vtags, v)
 
   u = settags(u, utags)
   v = settags(v, vtags)
@@ -272,7 +280,23 @@ A * U ≈ Ul * D # true
 
 See also: [`svd`](@ref), [`factorize`](@ref)
 """
-function eigen(A::ITensor, Linds, Rinds; kwargs...)
+function eigen(
+  A::ITensor,
+  Linds,
+  Rinds;
+  mindim=nothing,
+  maxdim=nothing,
+  cutoff=nothing,
+  use_absolute_cutoff=nothing,
+  use_relative_cutoff=nothing,
+  ishermitian=false,
+  tags="Link,eigen",
+  lefttags=tags,
+  righttags=tags,
+  plev=0,
+  leftplev=plev,
+  rightplev=plev,
+)
   @debug_check begin
     if hasqns(A)
       @assert flux(A) == QN()
@@ -286,16 +310,6 @@
   N != NL + NR &&
    error("Number of left and right indices must add up to total number of indices")
 
-  ishermitian::Bool = get(kwargs, :ishermitian, false)
-
-  tags::TagSet = get(kwargs, :tags, "Link,eigen")
-  lefttags::TagSet = get(kwargs, :lefttags, tags)
-  righttags::TagSet = get(kwargs, :righttags, tags)
-
-  plev::Int = get(kwargs, :plev, 0)
-  leftplev::Int = get(kwargs, :leftplev, plev)
-  rightplev::Int = get(kwargs, :rightplev, plev)
-
   if lefttags == righttags && leftplev == rightplev
     leftplev = rightplev + 1
   end
@@ -345,7 +359,7 @@
 
   AT = ishermitian ? Hermitian(tensor(AC)) : tensor(AC)
 
-  DT, VT, spec = eigen(AT; kwargs...)
+  DT, VT, spec = eigen(AT; mindim, maxdim, cutoff)
   D, VC = itensor(DT), itensor(VT)
 
   V = VC * dag(CR)
@@ -455,7 +469,9 @@
 #
 # Generic function implementing both qr and ql decomposition. The X tensor = R or L.
 #
-function qx(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...)
+function qx(
+  qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qx", positive=false
+)
   # Strip out any extra indices that are not in A.
   # Unit test test/base/test_itensor.jl line 1469 will fail without this.
   Linds = commoninds(A, Linds)
@@ -478,7 +494,7 @@
   #
   AC = permute(AC, cL, cR; allow_alias=true)
 
-  QT, XT = qx(tensor(AC); kwargs...) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented.
+  QT, XT = qx(tensor(AC); positive) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented.
   #
   # Undo the combine operation, to recover all tensor indices.
   #
@@ -501,8 +517,10 @@
 # Generic function implementing both rq and lq decomposition. Implemented using qr/ql
 # with swapping the left and right indices. The X tensor = R or L.
 #
-function xq(qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags, kwargs...)
- Q, X, q = qx(A, Rinds, Linds; kwargs...) +function xq( + qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,xq", positive=false +) + Q, X, q = qx(A, Rinds, Linds; positive) # # fix up the tag name for the index between Q and L. # @@ -517,8 +535,8 @@ polar(A::ITensor; kwargs...) = error(noinds_error_message("polar")) # TODO: allow custom tags in internal indices? # TODO: return the new common indices? -function polar(A::ITensor, Linds...; kwargs...) - U, S, V = svd(A, Linds...; kwargs...) +function polar(A::ITensor, Linds...) + U, S, V = svd(A, Linds...) u = commoninds(S, U) v = commoninds(S, V) δᵤᵥ′ = δ(u..., v'...) @@ -527,13 +545,12 @@ function polar(A::ITensor, Linds...; kwargs...) return Q, P, commoninds(Q, P) end -function factorize_qr(A::ITensor, Linds...; kwargs...) - ortho::String = get(kwargs, :ortho, "left") +function factorize_qr(A::ITensor, Linds...; ortho="left") if ortho == "left" - L, R, q = qr(A, Linds...; kwargs...) + L, R, q = qr(A, Linds...) elseif ortho == "right" Lis = uniqueinds(A, indices(Linds...)) - R, L, q = qr(A, Lis...; kwargs...) + R, L, q = qr(A, Lis...) else error("In factorize using qr decomposition, ortho keyword $ortho not supported. Supported options are left or right.") @@ -561,17 +578,12 @@ function factorize_svd( Linds...; (singular_values!)=nothing, ortho="left", - svd_alg=NDTensors.default_svd_alg(A), + alg=NDTensors.default_svd_alg(A), dir=ITensors.In, mindim=NDTensors.default_mindim(A), maxdim=nothing, cutoff=nothing, tags=nothing, - # Passed erroneously - # TODO: Delete - which_decomp=nothing, - eigen_perturbation=nothing, - normalize=nothing, ) leftdir, rightdir = -dir, -dir USV = svd( @@ -579,7 +591,7 @@ function factorize_svd( Linds...; leftdir, rightdir, - alg=svd_alg, + alg, mindim, maxdim, cutoff, @@ -608,9 +620,17 @@ function factorize_svd( return L, R, spec end -function factorize_eigen(A::ITensor, Linds...; kwargs...) - ortho::String = get(kwargs, :ortho, "left") - delta_A2 = get(kwargs, :eigen_perturbation, nothing) +function factorize_eigen( + A::ITensor, + Linds...; + ortho="left", + eigen_perturbation=nothing, + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + tags=nothing, +) + delta_A2 = eigen_perturbation if ortho == "left" Lis = commoninds(A, indices(Linds...)) elseif ortho == "right" @@ -628,7 +648,7 @@ function factorize_eigen(A::ITensor, Linds...; kwargs...) noprime!(delta_A2) A2 += delta_A2 end - F = eigen(A2, Lis, simLis; ishermitian=true, kwargs...) + F = eigen(A2, Lis, simLis; ishermitian=true, mindim, maxdim, cutoff, tags) D, _, spec = F L = F.Vt R = dag(L) * A @@ -683,13 +703,24 @@ Perform a factorization of `A` into ITensors `L` and `R` such that `A ≈ L * R` For truncation arguments, see: [`svd`](@ref) """ -function factorize(A::ITensor, Linds...; maxdim=nothing, kwargs...) 
- ortho::String = get(kwargs, :ortho, "left") - tags::TagSet = get(kwargs, :tags, "Link,fact") - plev::Int = get(kwargs, :plev, 0) - which_decomp::Union{String,Nothing} = get(kwargs, :which_decomp, nothing) - cutoff = get(kwargs, :cutoff, nothing) - eigen_perturbation = get(kwargs, :eigen_perturbation, nothing) +function factorize( + A::ITensor, + Linds...; + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + ortho="left", + tags="Link,fact", + plev=0, + which_decomp=nothing, + # eigen + eigen_perturbation=nothing, + # svd + svd_alg=NDTensors.default_svd_alg(A), + use_absolute_cutoff=NDTensors.default_use_absolute_cutoff(A), + use_relative_cutoff=NDTensors.default_use_relative_cutoff(A), + min_blockdim=0, +) if !isnothing(eigen_perturbation) if !(isnothing(which_decomp) || which_decomp == "eigen") error("""when passing a non-trivial eigen_perturbation to `factorize`, @@ -699,11 +730,6 @@ function factorize(A::ITensor, Linds...; maxdim=nothing, kwargs...) which_decomp = "eigen" end - if haskey(kwargs, :which_factorization) - error("""which_factorization keyword in factorize has - been replace by which_decomp.""") - end - # Determines when to use eigen vs. svd (eigen is less precise, # so eigen should only be used if a larger cutoff is requested) automatic_cutoff = 1e-12 @@ -728,15 +754,17 @@ function factorize(A::ITensor, Linds...; maxdim=nothing, kwargs...) end if which_decomp == "svd" - LR = factorize_svd(A, Linds...; kwargs..., maxdim=maxdim) + LR = factorize_svd( + A, Linds...; mindim, maxdim, cutoff, tags, ortho, alg=svd_alg, dir, singular_values! + ) if isnothing(LR) return nothing end L, R, spec = LR elseif which_decomp == "eigen" - L, R, spec = factorize_eigen(A, Linds...; kwargs..., maxdim=maxdim) + L, R, spec = factorize_eigen(A, Linds...; mindim, maxdim, cutoff, tags, ortho) elseif which_decomp == "qr" - L, R = factorize_qr(A, Linds...; kwargs...) 
+ L, R = factorize_qr(A, Linds...; ortho, tags) spec = Spectrum(nothing, 0.0) else throw(ArgumentError("""In factorize, factorization $which_decomp is not From 34b229b276af5874fe37ef0e2491bf54305f08a9 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 14:53:25 -0400 Subject: [PATCH 26/36] Fix issues with new kwarg design --- .../arraystorage/arraystorage/tensor/qr.jl | 5 +- .../arraystorage/arraystorage/tensor/svd.jl | 18 +++-- NDTensors/src/linearalgebra/linearalgebra.jl | 7 +- src/mps/mps.jl | 39 ++++++----- src/tensor_operations/matrix_decomposition.jl | 65 +++++++++++-------- 5 files changed, 83 insertions(+), 51 deletions(-) diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl b/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl index 04ae4501fc..c99d026f0f 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl @@ -1,4 +1,7 @@ -function qr(A::ArrayStorageTensor) +function qr(A::ArrayStorageTensor; positive=false) + if positive + error("Not implemented") + end Q, R = qr(storage(A)) Q = convert(typeof(R), Q) i, j = inds(A) diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl index b224cd0893..46e9ea8e42 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl @@ -9,16 +9,22 @@ svd of an order-2 DenseTensor """ function svd( T::ArrayStorageTensor; - mindim=default_mindim(T), + mindim=nothing, maxdim=nothing, cutoff=nothing, - alg=default_svd_alg(T), - use_absolute_cutoff=default_use_absolute_cutoff(T), - use_relative_cutoff=default_use_relative_cutoff(T), + alg=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, + # Only used by BlockSparse svd + min_blockdim=nothing, ) truncate = !isnothing(maxdim) || !isnothing(cutoff) - maxdim = isnothing(maxdim) ? default_maxdim(T) : maxdim - cutoff = isnothing(cutoff) ? default_cutoff(T) : cutoff + mindim = replace_nothing(mindim, default_mindim(T)) + maxdim = replace_nothing(maxdim, default_maxdim(T)) + cutoff = replace_nothing(cutoff, default_cutoff(T)) + use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(T)) + use_relative_cutoff = replace_nothing(use_relative_cutoff, default_use_relative_cutoff(T)) + alg = replace_nothing(alg, default_svd_alg(T)) # TODO: Dispatch on `Algorithm(alg)`. if alg == "divide_and_conquer" diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 8a01868f6f..803772fb8e 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -100,11 +100,12 @@ function svd( cutoff=nothing, use_absolute_cutoff=nothing, use_relative_cutoff=nothing, - alg=default_svd_alg(T), + alg=nothing, # Only used by BlockSparse svd - min_blockdim=0, + min_blockdim=nothing, ) where {ElT,IndsT} truncate = !isnothing(maxdim) || !isnothing(cutoff) + alg = replace_nothing(alg, default_svd_alg(T)) if alg == "divide_and_conquer" MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.DivideAndConquer()) if isnothing(MUSV) @@ -127,7 +128,7 @@ function svd( elseif alg == "recursive" MUSV = svd_recursive(matrix(T)) elseif alg == "QRAlgorithm" || alg == "JacobiAlgorithm" - MUSV = svd_catch_error(matrix(T); alg=alg) + MUSV = svd_catch_error(matrix(T); alg) else error( "svd algorithm $alg is not currently supported. 
Please see the documentation for currently supported algorithms.", diff --git a/src/mps/mps.jl b/src/mps/mps.jl index 2a19b6c1e0..fd2e876715 100644 --- a/src/mps/mps.jl +++ b/src/mps/mps.jl @@ -527,20 +527,21 @@ Factorize the ITensor `phi` and replace the ITensors `b` and `b+1` of MPS `M` with the factors. Choose the orthogonality with `ortho="left"/"right"`. """ -function replacebond!(M::MPS, b::Int, phi::ITensor; kwargs...) - ortho::String = get(kwargs, :ortho, "left") - swapsites::Bool = get(kwargs, :swapsites, false) - which_decomp::Union{String,Nothing} = get(kwargs, :which_decomp, nothing) - normalize::Bool = get(kwargs, :normalize, false) - - # Deprecated keywords - if haskey(kwargs, :dir) - error( - """dir keyword in replacebond! has been replaced by ortho. - Note that the options are now the same as factorize, so use `left` instead of `fromleft` and `right` instead of `fromright`.""", - ) - end - +function replacebond!( + M::MPS, + b::Int, + phi::ITensor; + normalize=false, + swapsites=false, + ortho="left", + # Decomposition kwargs + which_decomp=nothing, + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + eigen_perturbation=nothing, + svd_alg=nothing, +) indsMb = inds(M[b]) if swapsites sb = siteind(M, b) @@ -548,7 +549,15 @@ function replacebond!(M::MPS, b::Int, phi::ITensor; kwargs...) indsMb = replaceind(indsMb, sb, sbp1) end L, R, spec = factorize( - phi, indsMb; which_decomp=which_decomp, tags=tags(linkind(M, b)), kwargs... + phi, + indsMb; + mindim, + maxdim, + cutoff, + which_decomp, + eigen_perturbation, + svd_alg, + tags=tags(linkind(M, b)), ) M[b] = L M[b + 1] = R diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index ee55b708e7..700a543a6f 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -111,19 +111,26 @@ function svd( Linds...; leftdir=nothing, rightdir=nothing, - lefttags="Link,u", - righttags="Link,v", - mindim=NDTensors.default_mindim(A), + lefttags=nothing, + righttags=nothing, + mindim=nothing, maxdim=nothing, cutoff=nothing, - alg=NDTensors.default_svd_alg(A), - use_absolute_cutoff=NDTensors.default_use_absolute_cutoff(A), - use_relative_cutoff=NDTensors.default_use_relative_cutoff(A), - min_blockdim=0, + alg=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, + min_blockdim=nothing, # Deprecated - utags=lefttags, - vtags=righttags, + utags=nothing, + vtags=nothing, ) + lefttags = NDTensors.replace_nothing(lefttags, ts"Link,u") + righttags = NDTensors.replace_nothing(righttags, ts"Link,v") + + # Deprecated + utags = lefttags + vtags = righttags + Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) @@ -173,10 +180,10 @@ function svd( U = UC * dag(CL) V = VC * dag(CR) - settags!(U, utags, u) - settags!(S, utags, u) - settags!(S, vtags, v) - settags!(V, vtags, v) + U = settags(U, utags, u) + S = settags(S, utags, u) + S = settags(S, vtags, v) + V = settags(V, vtags, v) u = settags(u, utags) v = settags(v, vtags) @@ -545,12 +552,12 @@ function polar(A::ITensor, Linds...) return Q, P, commoninds(Q, P) end -function factorize_qr(A::ITensor, Linds...; ortho="left") +function factorize_qr(A::ITensor, Linds...; ortho="left", tags=nothing, positive=false) if ortho == "left" - L, R, q = qr(A, Linds...) + L, R, q = qr(A, Linds...; tags, positive) elseif ortho == "right" Lis = uniqueinds(A, indices(Linds...)) - R, L, q = qr(A, Lis...) 
+ R, L, q = qr(A, Lis...; tags, positive) else error("In factorize using qr decomposition, ortho keyword $ortho not supported. Supported options are left or right.") @@ -579,12 +586,13 @@ function factorize_svd( (singular_values!)=nothing, ortho="left", alg=NDTensors.default_svd_alg(A), - dir=ITensors.In, + dir=nothing, mindim=NDTensors.default_mindim(A), maxdim=nothing, cutoff=nothing, tags=nothing, ) + dir = NDTensors.replace_nothing(dir, ITensors.In) leftdir, rightdir = -dir, -dir USV = svd( A, @@ -709,17 +717,19 @@ function factorize( mindim=nothing, maxdim=nothing, cutoff=nothing, - ortho="left", - tags="Link,fact", - plev=0, + ortho=nothing, + tags=nothing, + plev=nothing, which_decomp=nothing, # eigen eigen_perturbation=nothing, # svd - svd_alg=NDTensors.default_svd_alg(A), - use_absolute_cutoff=NDTensors.default_use_absolute_cutoff(A), - use_relative_cutoff=NDTensors.default_use_relative_cutoff(A), - min_blockdim=0, + svd_alg=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, + min_blockdim=nothing, + (singular_values!)=nothing, + dir=nothing, ) if !isnothing(eigen_perturbation) if !(isnothing(which_decomp) || which_decomp == "eigen") @@ -729,6 +739,9 @@ function factorize( end which_decomp = "eigen" end + ortho = NDTensors.replace_nothing(ortho, "left") + tags = NDTensors.replace_nothing(tags, ts"Link,fact") + plev = NDTensors.replace_nothing(plev, 0) # Determines when to use eigen vs. svd (eigen is less precise, # so eigen should only be used if a larger cutoff is requested) @@ -774,8 +787,8 @@ function factorize( # Set the tags and prime level l = commonind(L, R) l̃ = setprime(settags(l, tags), plev) - replaceind!(L, l, l̃) - replaceind!(R, l, l̃) + L = replaceind(L, l, l̃) + R = replaceind(R, l, l̃) l = l̃ return L, R, spec, l From 766f945f94e08c8b80d8eb227b7150a0638d8bf1 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 17:23:21 -0400 Subject: [PATCH 27/36] Refactor SVD code --- .../arraystorage/arraystorage/tensor/qr.jl | 4 +- .../arraystorage/arraystorage/tensor/svd.jl | 141 +++++++++++------- src/mps/abstractmps.jl | 6 +- 3 files changed, 87 insertions(+), 64 deletions(-) diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl b/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl index c99d026f0f..7ee216341f 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/qr.jl @@ -1,7 +1,5 @@ function qr(A::ArrayStorageTensor; positive=false) - if positive - error("Not implemented") - end + positive && error("Not implemented") Q, R = qr(storage(A)) Q = convert(typeof(R), Q) i, j = inds(A) diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl index 46e9ea8e42..1d93c2f1d1 100644 --- a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl +++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl @@ -1,14 +1,47 @@ -# TODO: Rewrite this function to be more modern: -# 1. Output `Spectrum` as a keyword argument that gets overwritten. -# 2. Dispatch on `alg`. -# 3. Make this into two layers, one that handles indices and one that works with `Matrix`. 
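+# A sketch of the dispatch scheme introduced below: the string `alg` keyword is
+# converted to an `Algorithm` object and dispatched on, and `backup_svd_alg`
+# encodes a fallback chain (divide-and-conquer -> QR iteration -> recursive)
+# that is tried whenever LAPACK fails on a given matrix:
+#
+#   USV = svd(Algorithm("divide_and_conquer"), a)  # may fall back internally
+#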
+backup_svd_alg(::Algorithm"divide_and_conquer") = Algorithm"qr_iteration"() +backup_svd_alg(::Algorithm"qr_iteration") = Algorithm"recursive"() + +function svd(alg::Algorithm"divide_and_conquer", a::ArrayStorage) + USV = svd_catch_error(a; alg=LinearAlgebra.DivideAndConquer()) + if isnothing(USV) + return svd(backup_svd_alg(alg), a) + end + return USV +end + +function svd(alg::Algorithm"qr_iteration", a::ArrayStorage) + USV = svd_catch_error(a; alg=LinearAlgebra.QRIteration()) + if isnothing(USV) + return svd(backup_svd_alg(alg), a) + end + return USV +end + +function svd(alg::Algorithm"recursive", a::ArrayStorage) + return svd_recursive(a) +end + +function svd(::Algorithm"QRAlgorithm", a::ArrayStorage) + return error("Not implemented yet") +end + +function svd(::Algorithm"JacobiAlgorithm", a::ArrayStorage) + return error("Not implemented yet") +end + +function svd(alg::Algorithm, a::ArrayStorage) + return error( + "svd algorithm $alg is not currently supported. Please see the documentation for currently supported algorithms.", + ) +end + """ - svd(T::ArrayStorageTensor{<:Number,2}; kwargs...) + tsvd(a::ArrayStorage{<:Number,2}; kwargs...) svd of an order-2 DenseTensor """ -function svd( - T::ArrayStorageTensor; +function tsvd( + a::ArrayStorage; mindim=nothing, maxdim=nothing, cutoff=nothing, @@ -18,56 +51,22 @@ function svd( # Only used by BlockSparse svd min_blockdim=nothing, ) - truncate = !isnothing(maxdim) || !isnothing(cutoff) - mindim = replace_nothing(mindim, default_mindim(T)) - maxdim = replace_nothing(maxdim, default_maxdim(T)) - cutoff = replace_nothing(cutoff, default_cutoff(T)) - use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(T)) - use_relative_cutoff = replace_nothing(use_relative_cutoff, default_use_relative_cutoff(T)) - alg = replace_nothing(alg, default_svd_alg(T)) - - # TODO: Dispatch on `Algorithm(alg)`. - if alg == "divide_and_conquer" - MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.DivideAndConquer()) - if isnothing(MUSV) - # If "divide_and_conquer" fails, try "qr_iteration" - alg = "qr_iteration" - MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.QRIteration()) - if isnothing(MUSV) - # If "qr_iteration" fails, try "recursive" - alg = "recursive" - MUSV = svd_recursive(matrix(T)) - end - end - elseif alg == "qr_iteration" - MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.QRIteration()) - if isnothing(MUSV) - # If "qr_iteration" fails, try "recursive" - alg = "recursive" - MUSV = svd_recursive(matrix(T)) - end - elseif alg == "recursive" - MUSV = svd_recursive(matrix(T)) - elseif alg == "QRAlgorithm" || alg == "JacobiAlgorithm" - MUSV = svd_catch_error(matrix(T); alg=alg) - else - error( - "svd algorithm $alg is not currently supported. 
Please see the documentation for currently supported algorithms.", - ) - end - if isnothing(MUSV) - if any(isnan, T) + alg = replace_nothing(alg, default_svd_alg(a)) + USV = svd(Algorithm(alg), a) + if isnothing(USV) + if any(isnan, a) println("SVD failed, the matrix you were trying to SVD contains NaNs.") else println(lapack_svd_error_message(alg)) end return nothing end - MU, MS, MV = MUSV - conj!(MV) - P = MS .^ 2 - if truncate + U, S, V = USV + conj!(V) + + P = S .^ 2 + if !isnothing(maxdim) || !isnothing(cutoff) P, truncerr, _ = truncate!!( P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff ) @@ -76,25 +75,51 @@ function svd( end spec = Spectrum(P, truncerr) dS = length(P) - if dS < length(MS) - MU = MU[:, 1:dS] + if dS < length(S) + U = U[:, 1:dS] # Fails on some GPU backends like Metal. # resize!(MS, dS) - MS = MS[1:dS] - MV = MV[:, 1:dS] + S = S[1:dS] + V = V[:, 1:dS] end + return U, DiagonalMatrix(S), V, spec +end +# TODO: Rewrite this function to be more modern: +# 1. Output `Spectrum` as a keyword argument that gets overwritten. +# 2. Dispatch on `alg`. +# 3. Make this into two layers, one that handles indices and one that works with `Matrix`. +""" + svd(T::ArrayStorageTensor{<:Number,2}; kwargs...) + +svd of an order-2 DenseTensor +""" +function svd( + T::ArrayStorageTensor; + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + alg=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, + # Only used by BlockSparse svd + min_blockdim=nothing, +) + U, S, V, spec = tsvd( + storage(T); mindim, maxdim, cutoff, alg, use_absolute_cutoff, use_relative_cutoff + ) # Make the new indices to go onto U and V # TODO: Put in a separate function, such as # `rewrap_inds` or something like that. + dS = length(S[DiagIndices()]) indstype = typeof(inds(T)) u = eltype(indstype)(dS) v = eltype(indstype)(dS) Uinds = indstype((ind(T, 1), u)) Sinds = indstype((u, v)) Vinds = indstype((ind(T, 2), v)) - U = tensor(MU, Uinds) - S = tensor(DiagonalMatrix(MS), Sinds) - V = tensor(MV, Vinds) - return U, S, V, spec + TU = tensor(U, Uinds) + TS = tensor(S, Sinds) + TV = tensor(V, Vinds) + return TU, TS, TV, spec end diff --git a/src/mps/abstractmps.jl b/src/mps/abstractmps.jl index a6309e7d2f..57677138a0 100644 --- a/src/mps/abstractmps.jl +++ b/src/mps/abstractmps.jl @@ -1592,7 +1592,7 @@ bond indices is performed. Afterward, tensors Either modify in-place with `orthogonalize!` or out-of-place with `orthogonalize`. """ -function orthogonalize!(M::AbstractMPS, j::Int; kwargs...) +function orthogonalize!(M::AbstractMPS, j::Int) @debug_check begin if !(1 <= j <= length(M)) error("Input j=$j to `orthogonalize!` out of range (valid range = 1:$(length(M)))") @@ -1608,7 +1608,7 @@ function orthogonalize!(M::AbstractMPS, j::Int; kwargs...) else ltags = TagSet("Link,l=$b") end - L, R = factorize(M[b], linds; tags=ltags, kwargs...) + L, R = factorize(M[b], linds; tags=ltags) M[b] = L M[b + 1] *= R setleftlim!(M, b) @@ -1629,7 +1629,7 @@ function orthogonalize!(M::AbstractMPS, j::Int; kwargs...) else ltags = TagSet("Link,l=$b") end - L, R = factorize(M[b + 1], rinds; tags=ltags, kwargs...) 
+ L, R = factorize(M[b + 1], rinds; tags=ltags) M[b + 1] = L M[b] *= R From 970b112ae0f287fcb1cb5e8d30559d00935298fb Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 17:46:26 -0400 Subject: [PATCH 28/36] Fix some more tests --- NDTensors/src/blocksparse/linearalgebra.jl | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index ea02922a57..f04f0f5b5e 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -36,17 +36,16 @@ computed from the dense svds of seperate blocks. """ function svd( T::Tensor{ElT,2,<:BlockSparse}; - mindim=default_mindim(T), + min_blockdim=nothing, + mindim=nothing, maxdim=nothing, cutoff=nothing, - alg=default_svd_alg(T), - use_absolute_cutoff=default_use_absolute_cutoff(T), - use_relative_cutoff=default_use_relative_cutoff(T), - min_blockdim=0, + alg=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, ) where {ElT} truncate = !isnothing(maxdim) || !isnothing(cutoff) - maxdim = isnothing(maxdim) ? default_maxdim(T) : maxdim - cutoff = isnothing(cutoff) ? default_cutoff(T) : cutoff + min_blockdim = replace_nothing(min_blockdim, 0) Us = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T)) Ss = Vector{DiagTensor{real(ElT),2}}(undef, nnzblocks(T)) @@ -208,7 +207,6 @@ function svd( copyto!(blockview(V, blockV), Vb) end return U, S, V, Spectrum(d, truncerr) - #end # @timeit_debug end _eigen_eltypes(T::Hermitian{ElT,<:BlockSparseMatrix{ElT}}) where {ElT} = real(ElT), ElT From ffec9defb6748862476fc381bdfc60cbc185cec6 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 18:04:06 -0400 Subject: [PATCH 29/36] Small change to kwarg logic --- NDTensors/src/blocksparse/linearalgebra.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index f04f0f5b5e..1a02327d7f 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -4,8 +4,9 @@ const DiagBlockSparseMatrix{ElT,StoreT,IndsT} = DiagBlockSparseTensor{ElT,2,Stor const DiagMatrix{ElT,StoreT,IndsT} = DiagTensor{ElT,2,StoreT,IndsT} function _truncated_blockdim( - S::DiagMatrix, docut::Real; singular_values=false, truncate=true, min_blockdim=0 + S::DiagMatrix, docut::Real; singular_values=false, truncate=true, min_blockdim=nothing ) + min_blockdim = replace_nothing(min_blockdim, 0) # TODO: Replace `cpu` with `leaf_parenttype` dispatch. 
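+  # Note: the loop below scans diagonal entries one at a time, so a CPU copy
+  # avoids scalar indexing into GPU arrays (assumed rationale for the `cpu`
+  # call here).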
S = cpu(S) full_dim = diaglength(S) @@ -45,7 +46,6 @@ function svd( use_relative_cutoff=nothing, ) where {ElT} truncate = !isnothing(maxdim) || !isnothing(cutoff) - min_blockdim = replace_nothing(min_blockdim, 0) Us = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T)) Ss = Vector{DiagTensor{real(ElT),2}}(undef, nnzblocks(T)) From 7668e86e855c1849fae46b70151c11aef8c83f36 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 18:38:36 -0400 Subject: [PATCH 30/36] Fix some more tests --- NDTensors/src/blocksparse/linearalgebra.jl | 17 +++++++++++------ src/mps/mpo.jl | 9 ++++----- src/tensor_operations/matrix_decomposition.jl | 8 ++++---- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 1a02327d7f..0294cfee0c 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -215,9 +215,14 @@ _eigen_eltypes(T::BlockSparseMatrix{ElT}) where {ElT} = complex(ElT), complex(El function eigen( T::Union{Hermitian{ElT,<:Tensor{ElT,2,<:BlockSparse}},Tensor{ElT,2,<:BlockSparse}}; - kwargs..., + min_blockdim=nothing, + mindim=nothing, + maxdim=nothing, + cutoff=nothing, + use_absolute_cutoff=nothing, + use_relative_cutoff=nothing, ) where {ElT<:Union{Real,Complex}} - truncate = haskey(kwargs, :maxdim) || haskey(kwargs, :cutoff) + truncate = !isnothing(maxdim) || !isnothing(cutoff) ElD, ElV = _eigen_eltypes(T) @@ -247,9 +252,9 @@ function eigen( sort!(d; rev=true, by=abs) if truncate - d, truncerr, docut = truncate!!(d; kwargs...) + d, truncerr, docut = truncate!!(d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff) for n in 1:nnzblocks(T) - blockdim = _truncated_blockdim(Ds[n], docut) + blockdim = _truncated_blockdim(Ds[n], docut; min_blockdim, singular_values=false, truncate) if blockdim == 0 push!(dropblocks, n) else @@ -340,7 +345,7 @@ qr(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...) # This code thanks to Niklas Tausendpfund # https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # -function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; kwargs...) +function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; positive=nothing) ElT = eltype(T) # getting total number of blocks nnzblocksT = nnzblocks(T) @@ -351,7 +356,7 @@ function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; kwargs...) for (jj, b) in enumerate(eachnzblock(T)) blockT = blockview(T, b) - QXb = qx(blockT; kwargs...) #call dense qr at src/linearalgebra.jl 387 + QXb = qx(blockT; positive) if (isnothing(QXb)) return nothing diff --git a/src/mps/mpo.jl b/src/mps/mpo.jl index 84550e5558..c3b47fb5c0 100644 --- a/src/mps/mpo.jl +++ b/src/mps/mpo.jl @@ -597,13 +597,12 @@ end Apply(A::MPO, ψ::MPS; kwargs...) = Applied(apply, (A, ψ), NamedTuple(kwargs)) -function contract(A::MPO, ψ::MPS; alg="densitymatrix", kwargs...) - if haskey(kwargs, :method) - # Backwards compatibility, use `method`. - alg = get(kwargs, :method, "densitymatrix") - end +function contract(A::MPO, ψ::MPS; alg=nothing, method=alg, kwargs...) + # TODO: Delete `method` since it is deprecated. + alg = NDTensors.replace_nothing(method, "densitymatrix") # Keyword argument deprecations + # TODO: Delete these. 
if alg == "DensityMatrix" @warn "In contract, method DensityMatrix is deprecated in favor of densitymatrix" alg = "densitymatrix" diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index 700a543a6f..cc49af6964 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -121,15 +121,15 @@ function svd( use_relative_cutoff=nothing, min_blockdim=nothing, # Deprecated - utags=nothing, - vtags=nothing, + utags=lefttags, + vtags=righttags, ) lefttags = NDTensors.replace_nothing(lefttags, ts"Link,u") righttags = NDTensors.replace_nothing(righttags, ts"Link,v") # Deprecated - utags = lefttags - vtags = righttags + utags = NDTensors.replace_nothing(utags, ts"Link,u") + vtags = NDTensors.replace_nothing(vtags, ts"Link,v") Lis = commoninds(A, indices(Linds...)) Ris = uniqueinds(A, Lis) From 5e65813b16bf42a80418addf8d63737c70d525fe Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 18:42:32 -0400 Subject: [PATCH 31/36] Format --- NDTensors/src/blocksparse/linearalgebra.jl | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index 0294cfee0c..5231940b74 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -252,9 +252,13 @@ function eigen( sort!(d; rev=true, by=abs) if truncate - d, truncerr, docut = truncate!!(d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff) + d, truncerr, docut = truncate!!( + d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff + ) for n in 1:nnzblocks(T) - blockdim = _truncated_blockdim(Ds[n], docut; min_blockdim, singular_values=false, truncate) + blockdim = _truncated_blockdim( + Ds[n], docut; min_blockdim, singular_values=false, truncate + ) if blockdim == 0 push!(dropblocks, n) else From 0e288ea629baa8e6c512dc856d09fb45a08cb676 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 18:48:46 -0400 Subject: [PATCH 32/36] Fix some more tests --- src/mps/abstractmps.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/mps/abstractmps.jl b/src/mps/abstractmps.jl index 57677138a0..ded5259586 100644 --- a/src/mps/abstractmps.jl +++ b/src/mps/abstractmps.jl @@ -1592,7 +1592,7 @@ bond indices is performed. Afterward, tensors Either modify in-place with `orthogonalize!` or out-of-place with `orthogonalize`. 
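+
+A minimal usage sketch (assuming a random starting state):
+
+    M = randomMPS(siteinds("S=1/2", 10))
+    orthogonalize!(M, 5)
+    # M[1]...M[4] are now left-orthogonal, M[6]...M[10] are right-orthogonal,
+    # and site 5 carries the orthogonality center.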
""" -function orthogonalize!(M::AbstractMPS, j::Int) +function orthogonalize!(M::AbstractMPS, j::Int; maxdim=nothing) @debug_check begin if !(1 <= j <= length(M)) error("Input j=$j to `orthogonalize!` out of range (valid range = 1:$(length(M)))") @@ -1608,7 +1608,7 @@ function orthogonalize!(M::AbstractMPS, j::Int) else ltags = TagSet("Link,l=$b") end - L, R = factorize(M[b], linds; tags=ltags) + L, R = factorize(M[b], linds; tags=ltags, maxdim) M[b] = L M[b + 1] *= R setleftlim!(M, b) @@ -1629,7 +1629,7 @@ function orthogonalize!(M::AbstractMPS, j::Int) else ltags = TagSet("Link,l=$b") end - L, R = factorize(M[b + 1], rinds; tags=ltags) + L, R = factorize(M[b + 1], rinds; tags=ltags, maxdim) M[b + 1] = L M[b] *= R From 1ef4c5c7af5175667a2278ef1aa915fe72c20e8f Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 19:06:15 -0400 Subject: [PATCH 33/36] Fix bug in truncate --- NDTensors/src/truncate.jl | 3 +-- src/tensor_operations/matrix_decomposition.jl | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/NDTensors/src/truncate.jl b/NDTensors/src/truncate.jl index f3aa35069a..9699e43f28 100644 --- a/NDTensors/src/truncate.jl +++ b/NDTensors/src/truncate.jl @@ -25,7 +25,7 @@ function truncate!( use_absolute_cutoff=nothing, use_relative_cutoff=nothing, ) - mindim = replace_nothing(maxdim, default_mindim(P)) + mindim = replace_nothing(mindim, default_mindim(P)) maxdim = replace_nothing(maxdim, length(P)) cutoff = replace_nothing(cutoff, typemin(eltype(P))) use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(P)) @@ -98,6 +98,5 @@ function truncate!( s < 0 && (P .*= s) resize!(P, n) - return truncerr, docut end diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl index cc49af6964..cc99d48da9 100644 --- a/src/tensor_operations/matrix_decomposition.jl +++ b/src/tensor_operations/matrix_decomposition.jl @@ -585,9 +585,9 @@ function factorize_svd( Linds...; (singular_values!)=nothing, ortho="left", - alg=NDTensors.default_svd_alg(A), + alg=nothing, dir=nothing, - mindim=NDTensors.default_mindim(A), + mindim=nothing, maxdim=nothing, cutoff=nothing, tags=nothing, @@ -765,7 +765,6 @@ function factorize( which_decomp = "eigen" end end - if which_decomp == "svd" LR = factorize_svd( A, Linds...; mindim, maxdim, cutoff, tags, ortho, alg=svd_alg, dir, singular_values! From 31e620916ad4b55a51a55fa857f642ba9ec3de97 Mon Sep 17 00:00:00 2001 From: mtfishman Date: Tue, 31 Oct 2023 19:13:07 -0400 Subject: [PATCH 34/36] Fix some more tests --- src/mps/abstractmps.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/mps/abstractmps.jl b/src/mps/abstractmps.jl index ded5259586..94722e301f 100644 --- a/src/mps/abstractmps.jl +++ b/src/mps/abstractmps.jl @@ -1592,7 +1592,8 @@ bond indices is performed. Afterward, tensors Either modify in-place with `orthogonalize!` or out-of-place with `orthogonalize`. """ -function orthogonalize!(M::AbstractMPS, j::Int; maxdim=nothing) +function orthogonalize!(M::AbstractMPS, j::Int; maxdim=nothing, normalize=nothing) + # TODO: Delete `maxdim` and `normalize` keyword arguments. 
   @debug_check begin
     if !(1 <= j <= length(M))
       error("Input j=$j to `orthogonalize!` out of range (valid range = 1:$(length(M)))")

From 42fada1e32a06b45b11cb57d51332bee366f9e4f Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 31 Oct 2023 19:35:47 -0400
Subject: [PATCH 35/36] Some more kwarg changes

---
 src/mps/dmrg.jl                               |  6 ++---
 src/tensor_operations/matrix_decomposition.jl | 24 ++++++++++++-------
 2 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/src/mps/dmrg.jl b/src/mps/dmrg.jl
index 1a0a933791..96eed1df9d 100644
--- a/src/mps/dmrg.jl
+++ b/src/mps/dmrg.jl
@@ -311,10 +311,10 @@ function dmrg(PH, psi0::MPS, sweeps::Sweeps; kwargs...)
           mindim=mindim(sweeps, sw),
           cutoff=cutoff(sweeps, sw),
           eigen_perturbation=drho,
-          ortho=ortho,
+          ortho,
           normalize=true,
-          which_decomp=which_decomp,
-          svd_alg=svd_alg,
+          which_decomp,
+          svd_alg,
         )
       end
       maxtruncerr = max(maxtruncerr, spec.truncerr)

diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl
index cc99d48da9..378bdfa826 100644
--- a/src/tensor_operations/matrix_decomposition.jl
+++ b/src/tensor_operations/matrix_decomposition.jl
@@ -296,14 +296,22 @@ function eigen(
   cutoff=nothing,
   use_absolute_cutoff=nothing,
   use_relative_cutoff=nothing,
-  ishermitian=false,
-  tags="Link,eigen",
-  lefttags=tags,
-  righttags=tags,
-  plev=0,
-  leftplev=plev,
-  rightplev=plev,
+  ishermitian=nothing,
+  tags=nothing,
+  lefttags=nothing,
+  righttags=nothing,
+  plev=nothing,
+  leftplev=nothing,
+  rightplev=nothing,
 )
+  ishermitian = NDTensors.replace_nothing(ishermitian, false)
+  tags = NDTensors.replace_nothing(tags, ts"Link,eigen")
+  lefttags = NDTensors.replace_nothing(lefttags, tags)
+  righttags = NDTensors.replace_nothing(righttags, tags)
+  plev = NDTensors.replace_nothing(plev, 0)
+  leftplev = NDTensors.replace_nothing(leftplev, plev)
+  rightplev = NDTensors.replace_nothing(rightplev, plev)
+
   @debug_check begin
     if hasqns(A)
       @assert flux(A) == QN()
@@ -366,7 +374,7 @@ function eigen(
   AT = ishermitian ? Hermitian(tensor(AC)) : tensor(AC)

-  DT, VT, spec = eigen(AT; mindim, maxdim, cutoff)
+  DT, VT, spec = eigen(AT; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff)

   D, VC = itensor(DT), itensor(VT)
   V = VC * dag(CR)

From 251f5a09ecf009829d1233dd1e4850fc2fa6da9c Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 31 Oct 2023 22:41:53 -0400
Subject: [PATCH 36/36] Fix tests

---
 .../src/arraystorage/arraystorage/tensor/eigen.jl | 14 ++++----------
 .../src/arraystorage/arraystorage/tensor/svd.jl   |  2 +-
 NDTensors/src/blocksparse/linearalgebra.jl        | 12 ++++--------
 NDTensors/src/linearalgebra/linearalgebra.jl      |  9 +++------
 src/mps/dmrg.jl                                   |  2 +-
 src/mps/mps.jl                                    | 11 ++++++++---
 6 files changed, 21 insertions(+), 29 deletions(-)

diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl
index 9bcb1713c3..e55038f7d8 100644
--- a/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl
+++ b/NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl
@@ -5,10 +5,10 @@
 function eigen(
   T::Hermitian{<:Any,<:ArrayStorageTensor};
   maxdim=nothing,
-  mindim=1,
+  mindim=nothing,
   cutoff=nothing,
-  use_absolute_cutoff=false,
+  use_absolute_cutoff=nothing,
-  use_relative_cutoff=true,
+  use_relative_cutoff=nothing,
   # These are getting passed erroneously.
   # TODO: Make sure they don't get passed down
   # to here.
@@ -20,12 +20,6 @@ function eigen(
   ortho=nothing,
   svd_alg=nothing,
 )
-  truncate = !isnothing(maxdim) || !isnothing(cutoff)
-  # TODO: Define `default_maxdim(T)`.
-  maxdim = isnothing(maxdim) ? minimum(dims(T)) : maxdim
-  # TODO: Define `default_cutoff(T)`.
-  cutoff = isnothing(cutoff) ? zero(eltype(T)) : cutoff
-
   matrixT = matrix(T)
   ## TODO Here I am calling parent to ensure that the correct `any` function
   ## is envoked for non-cpu matrices
@@ -45,7 +39,7 @@ function eigen(
   DM = DM[p]
   VM = VM[:, p]

-  if truncate
+  if any(!isnothing, (maxdim, cutoff))
     DM, truncerr, _ = truncate!!(
       DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
     )

diff --git a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl
index 1d93c2f1d1..69f446ac53 100644
--- a/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl
+++ b/NDTensors/src/arraystorage/arraystorage/tensor/svd.jl
@@ -66,7 +66,7 @@ function tsvd(
   conj!(V)

   P = S .^ 2
-  if !isnothing(maxdim) || !isnothing(cutoff)
+  if any(!isnothing, (maxdim, cutoff))
     P, truncerr, _ = truncate!!(
       P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
     )

diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl
index 5231940b74..2f6b41f69e 100644
--- a/NDTensors/src/blocksparse/linearalgebra.jl
+++ b/NDTensors/src/blocksparse/linearalgebra.jl
@@ -45,8 +45,6 @@ function svd(
   use_absolute_cutoff=nothing,
   use_relative_cutoff=nothing,
 ) where {ElT}
-  truncate = !isnothing(maxdim) || !isnothing(cutoff)
-
   Us = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T))
   Ss = Vector{DiagTensor{real(ElT),2}}(undef, nnzblocks(T))
   Vs = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T))
@@ -83,13 +81,13 @@ function svd(
   nzblocksT = nzblocks(T)

   dropblocks = Int[]
-  if truncate
+  if any(!isnothing, (maxdim, cutoff))
     d, truncerr, docut = truncate!!(
       d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
     )
     for n in 1:nnzblocks(T)
       blockdim = _truncated_blockdim(
-        Ss[n], docut; min_blockdim, singular_values=true, truncate
+        Ss[n], docut; min_blockdim, singular_values=true, truncate=true
       )
       if blockdim == 0
         push!(dropblocks, n)
@@ -222,8 +220,6 @@ function eigen(
   use_absolute_cutoff=nothing,
   use_relative_cutoff=nothing,
 ) where {ElT<:Union{Real,Complex}}
-  truncate = !isnothing(maxdim) || !isnothing(cutoff)
-
   ElD, ElV = _eigen_eltypes(T)

   # Sorted eigenvalues
@@ -251,13 +247,13 @@ function eigen(
   dropblocks = Int[]
   sort!(d; rev=true, by=abs)

-  if truncate
+  if any(!isnothing, (maxdim, cutoff))
     d, truncerr, docut = truncate!!(
       d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
     )
     for n in 1:nnzblocks(T)
       blockdim = _truncated_blockdim(
-        Ds[n], docut; min_blockdim, singular_values=false, truncate
+        Ds[n], docut; min_blockdim, singular_values=false, truncate=true
       )
       if blockdim == 0
         push!(dropblocks, n)

diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl
index 803772fb8e..a99e65eb37 100644
--- a/NDTensors/src/linearalgebra/linearalgebra.jl
+++ b/NDTensors/src/linearalgebra/linearalgebra.jl
@@ -104,7 +104,6 @@ function svd(
   # Only used by BlockSparse svd
   min_blockdim=nothing,
 ) where {ElT,IndsT}
-  truncate = !isnothing(maxdim) || !isnothing(cutoff)
   alg = replace_nothing(alg, default_svd_alg(T))
   if alg == "divide_and_conquer"
     MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.DivideAndConquer())
@@ -147,7 +146,7 @@ function svd(
   #end # @timeit_debug

   P = MS .^ 2
-  if truncate
+  if any(!isnothing, (maxdim, cutoff))
     P, truncerr, _ = truncate!!(
       P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
     )
@@ -184,7 +183,6 @@ function eigen(
   use_absolute_cutoff=nothing,
   use_relative_cutoff=nothing,
 ) where {ElT<:Union{Real,Complex},IndsT}
-  truncate = !isnothing(maxdim) || !isnothing(cutoff)
   matrixT = matrix(T)
   ## TODO Here I am calling parent to ensure that the correct `any` function
   ## is envoked for non-cpu matrices
@@ -204,7 +202,7 @@ function eigen(
   DM = DM[p]
   VM = VM[:, p]

-  if truncate
+  if any(!isnothing, (maxdim, cutoff))
     DM, truncerr, _ = truncate!!(
       DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
     )
@@ -291,7 +289,6 @@ function eigen(
   use_absolute_cutoff=nothing,
   use_relative_cutoff=nothing,
 ) where {ElT<:Union{Real,Complex},IndsT}
-  truncate = !isnothing(maxdim) || !isnothing(cutoff)
   matrixT = matrix(T)
   if any(!isfinite, matrixT)
     throw(
@@ -308,7 +305,7 @@ function eigen(
   #DM = DM[p]
   #VM = VM[:,p]

-  if truncate
+  if any(!isnothing, (maxdim, cutoff))
     DM, truncerr, _ = truncate!!(
       DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
     )

diff --git a/src/mps/dmrg.jl b/src/mps/dmrg.jl
index 96eed1df9d..3a731b3c66 100644
--- a/src/mps/dmrg.jl
+++ b/src/mps/dmrg.jl
@@ -269,7 +269,7 @@ function dmrg(PH, psi0::MPS, sweeps::Sweeps; kwargs...)
       phi,
       1,
       eigsolve_which_eigenvalue;
-      ishermitian=ishermitian,
+      ishermitian,
       tol=eigsolve_tol,
       krylovdim=eigsolve_krylovdim,
       maxiter=eigsolve_maxiter,

diff --git a/src/mps/mps.jl b/src/mps/mps.jl
index fd2e876715..8d3f721687 100644
--- a/src/mps/mps.jl
+++ b/src/mps/mps.jl
@@ -531,9 +531,9 @@ function replacebond!(
   M::MPS,
   b::Int,
   phi::ITensor;
-  normalize=false,
-  swapsites=false,
-  ortho="left",
+  normalize=nothing,
+  swapsites=nothing,
+  ortho=nothing,
   # Decomposition kwargs
   which_decomp=nothing,
   mindim=nothing,
@@ -542,6 +542,10 @@
   eigen_perturbation=nothing,
   svd_alg=nothing,
 )
+  normalize = NDTensors.replace_nothing(normalize, false)
+  swapsites = NDTensors.replace_nothing(swapsites, false)
+  ortho = NDTensors.replace_nothing(ortho, "left")
+
   indsMb = inds(M[b])
   if swapsites
     sb = siteind(M, b)
@@ -554,6 +558,7 @@
     mindim,
     maxdim,
     cutoff,
+    ortho,
     which_decomp,
     eigen_perturbation,
     svd_alg,
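
A note on the keyword-argument convention that patches 33 through 36 converge on:
keyword arguments default to `nothing`, defaults are resolved inside the function
body with `replace_nothing`, and truncation only runs when the caller actually set
`maxdim` or `cutoff`, tested via `any(!isnothing, (maxdim, cutoff))`. The sketch
below is a minimal, self-contained illustration of that convention in plain Julia;
`my_truncate!` and its simplistic "count the values above the cutoff" rule are
illustrative stand-ins, not the actual NDTensors `truncate!!` logic (which also
tracks accumulated truncation error and the absolute/relative cutoff flags).

replace_nothing(x, default) = isnothing(x) ? default : x

function my_truncate!(P::Vector{Float64}; mindim=nothing, maxdim=nothing, cutoff=nothing)
  # Mirror `if any(!isnothing, (maxdim, cutoff))`: if neither truncation
  # keyword was set by the caller, do nothing at all.
  any(!isnothing, (maxdim, cutoff)) || return 0.0
  # Resolve defaults late, as in PATCH 33 (note the fixed
  # `replace_nothing(mindim, ...)` rather than `replace_nothing(maxdim, ...)`).
  mindim = replace_nothing(mindim, 1)
  maxdim = replace_nothing(maxdim, length(P))
  cutoff = replace_nothing(cutoff, 0.0)
  # Keep the values above the cutoff, clamped to the range [mindim, maxdim].
  n = clamp(count(>(cutoff), P), mindim, maxdim)
  truncerr = sum(P[(n + 1):end])
  resize!(P, n)
  return truncerr
end

P = [0.5, 0.3, 0.15, 0.05]
my_truncate!(P; maxdim=3)  # returns 0.05; P is now [0.5, 0.3, 0.15]
my_truncate!(P)            # returns 0.0; no truncation keywords were set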