Initial progress on Hermitian eigendecomposition
mtfishman committed Nov 15, 2023
1 parent 41ed36f commit 12fa143
Showing 12 changed files with 200 additions and 134 deletions.
1 change: 1 addition & 0 deletions NDTensors/Project.toml
@@ -51,6 +51,7 @@ LinearAlgebra = "1.6"
Random = "1.6"
Requires = "1.1"
SimpleTraits = "0.9.4"
SparseArrays = "1.6"
SplitApplyCombine = "1.2.2"
StaticArrays = "0.12, 1.0"
Strided = "2"
4 changes: 4 additions & 0 deletions NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl
@@ -3,6 +3,7 @@ using BlockArrays
using Compat
using Dictionaries
using SplitApplyCombine
using LinearAlgebra: Hermitian, Transpose

using BlockArrays: block

@@ -13,7 +14,10 @@ include("base.jl")
include("axes.jl")
include("abstractarray.jl")
include("permuteddimsarray.jl")
include("hermitian.jl")
include("transpose.jl")
include("blockarrays.jl")
# TODO: Split off into `NDSparseArrays` module.
include("sparsearray.jl")
include("blocksparsearray.jl")
include("allocate_output.jl")
11 changes: 10 additions & 1 deletion NDTensors/src/BlockSparseArrays/src/allocate_output.jl
@@ -5,7 +5,16 @@
function output_type(f, args::Type...)
# TODO: Is this good to use here?
# Seems best for `Number` subtypes, maybe restrict to that here.
return Base.promote_op(f, args...)
return typeof(f(zero.(args)...))
# return Base.promote_op(f, args...)
end

function output_type(f::Function, as::Type{<:AbstractArray}...)
@assert allequal(ndims.(as))
elt = output_type(f, eltype.(as)...)
n = ndims(first(as))
# TODO: Generalize this to GPU arrays!
return Array{elt,n}
end
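
As an illustration (not part of the diff), here is a standalone sketch mirroring the two methods above: scalar types are combined by evaluating `f` on zeros, and array types promote their element types while keeping the dimensionality (plain CPU `Array`s assumed, per the TODO):

output_type_sketch(f, args::Type...) = typeof(f(zero.(args)...))
function output_type_sketch(f, as::Type{<:AbstractArray}...)
  elt = output_type_sketch(f, eltype.(as)...)
  return Array{elt,ndims(first(as))}
end

output_type_sketch(+, Float32, Float64)                 # Float64
output_type_sketch(+, Vector{Float32}, Vector{Float64}) # Array{Float64, 1}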

#############################################################################
26 changes: 25 additions & 1 deletion NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
@@ -16,7 +16,8 @@ blocktype(a::BlockSparseArray{<:Any,<:Any,A}) where {A} = A
# TODO: Use `SetParameters`.
set_ndims(::Type{<:Array{T}}, n) where {T} = Array{T,n}

function nonzero_blockkeys(a::BlockSparseArray)
# TODO: Move to `AbstractArray` file.
function nonzero_blockkeys(a::AbstractArray)
return map(Block ∘ Tuple, collect(nonzero_keys(blocks(a))))
end
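
For illustration (not part of the diff): the `Block ∘ Tuple` composition converts a stored `CartesianIndex` from the blocks array into a `Block` label.

using BlockArrays: Block

(Block ∘ Tuple)(CartesianIndex(1, 2)) # Block(1, 2)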

@@ -105,6 +106,22 @@ function BlockSparseArray{T,N,B}(
return BlockSparseArray{T,N,B}(undef, blocks, axes)
end

## struct BlockSparseArray{
## T,N,A<:AbstractArray{T,N},Blocks<:SparseArray{A,N},Axes<:NTuple{N,AbstractUnitRange{Int}}
## } <: AbstractBlockArray{T,N}
## blocks::Blocks
## axes::Axes
## end

function BlockSparseArray(a::SparseArray, axes::Tuple{Vararg{AbstractUnitRange}})
A = eltype(a)
T = eltype(A)
N = ndims(a)
Blocks = typeof(a)
Axes = typeof(axes)
return BlockSparseArray{T,N,A,Blocks,Axes}(a, axes)
end

function BlockSparseArray(
blockdata::Dictionary{<:Block{N}}, axes::Tuple{Vararg{AbstractUnitRange{Int},N}}
) where {N}
@@ -265,6 +282,13 @@ function BlockArrays.blocks(
return PermutedDimsArray(blocks(parent(a)), perm(a))
end

# TODO: Make `PermutedBlockSparseArray`.
function BlockArrays.blocks(
a::Hermitian{<:Any,<:BlockSparseArray}
)
return Hermitian(blocks(parent(a)))
end

# TODO: Make `PermutedBlockSparseArray`.
function Base.zero(a::PermutedDimsArray{<:Any,<:Any,<:Any,<:Any,<:BlockSparseArray})
return BlockSparseArray(zero(blocks(a)), axes(a))
37 changes: 36 additions & 1 deletion NDTensors/src/BlockSparseArrays/src/broadcast.jl
@@ -39,6 +39,7 @@ isnumber(x) = false

function flatten_numbers(f, args)
# TODO: Is there a simpler way to implement this?
# This doesn't play well with `Base.promote_op`.
function flattened_f(flattened_args...)
j = 0
unflattened_args = ntuple(length(args)) do i
@@ -63,5 +64,39 @@ function Base.copyto!(a_dest::BlockSparseArray, bc::Broadcasted{<:BlockSparseSty
bcf = flatten(bc)
f, args = flatten_numbers(bcf.f, bcf.args)
return _broadcast!(f, a_dest, args...)
return error("Not implemented")
end
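
For illustration (not part of the diff), a minimal standalone sketch of the scalar-absorbing idea behind `flatten_numbers`: scalar broadcast arguments are captured in a closure so that only arrays remain, which is what lets the broadcast run over nonzero blocks only. All names here are hypothetical.

function absorb_scalars(f, args::Tuple)
  # Keep only the array (non-scalar) arguments.
  arrays = filter(x -> !(x isa Number), args)
  function g(xs...)
    j = 0
    # Reinsert the captured scalars at their original positions.
    full = ntuple(length(args)) do i
      args[i] isa Number ? args[i] : (j += 1; xs[j])
    end
    return f(full...)
  end
  return g, arrays
end

g, arrs = absorb_scalars(*, (2, 3.0))
g(3.0) # 6.0, i.e. `2 * x` applied to the remaining argument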

## # Special algebra cases
## struct LeftMul{C}
## c::C
## end
## (f::LeftMul)(x) = f.c * x
##
## struct RightMul{C}
## c::C
## end
## (f::RightMul)(x) = x * f.c
##
## # 2 .* a
## function Base.copy(bc::Broadcasted{<:BlockSparseStyle,<:Any,typeof(*),<:Tuple{<:Number,<:AbstractArray}})
## # TODO: Use `map_nonzeros`.
## return map(LeftMul(bc.args[1]), bc.args[2])
## end
##
## # a .* 2
## function Base.copy(bc::Broadcasted{<:BlockSparseStyle,<:Any,typeof(*),<:Tuple{<:AbstractArray,<:Number}})
## # TODO: Use `map_nonzeros`.
## return map(RightMul(bc.args[2]), bc.args[1])
## end
##
## # a ./ 2
## function Base.copy(bc::Broadcasted{<:BlockSparseStyle,<:Any,typeof(/),<:Tuple{<:AbstractArray,<:Number}})
## # TODO: Use `map_nonzeros`.
## return map(RightMul(inv(bc.args[2])), bc.args[1])
## end
##
## # a .+ b
## function Base.copy(bc::Broadcasted{<:BlockSparseStyle,<:Any,<:Union{typeof(+),typeof(-)},<:Tuple{<:AbstractArray,<:AbstractArray}})
## # TODO: Use `map_nonzeros`.
## return map(+, bc.args...)
## end
3 changes: 3 additions & 0 deletions NDTensors/src/BlockSparseArrays/src/hermitian.jl
@@ -0,0 +1,3 @@
# TODO: This needs to be done more carefully by
# grabbing upper or lower triangles of the parent matrix.
nonzero_keys(a::Hermitian) = nonzero_keys(parent(a))
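
The caveat in the TODO, illustrated (not part of the diff): for a `Hermitian` wrapper only one triangle of the parent is meaningful, so keys taken from the parent's other triangle can point at stale storage.

using LinearAlgebra: Hermitian

a = [1.0 2.0; 0.0 3.0]
h = Hermitian(a, :U) # only the upper triangle of `a` is read
h[2, 1]              # 2.0, the conjugate of h[1, 2]; a[2, 1] == 0.0 is ignored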
47 changes: 0 additions & 47 deletions NDTensors/src/BlockSparseArrays/src/sparsearray.jl
@@ -93,53 +93,6 @@ function map_nonzeros!(f, a_dest::AbstractArray, as::SparseArrayLike...)
return a_dest
end

## function output_type(f, args::Type...)
## # TODO: Is this good to use here?
## # Seems best for `Number` subtypes, maybe restrict to that here.
## return Base.promote_op(f, args...)
## end
##
## function output_eltype(::typeof(map_nonzeros), fmap, as::Type{<:AbstractArray}...)
## return output_type(fmap, eltype.(as)...)
## end
##
## function output_eltype(f::typeof(map_nonzeros), fmap, as::AbstractArray...)
## return output_eltype(f, fmap, typeof.(as)...)
## end
##
## function output_structure(f::typeof(map_nonzeros), fmap, as::SparseArray...)
## end
##
## function output_structure(f::typeof(map_nonzeros), fmap, as::AbstractArray...)
## return ArrayStructure(; eltype=output_eltype(f, fmap, as...), axes=output_axes(f, fmap, as...))
## end
##
## function output_type(f::typeof(map_nonzeros), fmap, as::AbstractArray...)
## return error("Not implemented")
## end
##
## function output_type(f::typeof(map_nonzeros), fmap, as::SparseArrayLike...)
## return SparseArray
## end
##
## # Allocate an array with uninitialized/undefined memory
## # according the array type and structure (for example the
## # size or axes).
## function allocate(arraytype::Type{<:AbstractArray}, structure)
## return arraytype(undef, structure)
## end
##
## function allocate_zeros(arraytype::Type{<:AbstractArray}, structure)
## a = allocate(arraytype, structure)
## # TODO: Use `zeros!!` or `zerovector!!` from VectorInterface.jl.
## zeros!(a)
## return a
## end
##
## function allocate_output(f::typeof(map_nonzeros), fmap, as::AbstractArray...)
## return allocate_zeros(output_type(f, fmap, as...), output_structure(f, fmap, as...))
## end
##
function map_nonzeros(f, as::SparseArrayLike...)
## @assert allequal(axes.(as))
# Preserves the element type:
6 changes: 6 additions & 0 deletions NDTensors/src/BlockSparseArrays/src/transpose.jl
@@ -0,0 +1,6 @@
function nonzero_keys(a::Transpose)
return (
CartesianIndex(reverse(Tuple(parent_index))) for
parent_index in nonzero_keys(parent(a))
)
end
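
For example (not part of the diff), a stored key of the parent appears with its coordinates reversed in the transpose:

CartesianIndex(reverse(Tuple(CartesianIndex(1, 2)))) # CartesianIndex(2, 1)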
@@ -20,3 +20,8 @@ array(tensor::MatrixOrArrayStorageTensor) = storage(tensor)
function Base.adjoint(tens::MatrixStorageTensor)
return tensor(adjoint(storage(tens)), reverse(inds(tens)))
end

# Linear algebra (matrix algebra)
function LinearAlgebra.Hermitian(tens::MatrixStorageTensor)
return tensor(Hermitian(storage(tens)), inds(tens))
end
75 changes: 42 additions & 33 deletions NDTensors/src/arraystorage/arraystorage/tensor/eigen.jl
@@ -1,9 +1,34 @@
function truncate!!(
d::AbstractVector,
u::AbstractMatrix;
mindim,
maxdim,
cutoff,
use_absolute_cutoff,
use_relative_cutoff,
)
error("Not implemented")
# Sort by largest to smallest eigenvalues
# TODO: Replace `cpu` with `Expose` dispatch.
p = sortperm(cpu(d); rev=true, by=abs)
d = d[p]
u = u[:, p]

if any(!isnothing, (maxdim, cutoff))
d, truncerr, _ = truncate!!(
d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
)
length_d = length(d)
if length_d < size(u, 2)
u = u[:, 1:length_d]
end
else
length_d = length(d)
# TODO: Make this `zero(eltype(d))`?
truncerr = 0.0
end
spec = Spectrum(d, truncerr)
return d, u, spec
end
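
A standalone sketch of the sort-and-truncate step on plain CPU arrays (not part of the diff; the real `truncate!!` also applies the `cutoff` logic and returns a `Spectrum`):

d = [0.1, -2.0, 1.0]
u = [1.0 2.0 3.0; 4.0 5.0 6.0; 7.0 8.0 9.0]
p = sortperm(d; rev=true, by=abs) # [2, 3, 1]: largest |λ| first
d, u = d[p], u[:, p]
maxdim = 2
d, u = d[1:maxdim], u[:, 1:maxdim] # keep the two dominant eigenpairs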

# TODO: Rewrite this function to be more modern:
# 1. List keyword arguments in function signature.
# 2. Output `Spectrum` as a keyword argument that gets overwritten.
# 3. Make this into two layers, one that handles indices and one that works with `AbstractMatrix`.
function eigen(
T::Hermitian{ElT,<:ArrayStorageTensor{ElT}};
function LinearAlgebra.eigen(
t::ArrayStorageTensor{ElT};
maxdim=nothing,
mindim=nothing,
cutoff=nothing,
@@ -20,48 +45,32 @@
ortho=nothing,
svd_alg=nothing,
) where {ElT<:Union{Real,Complex}}
matrixT = matrix(T)
a = storage(t)
## TODO: Here I am calling `parent` to ensure that the correct `any` function
## is invoked for non-CPU matrices.
if any(!isfinite, parent(matrixT))
if any(!isfinite, parent(a))
throw(
ArgumentError(
"Trying to perform the eigendecomposition of a matrix containing NaNs or Infs"
),
)
end
d, u = eigen(a)
d, u, spec = truncate!!(d, u; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff)

DM, VM = eigen(matrixT)

# Sort by largest to smallest eigenvalues
# TODO: Replace `cpu` with `Expose` dispatch.
p = sortperm(cpu(DM); rev=true, by=abs)
DM = DM[p]
VM = VM[:, p]

if any(!isnothing, (maxdim, cutoff))
DM, truncerr, _ = truncate!!(
DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff
)
dD = length(DM)
if dD < size(VM, 2)
VM = VM[:, 1:dD]
end
else
dD = length(DM)
truncerr = 0.0
end
spec = Spectrum(DM, truncerr)
error("Not implemented")

# Make the new indices to go onto V
# TODO: Put in a separate function, such as
# `rewrap_inds` or something like that.
indstype = typeof(inds(T))
l = eltype(indstype)(dD)
r = eltype(indstype)(dD)
Vinds = indstype((dag(ind(T, 2)), dag(r)))
Dinds = indstype((l, dag(r)))
V = tensor(VM, Vinds)
D = tensor(DiagonalMatrix(DM), Dinds)
return D, V, spec
indstype = typeof(inds(t))

# TODO: Make this generic to dense or block sparse.
l = eltype(indstype)(axes(t, 1))
r = eltype(indstype)(axes(t, 2))
inds_d = indstype((l, dag(r)))
inds_u = indstype((dag(ind(t, 2)), dag(r)))
dₜ = tensor(DiagonalMatrix(d), inds_d)
uₜ = tensor(u, inds_u)
return dₜ, uₜ, spec
end
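
For orientation (not part of the diff), the decomposition this function targets, shown end to end on a plain Hermitian matrix without tensor indices or truncation:

using LinearAlgebra: Diagonal, Hermitian, eigen

a = Hermitian([2.0 1.0; 1.0 2.0])
d, u = eigen(a)
p = sortperm(d; rev=true, by=abs) # largest |λ| first, as in `truncate!!`
d, u = d[p], u[:, p]
u * Diagonal(d) * u' ≈ a          # reconstruction check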
17 changes: 13 additions & 4 deletions NDTensors/src/arraystorage/blocksparsearray/storage/eigen.jl
@@ -1,10 +1,19 @@
function LinearAlgebra.eigen(a::BlockSparseArray; kwargs...)
function LinearAlgebra.eigen(a::BlockSparseArray)
return error("Not implemented")
end

# TODO: Maybe make `Hermitian` partially eager for `BlockSparseArray`?
function LinearAlgebra.eigen(
a::Union{Hermitian{<:Real,<:BlockSparseArray},Hermitian{<:Complex,<:BlockSparseArray}};
kwargs...,
a::Union{Hermitian{<:Real,<:BlockSparseArray},Hermitian{<:Complex,<:BlockSparseArray}}
)
return error("Not implemented")
# TODO: Test `a` is block diagonal.
# @assert is_block_diagonal(a)
d = BlockSparseArray{real(eltype(a))}(axes(a, 1))
u = BlockSparseArray{eltype(a)}(axes(a))
for b in nonzero_blockkeys(a)
d_b, u_b = eigen(@view a[b])
d[BlockArrays.Block(b.n[1])] = d_b
u[b] = u_b
end
return d, u
end
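
The idea of the loop above, illustrated on plain matrices (not part of the diff): a block-diagonal Hermitian matrix is diagonalized block by block, and its spectrum is the union of the block spectra.

using LinearAlgebra: Hermitian, eigvals

b1 = Hermitian([2.0 1.0; 1.0 2.0])
b2 = Hermitian(fill(4.0, 1, 1))
a = [Matrix(b1) zeros(2, 1); zeros(1, 2) Matrix(b2)] # block diagonal
sort(vcat(eigvals(b1), eigvals(b2))) ≈ eigvals(Hermitian(a)) # true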