From d3e346f9929401fa3a8d022babb064399e51ffd8 Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 26 Sep 2023 18:03:52 -0400
Subject: [PATCH 1/8] [NDTensors] BlockSparseArrays prototype

---
 NDTensors/Project.toml                        |  1 +
 NDTensors/src/BlockSparseArrays/README.md     | 51 ++++++++++++++
 .../src/BlockSparseArrays.jl                  | 10 +++
 .../BlockSparseArrays/src/blocksparsearray.jl | 68 +++++++++++++++++++
 .../src/BlockSparseArrays/src/sparsearray.jl  | 30 ++++++++
 NDTensors/src/NDTensors.jl                    |  2 +
 6 files changed, 162 insertions(+)
 create mode 100644 NDTensors/src/BlockSparseArrays/README.md
 create mode 100644 NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl
 create mode 100644 NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
 create mode 100644 NDTensors/src/BlockSparseArrays/src/sparsearray.jl

diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml
index c5d46e7592..b33322a4f6 100644
--- a/NDTensors/Project.toml
+++ b/NDTensors/Project.toml
@@ -5,6 +5,7 @@ version = "0.2.11"
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
+BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
 Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
 Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
 FLoops = "cc61a311-1640-44b5-9fba-1b764f453329"
diff --git a/NDTensors/src/BlockSparseArrays/README.md b/NDTensors/src/BlockSparseArrays/README.md
new file mode 100644
index 0000000000..8e2b89e728
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/README.md
@@ -0,0 +1,51 @@
+# BlockSparseArrays.jl
+
+A Julia `BlockSparseArray` type based on the `BlockArrays.jl` interface.
+
+It wraps an elementwise `SparseArray` type that uses a dictionary-of-keys
+to store non-zero values, specifically a `Dictionary` from `Dictionaries.jl`.
+`BlockArrays` reinterprets the `SparseArray` as a blocked data structure.
+
+```julia
+using NDTensors.BlockSparseArrays
+using BlockArrays
+using Dictionaries
+
+# Block dimensions
+i1 = [2, 3]
+i2 = [2, 3]
+
+i_axes = (blockedrange(i1), blockedrange(i2))
+
+function block_size(axes, block)
+  return length.(getindex.(axes, Block.(block.n)))
+end
+
+# Data
+nz_blocks = [Block(1, 1), Block(2, 2)]
+nz_block_sizes = [block_size(i_axes, nz_block) for nz_block in nz_blocks]
+nz_block_lengths = prod.(nz_block_sizes)
+
+# Blocks with discontiguous underlying data
+d_blocks = randn.(nz_block_sizes)
+
+# Blocks with contiguous underlying data
+# d_data = PseudoBlockVector(randn(sum(nz_block_lengths)), nz_block_lengths)
+# d_blocks = [reshape(@view(d_data[Block(i)]), block_size(i_axes, nz_blocks[i])) for i in 1:length(nz_blocks)]
+
+block_data = Dictionary([CartesianIndex(nz_block.n) for nz_block in nz_blocks], d_blocks)
+block_storage = SparseArray{valtype(block_data),length(i_axes)}(block_data, blocklength.(i_axes))
+
+B = BlockSparseArray(block_storage, i_axes)
+B * B
+```
+
+## TODO
+
+- Define an `AbstractBlockSparseArray` type along with two concrete types, one with blocks that make no assumptions about data layout (they could be slices into contiguous data or not), and one that uses contiguous memory in the background (which could be any `AbstractVector` wrapped in a `PseudoBlockVector` that tracks the blocks as shown above).
+- Define fast linear algebra (matmul, SVD, QR, etc.) that takes advantage of sparsity.
+- Define tensor contraction and addition using the `TensorOperations.jl` tensor operations interface (`tensoradd!`, `tensorcontract!`, and `tensortrace!`). See `SparseArrayKit.jl` for examples of overloading for sparse data structures.
+- Use `SparseArrayKit.jl` as the elementwise sparse array backend (it would need to be generalized a little;
+for example, it makes the assumption that `zero` is defined for the element type, which isn't the case when the values are matrices since it would need shape information, though it could output a universal zero tensor).
+- Implement `SparseArrays` functionality such as `findnz`, `findall(!iszero, B)`, `nnz`, `nonzeros`, `dropzeros`, and `droptol!`, along with the block versions of those (which would get forwarded to the `SparseArray` data structure, where they are treated as elementwise sparsity). `SparseArrayKit.jl` has functions `nonzero_pairs`, `nonzero_keys`, `nonzero_values`, and `nonzero_length` which could have analogous block functions.
+- Look at other packages that deal with block sparsity such as `BlockSparseMatrices.jl` and `BlockBandedMatrices.jl` for ideas on code design and interfaces.
diff --git a/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl b/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl
new file mode 100644
index 0000000000..dd99166e1b
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl
@@ -0,0 +1,10 @@
+module BlockSparseArrays
+using BlockArrays
+using Dictionaries
+
+export BlockSparseArray, SparseArray
+
+include("sparsearray.jl")
+include("blocksparsearray.jl")
+
+end
diff --git a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
new file mode 100644
index 0000000000..3d72f4fd36
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
@@ -0,0 +1,68 @@
+using BlockArrays: block
+
+# Also add a version with contiguous underlying data.
+struct BlockSparseArray{T,N,R<:SparseArray{<:AbstractArray{T,N},N},BS<:NTuple{N,AbstractUnitRange{Int}}} <: AbstractBlockArray{T,N}
+  blocks::R
+  axes::BS
+end
+
+Base.axes(block_arr::BlockSparseArray) = block_arr.axes
+
+Base.copy(block_arr::BlockSparseArray) = BlockSparseArray(deepcopy(block_arr.blocks), copy.(block_arr.axes))
+
+function BlockArrays.viewblock(block_arr::BlockSparseArray, block)
+  blks = block.n
+  @boundscheck blockcheckbounds(block_arr, blks...)
+  block_size = length.(getindex.(axes(block_arr), Block.(blks)))
+  # TODO: Make this `Zeros`?
+  zero = zeros(eltype(block_arr), block_size)
+  # return block_arr.blocks[blks...] # Fails because zero isn't defined
+  return get_nonzero(block_arr.blocks, blks, zero)
+end
+
+function Base.getindex(block_arr::BlockSparseArray{T,N}, bi::BlockIndex{N}) where {T,N}
+  @boundscheck blockcheckbounds(block_arr, Block(bi.I))
+  bl = view(block_arr, block(bi))
+  inds = bi.α
+  @boundscheck checkbounds(bl, inds...)
+  v = bl[inds...]
+  return v
+end
+
+function Base.setindex!(block_arr::BlockSparseArray{T, N}, v, i::Vararg{Integer, N}) where {T,N}
+  @boundscheck checkbounds(block_arr, i...)
+  block_indices = findblockindex.(axes(block_arr), i)
+  block = map(block_index -> Block(block_index.I), block_indices)
+  offsets = map(block_index -> only(block_index.α), block_indices)
+  block_view = @view block_arr[block...]
+  block_view[offsets...] = v
+  block_arr[block...] = block_view
+  return block_arr
+end
+
+function BlockArrays._check_setblock!(block_arr::BlockSparseArray{T, N}, v, block::NTuple{N, Integer}) where {T,N}
+  for i in 1:N
+    bsz = length(axes(block_arr, i)[Block(block[i])])
+    if size(v, i) != bsz
+      throw(DimensionMismatch(string("tried to assign $(size(v)) array to ", length.(getindex.(axes(block_arr), block)), " block")))
+    end
+  end
+end
+
+function Base.setindex!(block_arr::BlockSparseArray{T, N}, v, block::Vararg{Block{1}, N}) where {T,N}
+  blks = Int.(block)
+  @boundscheck blockcheckbounds(block_arr, blks...)
+  @boundscheck BlockArrays._check_setblock!(block_arr, v, blks)
+  # This fails since it tries to replace the element
+  block_arr.blocks[blks...] = v
+  # Use .= here to overwrite data.
+  ## block_view = @view block_arr[Block(blks)]
+  ## block_view .= v
+  return block_arr
+end
+
+function Base.getindex(block_arr::BlockSparseArray{T, N}, i::Vararg{Integer, N}) where {T,N}
+  @boundscheck checkbounds(block_arr, i...)
+  v = block_arr[findblockindex.(axes(block_arr), i)...]
+  return v
+end
diff --git a/NDTensors/src/BlockSparseArrays/src/sparsearray.jl b/NDTensors/src/BlockSparseArrays/src/sparsearray.jl
new file mode 100644
index 0000000000..1ae33befb8
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/src/sparsearray.jl
@@ -0,0 +1,30 @@
+struct SparseArray{T,N} <: AbstractArray{T,N}
+  data::Dictionary{CartesianIndex{N},T}
+  dims::NTuple{N,Int64}
+end
+
+Base.size(a::SparseArray) = a.dims
+
+function Base.setindex!(a::SparseArray{T,N}, v, I::CartesianIndex{N}) where {T,N}
+  set!(a.data, I, v)
+  return a
+end
+function Base.setindex!(a::SparseArray{T,N}, v, I::Vararg{Int,N}) where {T,N}
+  return setindex!(a, v, CartesianIndex(I))
+end
+
+function Base.getindex(a::SparseArray{T,N}, I::CartesianIndex{N}) where {T,N}
+  return get(a.data, I, nothing)
+end
+function Base.getindex(a::SparseArray{T,N}, I::Vararg{Int,N}) where {T,N}
+  return getindex(a, CartesianIndex(I))
+end
+
+# `getindex` but uses a default if the value is
+# structurally zero.
+function get_nonzero(a::SparseArray{T,N}, I::CartesianIndex{N}, zero) where {T,N}
+  @boundscheck checkbounds(a, I)
+  return get(a.data, I, zero)
+end
+get_nonzero(a::SparseArray{T,N}, I::NTuple{N,Int}, zero) where {T,N} =
+  get_nonzero(a, CartesianIndex(I), zero)
diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl
index ae138d11fc..f5968cff07 100644
--- a/NDTensors/src/NDTensors.jl
+++ b/NDTensors/src/NDTensors.jl
@@ -19,6 +19,8 @@ using TupleTools
 include("SetParameters/src/SetParameters.jl")
 using .SetParameters
 
+include("BlockSparseArrays/src/BlockSparseArrays.jl")
+using .BlockSparseArrays
 
 using Base: @propagate_inbounds, ReshapedArray, DimOrInd, OneTo

From 8c7dada70c46301a62598ecc10636dcd2ea2d3bc Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 26 Sep 2023 18:28:51 -0400
Subject: [PATCH 2/8] Expand example

---
 NDTensors/src/BlockSparseArrays/README.md | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/NDTensors/src/BlockSparseArrays/README.md b/NDTensors/src/BlockSparseArrays/README.md
index 8e2b89e728..a1752bbbc5 100644
--- a/NDTensors/src/BlockSparseArrays/README.md
+++ b/NDTensors/src/BlockSparseArrays/README.md
@@ -37,6 +37,17 @@ block_data = Dictionary([CartesianIndex(nz_block.n) for nz_block in nz_blocks],
 block_storage = SparseArray{valtype(block_data),length(i_axes)}(block_data, blocklength.(i_axes))
 
 B = BlockSparseArray(block_storage, i_axes)
+
+# Access a block
+B[Block(1, 1)]
+
+# Access a zero block, returns a zero matrix
+B[Block(1, 2)]
+
+# Set a zero block
+B[Block(1, 2)] = randn(2, 3)
+
+# Matrix multiplication (not optimized for sparsity yet)
 B * B
 ```
 

From 49d6b33e707f9b513d4e86e9fe13a9fbcf252f4c Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 26 Sep 2023 20:52:44 -0400
Subject: [PATCH 3/8] Format

---
 .../BlockSparseArrays/src/blocksparsearray.jl | 40 ++++++++++++++-----
 .../src/BlockSparseArrays/src/sparsearray.jl  |  9 +++--
 2 files changed, 34 insertions(+), 15 deletions(-)

diff --git a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
index 3d72f4fd36..6d45220dae 100644
--- a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
+++ b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
@@ -1,14 +1,18 @@
 using BlockArrays: block
 
 # Also add a version with contiguous underlying data.
-struct BlockSparseArray{T,N,R<:SparseArray{<:AbstractArray{T,N},N},BS<:NTuple{N,AbstractUnitRange{Int}}} <: AbstractBlockArray{T,N}
+struct BlockSparseArray{
+  T,N,R<:SparseArray{<:AbstractArray{T,N},N},BS<:NTuple{N,AbstractUnitRange{Int}}
+} <: AbstractBlockArray{T,N}
   blocks::R
   axes::BS
 end
 
 Base.axes(block_arr::BlockSparseArray) = block_arr.axes
 
-Base.copy(block_arr::BlockSparseArray) = BlockSparseArray(deepcopy(block_arr.blocks), copy.(block_arr.axes))
+function Base.copy(block_arr::BlockSparseArray)
+  return BlockSparseArray(deepcopy(block_arr.blocks), copy.(block_arr.axes))
+end
 
 function BlockArrays.viewblock(block_arr::BlockSparseArray, block)
   blks = block.n
@@ -29,7 +33,9 @@ function Base.getindex(block_arr::BlockSparseArray{T,N}, bi::BlockIndex{N}) wher
   return v
 end
 
-function Base.setindex!(block_arr::BlockSparseArray{T, N}, v, i::Vararg{Integer, N}) where {T,N}
+function Base.setindex!(
+  block_arr::BlockSparseArray{T,N}, v, i::Vararg{Integer,N}
+) where {T,N}
   @boundscheck checkbounds(block_arr, i...)
   block_indices = findblockindex.(axes(block_arr), i)
   block = map(block_index -> Block(block_index.I), block_indices)
@@ -40,16 +46,28 @@ function Base.setindex!(block_arr::BlockSparseArray{T, N}, v, i::Vararg{Integer,
   offsets = map(block_index -> only(block_index.α), block_indices)
   block_view = @view block_arr[block...]
   block_view[offsets...] = v
   block_arr[block...] = block_view
   return block_arr
 end
 
-function BlockArrays._check_setblock!(block_arr::BlockSparseArray{T, N}, v, block::NTuple{N, Integer}) where {T,N}
-  for i in 1:N
-    bsz = length(axes(block_arr, i)[Block(block[i])])
-    if size(v, i) != bsz
-      throw(DimensionMismatch(string("tried to assign $(size(v)) array to ", length.(getindex.(axes(block_arr), block)), " block")))
-    end
+function BlockArrays._check_setblock!(
+  block_arr::BlockSparseArray{T,N}, v, block::NTuple{N,Integer}
+) where {T,N}
+  for i in 1:N
+    bsz = length(axes(block_arr, i)[Block(block[i])])
+    if size(v, i) != bsz
+      throw(
+        DimensionMismatch(
+          string(
+            "tried to assign $(size(v)) array to ",
+            length.(getindex.(axes(block_arr), block)),
+            " block",
+          ),
+        ),
+      )
     end
+  end
 end
 
-function Base.setindex!(block_arr::BlockSparseArray{T, N}, v, block::Vararg{Block{1}, N}) where {T,N}
+function Base.setindex!(
+  block_arr::BlockSparseArray{T,N}, v, block::Vararg{Block{1},N}
+) where {T,N}
   blks = Int.(block)
   @boundscheck blockcheckbounds(block_arr, blks...)
   @boundscheck BlockArrays._check_setblock!(block_arr, v, blks)
@@ -61,7 +79,7 @@
   return block_arr
 end
 
-function Base.getindex(block_arr::BlockSparseArray{T, N}, i::Vararg{Integer, N}) where {T,N}
+function Base.getindex(block_arr::BlockSparseArray{T,N}, i::Vararg{Integer,N}) where {T,N}
   @boundscheck checkbounds(block_arr, i...)
   v = block_arr[findblockindex.(axes(block_arr), i)...]
   return v
diff --git a/NDTensors/src/BlockSparseArrays/src/sparsearray.jl b/NDTensors/src/BlockSparseArrays/src/sparsearray.jl
index 1ae33befb8..bb824f1446 100644
--- a/NDTensors/src/BlockSparseArrays/src/sparsearray.jl
+++ b/NDTensors/src/BlockSparseArrays/src/sparsearray.jl
@@ -23,8 +23,9 @@ end
 
 # `getindex` but uses a default if the value is
 # structurally zero.
 function get_nonzero(a::SparseArray{T,N}, I::CartesianIndex{N}, zero) where {T,N}
   @boundscheck checkbounds(a, I)
   return get(a.data, I, zero)
 end
-get_nonzero(a::SparseArray{T,N}, I::NTuple{N,Int}, zero) where {T,N} =
-  get_nonzero(a, CartesianIndex(I), zero)
+function get_nonzero(a::SparseArray{T,N}, I::NTuple{N,Int}, zero) where {T,N}
+  return get_nonzero(a, CartesianIndex(I), zero)
+end

From 49255c06deefa352aee498367bd65d295ced748f Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 3 Oct 2023 14:53:26 -0400
Subject: [PATCH 4/8] Add example and test based on Literate.jl

---
 NDTensors/src/BlockSparseArrays/README.md     | 25 ++++-----
 .../src/BlockSparseArrays/examples/README.jl  | 52 +++++++++++++++++++
 .../BlockSparseArrays/src/blocksparsearray.jl | 37 ++++++++++---
 .../src/BlockSparseArrays/src/sparsearray.jl  | 23 ++++----
 .../src/BlockSparseArrays/test/runtests.jl    |  9 ++++
 5 files changed, 114 insertions(+), 32 deletions(-)
 create mode 100644 NDTensors/src/BlockSparseArrays/examples/README.jl
 create mode 100644 NDTensors/src/BlockSparseArrays/test/runtests.jl

diff --git a/NDTensors/src/BlockSparseArrays/README.md b/NDTensors/src/BlockSparseArrays/README.md
index a1752bbbc5..27fd9564f3 100644
--- a/NDTensors/src/BlockSparseArrays/README.md
+++ b/NDTensors/src/BlockSparseArrays/README.md
@@ -6,10 +6,9 @@ It wraps an elementwise `SparseArray` type that uses a dictionary-of-keys
 to store non-zero values, specifically a `Dictionary` from `Dictionaries.jl`.
 `BlockArrays` reinterprets the `SparseArray` as a blocked data structure.
 
-```julia
+````julia
 using NDTensors.BlockSparseArrays
 using BlockArrays
-using Dictionaries
 
 # Block dimensions
 i1 = [2, 3]
 i2 = [2, 3]
@@ -33,10 +32,7 @@ d_blocks = randn.(nz_block_sizes)
 
 # Blocks with contiguous underlying data
 # d_data = PseudoBlockVector(randn(sum(nz_block_lengths)), nz_block_lengths)
 # d_blocks = [reshape(@view(d_data[Block(i)]), block_size(i_axes, nz_blocks[i])) for i in 1:length(nz_blocks)]
 
-block_data = Dictionary([CartesianIndex(nz_block.n) for nz_block in nz_blocks], d_blocks)
-block_storage = SparseArray{valtype(block_data),length(i_axes)}(block_data, blocklength.(i_axes))
-
-B = BlockSparseArray(block_storage, i_axes)
+B = BlockSparseArray(nz_blocks, d_blocks, i_axes)
 
 # Access a block
 B[Block(1, 1)]
@@ -49,14 +45,15 @@ B[Block(1, 2)] = randn(2, 3)
 
 # Matrix multiplication (not optimized for sparsity yet)
 B * B
-```
+````
+
+You can generate this README with:
+```julia
+using Literate
+Literate.markdown("examples/README.jl", "."; flavor=Literate.CommonMarkFlavor())
+```
-
-## TODO
-
+---
+
+*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
-- Define an `AbstractBlockSparseArray` type along with two concrete types, one with blocks that make no assumptions about data layout (they could be slices into contiguous data or not), and one that uses contiguous memory in the background (which could be any `AbstractVector` wrapped in a `PseudoBlockVector` that tracks the blocks as shown above).
-- Define fast linear algebra (matmul, SVD, QR, etc.) that takes advantage of sparsity.
-- Define tensor contraction and addition using the `TensorOperations.jl` tensor operations interface (`tensoradd!`, `tensorcontract!`, and `tensortrace!`). See `SparseArrayKit.jl` for examples of overloading for sparse data structures.
-- Use `SparseArrayKit.jl` as the elementwise sparse array backend (it would need to be generalized a little;
-for example, it makes the assumption that `zero` is defined for the element type, which isn't the case when the values are matrices since it would need shape information, though it could output a universal zero tensor).
-- Implement `SparseArrays` functionality such as `findnz`, `findall(!iszero, B)`, `nnz`, `nonzeros`, `dropzeros`, and `droptol!`, along with the block versions of those (which would get forwarded to the `SparseArray` data structure, where they are treated as elementwise sparsity). `SparseArrayKit.jl` has functions `nonzero_pairs`, `nonzero_keys`, `nonzero_values`, and `nonzero_length` which could have analogous block functions.
-- Look at other packages that deal with block sparsity such as `BlockSparseMatrices.jl` and `BlockBandedMatrices.jl` for ideas on code design and interfaces.
diff --git a/NDTensors/src/BlockSparseArrays/examples/README.jl b/NDTensors/src/BlockSparseArrays/examples/README.jl
new file mode 100644
index 0000000000..3a73fc692c
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/examples/README.jl
@@ -0,0 +1,52 @@
+# # BlockSparseArrays.jl
+#
+# A Julia `BlockSparseArray` type based on the `BlockArrays.jl` interface.
+#
+# It wraps an elementwise `SparseArray` type that uses a dictionary-of-keys
+# to store non-zero values, specifically a `Dictionary` from `Dictionaries.jl`.
+# `BlockArrays` reinterprets the `SparseArray` as a blocked data structure.

+using NDTensors.BlockSparseArrays
+using BlockArrays
+
+## Block dimensions
+i1 = [2, 3]
+i2 = [2, 3]
+
+i_axes = (blockedrange(i1), blockedrange(i2))
+
+function block_size(axes, block)
+  return length.(getindex.(axes, Block.(block.n)))
+end
+
+## Data
+nz_blocks = [Block(1, 1), Block(2, 2)]
+nz_block_sizes = [block_size(i_axes, nz_block) for nz_block in nz_blocks]
+nz_block_lengths = prod.(nz_block_sizes)
+
+## Blocks with discontiguous underlying data
+d_blocks = randn.(nz_block_sizes)
+
+## Blocks with contiguous underlying data
+## d_data = PseudoBlockVector(randn(sum(nz_block_lengths)), nz_block_lengths)
+## d_blocks = [reshape(@view(d_data[Block(i)]), block_size(i_axes, nz_blocks[i])) for i in 1:length(nz_blocks)]
+
+B = BlockSparseArray(nz_blocks, d_blocks, i_axes)
+
+## Access a block
+B[Block(1, 1)]
+
+## Access a zero block, returns a zero matrix
+B[Block(1, 2)]
+
+## Set a zero block
+B[Block(1, 2)] = randn(2, 3)
+
+## Matrix multiplication (not optimized for sparsity yet)
+B * B
+
+# You can generate this README with:
+# ```julia
+# using Literate
+# Literate.markdown("examples/README.jl", "."; flavor=Literate.CommonMarkFlavor())
+# ```
diff --git a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
index 6d45220dae..48f18cede7 100644
--- a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
+++ b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
@@ -2,10 +2,33 @@ using BlockArrays: block
 
 # Also add a version with contiguous underlying data.
 struct BlockSparseArray{
-  T,N,R<:SparseArray{<:AbstractArray{T,N},N},BS<:NTuple{N,AbstractUnitRange{Int}}
+  T,N,Blocks<:SparseArray{<:AbstractArray{T,N},N},Axes<:NTuple{N,AbstractUnitRange{Int}}
 } <: AbstractBlockArray{T,N}
-  blocks::R
-  axes::BS
+  blocks::Blocks
+  axes::Axes
+end
+
+# The size of a block
+function block_size(axes::Tuple, block::Block)
+  return length.(getindex.(axes, Block.(block.n)))
+end
+
+struct BlockZero{Axes}
+  axes::Axes
 end
+(f::BlockZero)(T::Type, I::CartesianIndex) = fill!(T(undef, block_size(f.axes, Block(Tuple(I)))), false)
+
+function BlockSparseArray(blocks::AbstractVector{<:Block}, blockdata::AbstractVector, axes::Tuple)
+  return BlockSparseArray(Dictionary(blocks, blockdata), axes)
+end
+
+function BlockSparseArray(blockdata::Dictionary{<:Block}, axes::Tuple)
+  blocks = keys(blockdata)
+  cartesianblocks = map(block -> CartesianIndex(block.n), blocks)
+  cartesiandata = Dictionary(cartesianblocks, blockdata)
+  block_storage = SparseArray(cartesiandata, blocklength.(axes), BlockZero(axes))
+  return BlockSparseArray(block_storage, axes)
+end
 
 Base.axes(block_arr::BlockSparseArray) = block_arr.axes
 
@@ -17,11 +40,11 @@ end
 function BlockArrays.viewblock(block_arr::BlockSparseArray, block)
   blks = block.n
   @boundscheck blockcheckbounds(block_arr, blks...)
-  block_size = length.(getindex.(axes(block_arr), Block.(blks)))
+  ## block_size = length.(getindex.(axes(block_arr), Block.(blks)))
   # TODO: Make this `Zeros`?
-  zero = zeros(eltype(block_arr), block_size)
-  # return block_arr.blocks[blks...] # Fails because zero isn't defined
-  return get_nonzero(block_arr.blocks, blks, zero)
+  ## zero = zeros(eltype(block_arr), block_size)
+  return block_arr.blocks[blks...] # The zero block is now defined via `BlockZero`
+  ## return get_nonzero(block_arr.blocks, blks, zero)
 end
 
 function Base.getindex(block_arr::BlockSparseArray{T,N}, bi::BlockIndex{N}) where {T,N}
diff --git a/NDTensors/src/BlockSparseArrays/src/sparsearray.jl b/NDTensors/src/BlockSparseArrays/src/sparsearray.jl
index bb824f1446..a3f5b6dcbd 100644
--- a/NDTensors/src/BlockSparseArrays/src/sparsearray.jl
+++ b/NDTensors/src/BlockSparseArrays/src/sparsearray.jl
@@ -1,6 +1,7 @@
-struct SparseArray{T,N} <: AbstractArray{T,N}
+struct SparseArray{T,N,Zero} <: AbstractArray{T,N}
   data::Dictionary{CartesianIndex{N},T}
   dims::NTuple{N,Int64}
+  zero::Zero
 end
 
 Base.size(a::SparseArray) = a.dims
@@ -14,18 +15,18 @@ function Base.setindex!(a::SparseArray{T,N}, v, I::Vararg{Int,N}) where {T,N}
 end
 
 function Base.getindex(a::SparseArray{T,N}, I::CartesianIndex{N}) where {T,N}
-  return get(a.data, I, nothing)
+  return get(a.data, I, a.zero(T, I))
 end
 function Base.getindex(a::SparseArray{T,N}, I::Vararg{Int,N}) where {T,N}
   return getindex(a, CartesianIndex(I))
 end
 
-# `getindex` but uses a default if the value is
-# structurally zero.
-function get_nonzero(a::SparseArray{T,N}, I::CartesianIndex{N}, zero) where {T,N}
-  @boundscheck checkbounds(a, I)
-  return get(a.data, I, zero)
-end
-function get_nonzero(a::SparseArray{T,N}, I::NTuple{N,Int}, zero) where {T,N}
-  return get_nonzero(a, CartesianIndex(I), zero)
-end
+## # `getindex` but uses a default if the value is
+## # structurally zero.
+## function get_nonzero(a::SparseArray{T,N}, I::CartesianIndex{N}, zero) where {T,N}
+##   @boundscheck checkbounds(a, I)
+##   return get(a.data, I, zero)
+## end
+## function get_nonzero(a::SparseArray{T,N}, I::NTuple{N,Int}, zero) where {T,N}
+##   return get_nonzero(a, CartesianIndex(I), zero)
+## end
diff --git a/NDTensors/src/BlockSparseArrays/test/runtests.jl b/NDTensors/src/BlockSparseArrays/test/runtests.jl
new file mode 100644
index 0000000000..32d43ae501
--- /dev/null
+++ b/NDTensors/src/BlockSparseArrays/test/runtests.jl
@@ -0,0 +1,9 @@
+using Test
+using NDTensors.BlockSparseArrays
+
+@testset "Test NDTensors.BlockSparseArrays" begin
+  @testset "README" begin
+    @test include(joinpath(pkgdir(BlockSparseArrays), "src", "BlockSparseArrays", "examples", "README.jl")) isa Any
+  end
+end
+

From 3af05a50cf1b6a8adb14fc5f8b74c28abc8b2d88 Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 3 Oct 2023 17:02:00 -0400
Subject: [PATCH 5/8] Add tests

---
 .../src/BlockSparseArrays.jl                  |  2 +
 .../BlockSparseArrays/src/blocksparsearray.jl |  8 ++-
 NDTensors/src/NDTensors.jl                    |  1 +
 NDTensors/src/arraytensor/arraytensor.jl      |  3 +-
 NDTensors/src/arraytensor/blocksparsearray.jl | 18 +++++++
 NDTensors/test/arraytensor/array.jl           | 45 +++++++++++++++++
 NDTensors/test/arraytensor/arraytensor.jl     | 43 ++--------------
 .../test/arraytensor/blocksparsearray.jl      | 49 +++++++++++++++++++
 8 files changed, 126 insertions(+), 43 deletions(-)
 create mode 100644 NDTensors/src/arraytensor/blocksparsearray.jl
 create mode 100644 NDTensors/test/arraytensor/array.jl
 create mode 100644 NDTensors/test/arraytensor/blocksparsearray.jl

diff --git a/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl b/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl
index dd99166e1b..ddc40bf6d0 100644
--- a/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl
+++ b/NDTensors/src/BlockSparseArrays/src/BlockSparseArrays.jl
@@ -2,6 +2,8 @@ module BlockSparseArrays
 using BlockArrays
 using Dictionaries
 
+using BlockArrays: block
+
 export BlockSparseArray, SparseArray
 
 include("sparsearray.jl")
 include("blocksparsearray.jl")
diff --git a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
index 48f18cede7..247881b199 100644
--- a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
+++ b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
@@ -19,11 +19,11 @@ end
 (f::BlockZero)(T::Type, I::CartesianIndex) = fill!(T(undef, block_size(f.axes, Block(Tuple(I)))), false)
 
-function BlockSparseArray(blocks::AbstractVector{<:Block}, blockdata::AbstractVector, axes::Tuple)
+function BlockSparseArray(blocks::AbstractVector{<:Block{N}}, blockdata::AbstractVector, axes::NTuple{N}) where {N}
   return BlockSparseArray(Dictionary(blocks, blockdata), axes)
 end
 
-function BlockSparseArray(blockdata::Dictionary{<:Block}, axes::Tuple)
+function BlockSparseArray(blockdata::Dictionary{<:Block{N}}, axes::NTuple{N,AbstractUnitRange{Int}}) where {N}
   blocks = keys(blockdata)
   cartesianblocks = map(block -> CartesianIndex(block.n), blocks)
   cartesiandata = Dictionary(cartesianblocks, blockdata)
   block_storage = SparseArray(cartesiandata, blocklength.(axes), BlockZero(axes))
   return BlockSparseArray(block_storage, axes)
 end
 
+function BlockSparseArray(blockdata::Dictionary{<:Block{N}}, blockinds::NTuple{N,AbstractVector}) where {N}
+  return BlockSparseArray(blockdata, blockedrange.(blockinds))
+end
+
 Base.axes(block_arr::BlockSparseArray) = block_arr.axes
diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl
index 8550b9bdc4..7df0990789 100644
--- a/NDTensors/src/NDTensors.jl
+++ b/NDTensors/src/NDTensors.jl
@@ -119,6 +119,7 @@ include("empty/adapt.jl")
 #
 include("arraytensor/arraytensor.jl")
 include("arraytensor/array.jl")
+include("arraytensor/blocksparsearray.jl")
 
 #####################################
 # Deprecations
diff --git a/NDTensors/src/arraytensor/arraytensor.jl b/NDTensors/src/arraytensor/arraytensor.jl
index d91b4a6d01..9ea03f7dc4 100644
--- a/NDTensors/src/arraytensor/arraytensor.jl
+++ b/NDTensors/src/arraytensor/arraytensor.jl
@@ -1,7 +1,7 @@
 # Used for dispatch to distinguish from Tensors wrapping TensorStorage.
 # Remove once TensorStorage is removed.
 const ArrayStorage{T,N} = Union{
-  Array{T,N},ReshapedArray{T,N},SubArray{T,N},PermutedDimsArray{T,N},StridedView{T,N}
+  Array{T,N},ReshapedArray{T,N},SubArray{T,N},PermutedDimsArray{T,N},StridedView{T,N},BlockSparseArray{T,N}
 }
 const MatrixStorage{T} = Union{
   ArrayStorage{T,2},
@@ -41,6 +41,7 @@ function setindex!(tensor::MatrixOrArrayStorageTensor, v, I::Integer...)
   return tensor
 end
 
+# TODO: Just call `contraction_output(storage(tensor1), storage(tensor2), indsR)`
 function contraction_output(
   tensor1::MatrixOrArrayStorageTensor, tensor2::MatrixOrArrayStorageTensor, indsR
 )
diff --git a/NDTensors/src/arraytensor/blocksparsearray.jl b/NDTensors/src/arraytensor/blocksparsearray.jl
new file mode 100644
index 0000000000..002a147109
--- /dev/null
+++ b/NDTensors/src/arraytensor/blocksparsearray.jl
@@ -0,0 +1,18 @@
+# TODO: Implement.
+function contraction_output(
+  tensor1::BlockSparseArray, tensor2::BlockSparseArray, indsR
+)
+  return error("Not implemented")
+end
+
+# TODO: Implement.
+function contract!(
+  tensorR::BlockSparseArray,
+  labelsR,
+  tensor1::BlockSparseArray,
+  labels1,
+  tensor2::BlockSparseArray,
+  labels2,
+)
+  return error("Not implemented")
+end
diff --git a/NDTensors/test/arraytensor/array.jl b/NDTensors/test/arraytensor/array.jl
new file mode 100644
index 0000000000..39809d0db3
--- /dev/null
+++ b/NDTensors/test/arraytensor/array.jl
@@ -0,0 +1,45 @@
+using NDTensors
+using LinearAlgebra
+using Test
+
+using NDTensors: storage, storagetype
+
+@testset "Tensor wrapping Array" begin
+  is1 = (2, 3)
+  D1 = randn(is1)
+
+  is2 = (3, 4)
+  D2 = randn(is2)
+
+  T1 = tensor(D1, is1)
+  T2 = tensor(D2, is2)
+
+  @test T1[1, 1] == D1[1, 1]
+
+  x = rand()
+  T1[1, 1] = x
+
+  @test T1[1, 1] == x
+  @test array(T1) == D1
+  @test storagetype(T1) <: Matrix{Float64}
+  @test storage(T1) == D1
+  @test eltype(T1) == eltype(D1)
+  @test inds(T1) == is1
+
+  R = T1 * T2
+  @test storagetype(R) <: Matrix{Float64}
+  @test Array(R) ≈ Array(T1) * Array(T2)
+
+  T1r = randn!(similar(T1))
+  @test Array(T1r + T1) ≈ Array(T1r) + Array(T1)
+  @test Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1))
+
+  U, S, V = svd(T1)
+  @test U * S * V ≈ T1
+
+  T12 = contract(T1, (1, -1), T2, (-1, 2))
+  @test T12 ≈ T1 * T2
+
+  D12 = contract(D1, (1, -1), D2, (-1, 2))
+  @test D12 ≈ Array(T12)
+end
diff --git a/NDTensors/test/arraytensor/arraytensor.jl b/NDTensors/test/arraytensor/arraytensor.jl
index 39809d0db3..a71944d01c 100644
--- a/NDTensors/test/arraytensor/arraytensor.jl
+++ b/NDTensors/test/arraytensor/arraytensor.jl
@@ -2,44 +2,7 @@ using NDTensors
 using LinearAlgebra
 using Test
 
-using NDTensors: storage, storagetype
-
-@testset "Tensor wrapping Array" begin
-  is1 = (2, 3)
-  D1 = randn(is1)
-
-  is2 = (3, 4)
-  D2 = randn(is2)
-
-  T1 = tensor(D1, is1)
-  T2 = tensor(D2, is2)
-
-  @test T1[1, 1] == D1[1, 1]
-
-  x = rand()
-  T1[1, 1] = x
-
-  @test T1[1, 1] == x
-  @test array(T1) == D1
-  @test storagetype(T1) <: Matrix{Float64}
-  @test storage(T1) == D1
-  @test eltype(T1) == eltype(D1)
-  @test inds(T1) == is1
-
-  R = T1 * T2
-  @test storagetype(R) <: Matrix{Float64}
-  @test Array(R) ≈ Array(T1) * Array(T2)
-
-  T1r = randn!(similar(T1))
-  @test Array(T1r + T1) ≈ Array(T1r) + Array(T1)
-  @test Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1))
-
-  U, S, V = svd(T1)
-  @test U * S * V ≈ T1
-
-  T12 = contract(T1, (1, -1), T2, (-1, 2))
-  @test T12 ≈ T1 * T2
-
-  D12 = contract(D1, (1, -1), D2, (-1, 2))
-  @test D12 ≈ Array(T12)
+@testset "Tensor wrapping AbstractArrays" begin
+  include("array.jl")
+  include("blocksparsearray.jl")
 end
diff --git a/NDTensors/test/arraytensor/blocksparsearray.jl b/NDTensors/test/arraytensor/blocksparsearray.jl
new file mode 100644
index 0000000000..1c2accf7ee
--- /dev/null
+++ b/NDTensors/test/arraytensor/blocksparsearray.jl
@@ -0,0 +1,49 @@
+using NDTensors
+using NDTensors.BlockSparseArrays
+using BlockArrays
+using LinearAlgebra
+using Test
+
+using BlockArrays: Block
+
+using NDTensors: storage, storagetype
+
+@testset "Tensor wrapping BlockSparseArray" begin
+  is1 = ([1, 1], [1, 2])
+  D1 = BlockSparseArray([Block(1, 1), Block(2, 2)], [randn(1, 1), randn(1, 2)], is1)
+
+  is2 = ([1, 2], [2, 2])
+  D2 = BlockSparseArray([Block(1, 1), Block(2, 2)], [randn(1, 2), randn(2, 2)], is2)
+
+  T1 = tensor(D1, is1)
+  T2 = tensor(D2, is2)
+
+  @test T1[1, 1] == D1[1, 1]
+
+  x = rand()
+  T1[1, 1] = x
+
+  @test T1[1, 1] == x
+  @test array(T1) == D1
+  @test storagetype(T1) <: BlockSparseArray{Float64,2}
+  @test storage(T1) == D1
+  @test eltype(T1) == eltype(D1)
+  @test inds(T1) == is1
+
+  @test_broken R = T1 * T2
+  @test_broken storagetype(R) <: Matrix{Float64}
+  @test_broken Array(R) ≈ Array(T1) * Array(T2)
+
+  @test_broken T1r = randn!(similar(T1))
+  @test_broken Array(T1r + T1) ≈ Array(T1r) + Array(T1)
+  @test_broken Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1))
+
+  U, S, V = svd(T1)
+  @test U * S * V ≈ T1
+
+  @test_broken T12 = contract(T1, (1, -1), T2, (-1, 2))
+  @test_broken T12 ≈ T1 * T2
+
+  @test_broken D12 = contract(D1, (1, -1), D2, (-1, 2))
+  @test_broken D12 ≈ Array(T12)
+end

From d9b7d970ff20697ed19ba50574766cd71162d3d3 Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 3 Oct 2023 17:10:22 -0400
Subject: [PATCH 6/8] Format

---
 .../BlockSparseArrays/src/blocksparsearray.jl    | 16 ++++++++++++----
 NDTensors/src/BlockSparseArrays/test/runtests.jl |  7 +++++--
 NDTensors/src/arraytensor/arraytensor.jl         |  7 ++++++-
 NDTensors/src/arraytensor/blocksparsearray.jl    |  4 +---
 4 files changed, 24 insertions(+), 10 deletions(-)

diff --git a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
index 247881b199..440c1d2e6a 100644
--- a/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
+++ b/NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
@@ -17,13 +17,19 @@ struct BlockZero{Axes}
   axes::Axes
 end
 
-(f::BlockZero)(T::Type, I::CartesianIndex) = fill!(T(undef, block_size(f.axes, Block(Tuple(I)))), false)
+function (f::BlockZero)(T::Type, I::CartesianIndex)
+  return fill!(T(undef, block_size(f.axes, Block(Tuple(I)))), false)
+end
 
-function BlockSparseArray(blocks::AbstractVector{<:Block{N}}, blockdata::AbstractVector, axes::NTuple{N}) where {N}
+function BlockSparseArray(
+  blocks::AbstractVector{<:Block{N}}, blockdata::AbstractVector, axes::NTuple{N}
+) where {N}
   return BlockSparseArray(Dictionary(blocks, blockdata), axes)
 end
 
-function BlockSparseArray(blockdata::Dictionary{<:Block{N}}, axes::NTuple{N,AbstractUnitRange{Int}}) where {N}
+function BlockSparseArray(
+  blockdata::Dictionary{<:Block{N}}, axes::NTuple{N,AbstractUnitRange{Int}}
+) where {N}
   blocks = keys(blockdata)
   cartesianblocks = map(block -> CartesianIndex(block.n), blocks)
   cartesiandata = Dictionary(cartesianblocks, blockdata)
@@ -31,7 +37,9 @@ function BlockSparseArray(blockdata::Dictionary{<:Block{N}}, axes::NTuple{N,Abst
   return BlockSparseArray(block_storage, axes)
 end
 
-function BlockSparseArray(blockdata::Dictionary{<:Block{N}}, blockinds::NTuple{N,AbstractVector}) where {N}
+function BlockSparseArray(
+  blockdata::Dictionary{<:Block{N}}, blockinds::NTuple{N,AbstractVector}
+) where {N}
   return BlockSparseArray(blockdata, blockedrange.(blockinds))
 end
diff --git a/NDTensors/src/BlockSparseArrays/test/runtests.jl b/NDTensors/src/BlockSparseArrays/test/runtests.jl
index 32d43ae501..a048b0ac32 100644
--- a/NDTensors/src/BlockSparseArrays/test/runtests.jl
+++ b/NDTensors/src/BlockSparseArrays/test/runtests.jl
@@ -3,7 +3,10 @@ using NDTensors.BlockSparseArrays
 
 @testset "Test NDTensors.BlockSparseArrays" begin
   @testset "README" begin
-    @test include(joinpath(pkgdir(BlockSparseArrays), "src", "BlockSparseArrays", "examples", "README.jl")) isa Any
+    @test include(
+      joinpath(
+        pkgdir(BlockSparseArrays), "src", "BlockSparseArrays", "examples", "README.jl"
+      ),
+    ) isa Any
   end
 end
-
diff --git a/NDTensors/src/arraytensor/arraytensor.jl b/NDTensors/src/arraytensor/arraytensor.jl
index 9ea03f7dc4..13607d240e 100644
--- a/NDTensors/src/arraytensor/arraytensor.jl
+++ b/NDTensors/src/arraytensor/arraytensor.jl
@@ -1,7 +1,12 @@
 # Used for dispatch to distinguish from Tensors wrapping TensorStorage.
 # Remove once TensorStorage is removed.
 const ArrayStorage{T,N} = Union{
-  Array{T,N},ReshapedArray{T,N},SubArray{T,N},PermutedDimsArray{T,N},StridedView{T,N},BlockSparseArray{T,N}
+  Array{T,N},
+  ReshapedArray{T,N},
+  SubArray{T,N},
+  PermutedDimsArray{T,N},
+  StridedView{T,N},
+  BlockSparseArray{T,N},
 }
 const MatrixStorage{T} = Union{
   ArrayStorage{T,2},
diff --git a/NDTensors/src/arraytensor/blocksparsearray.jl b/NDTensors/src/arraytensor/blocksparsearray.jl
index 002a147109..53da75cbf6 100644
--- a/NDTensors/src/arraytensor/blocksparsearray.jl
+++ b/NDTensors/src/arraytensor/blocksparsearray.jl
@@ -1,5 +1,5 @@
 # TODO: Implement.
-function contraction_output(
-  tensor1::BlockSparseArray, tensor2::BlockSparseArray, indsR
-)
+function contraction_output(tensor1::BlockSparseArray, tensor2::BlockSparseArray, indsR)
   return error("Not implemented")
 end

From a8e13a76a9bff8c01b57c92c2a8587fa23a50745 Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 3 Oct 2023 20:22:59 -0400
Subject: [PATCH 7/8] Fix tests

---
 NDTensors/Project.toml                         |  1 +
 NDTensors/src/BlockSparseArrays/README.md      | 12 ++++++------
 .../src/BlockSparseArrays/examples/README.jl   | 12 ++++++------
 NDTensors/test/BlockSparseArrays.jl            |  4 ++++
 NDTensors/test/Project.toml                    |  1 +
 NDTensors/test/arraytensor/blocksparsearray.jl | 12 +++++++-----
 NDTensors/test/runtests.jl                     |  1 +
 7 files changed, 26 insertions(+), 17 deletions(-)
 create mode 100644 NDTensors/test/BlockSparseArrays.jl

diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml
index b33322a4f6..ce0ee0231c 100644
--- a/NDTensors/Project.toml
+++ b/NDTensors/Project.toml
@@ -36,6 +36,7 @@ NDTensorsTBLISExt = "TBLIS"
 
 [compat]
 Adapt = "3.5"
+BlockArrays = "0.16"
 Compat = "4.9"
 Dictionaries = "0.3.5"
 FLoops = "0.2.1"
diff --git a/NDTensors/src/BlockSparseArrays/README.md b/NDTensors/src/BlockSparseArrays/README.md
index 27fd9564f3..ae3a119fbe 100644
--- a/NDTensors/src/BlockSparseArrays/README.md
+++ b/NDTensors/src/BlockSparseArrays/README.md
@@ -8,7 +8,7 @@ to store non-zero values, specifically a `Dictionary` from `Dictionaries.jl`.
 
 ````julia
 using NDTensors.BlockSparseArrays
-using BlockArrays
+using BlockArrays: BlockArrays, blockedrange
 
 # Block dimensions
 i1 = [2, 3]
@@ -17,11 +17,11 @@ i2 = [2, 3]
 i_axes = (blockedrange(i1), blockedrange(i2))
 
 function block_size(axes, block)
-  return length.(getindex.(axes, Block.(block.n)))
+  return length.(getindex.(axes, BlockArrays.Block.(block.n)))
 end
 
 # Data
-nz_blocks = [Block(1, 1), Block(2, 2)]
+nz_blocks = BlockArrays.Block.([(1, 1), (2, 2)])
 nz_block_sizes = [block_size(i_axes, nz_block) for nz_block in nz_blocks]
 nz_block_lengths = prod.(nz_block_sizes)
 
@@ -35,13 +35,13 @@ d_blocks = randn.(nz_block_sizes)
 
 B = BlockSparseArray(nz_blocks, d_blocks, i_axes)
 
 # Access a block
-B[Block(1, 1)]
+B[BlockArrays.Block(1, 1)]
 
 # Access a zero block, returns a zero matrix
-B[Block(1, 2)]
+B[BlockArrays.Block(1, 2)]
 
 # Set a zero block
-B[Block(1, 2)] = randn(2, 3)
+B[BlockArrays.Block(1, 2)] = randn(2, 3)
 
 # Matrix multiplication (not optimized for sparsity yet)
 B * B
diff --git a/NDTensors/src/BlockSparseArrays/examples/README.jl b/NDTensors/src/BlockSparseArrays/examples/README.jl
index 3a73fc692c..a55c39e2d0 100644
--- a/NDTensors/src/BlockSparseArrays/examples/README.jl
+++ b/NDTensors/src/BlockSparseArrays/examples/README.jl
@@ -7,7 +7,7 @@
 # `BlockArrays` reinterprets the `SparseArray` as a blocked data structure.
 
 using NDTensors.BlockSparseArrays
-using BlockArrays
+using BlockArrays: BlockArrays, blockedrange
 
 ## Block dimensions
 i1 = [2, 3]
@@ -16,11 +16,11 @@ i2 = [2, 3]
 i_axes = (blockedrange(i1), blockedrange(i2))
 
 function block_size(axes, block)
-  return length.(getindex.(axes, Block.(block.n)))
+  return length.(getindex.(axes, BlockArrays.Block.(block.n)))
 end
 
 ## Data
-nz_blocks = [Block(1, 1), Block(2, 2)]
+nz_blocks = BlockArrays.Block.([(1, 1), (2, 2)])
 nz_block_sizes = [block_size(i_axes, nz_block) for nz_block in nz_blocks]
 nz_block_lengths = prod.(nz_block_sizes)
 
@@ -34,13 +34,13 @@ d_blocks = randn.(nz_block_sizes)
 
 B = BlockSparseArray(nz_blocks, d_blocks, i_axes)
 
 ## Access a block
-B[Block(1, 1)]
+B[BlockArrays.Block(1, 1)]
 
 ## Access a zero block, returns a zero matrix
-B[Block(1, 2)]
+B[BlockArrays.Block(1, 2)]
 
 ## Set a zero block
-B[Block(1, 2)] = randn(2, 3)
+B[BlockArrays.Block(1, 2)] = randn(2, 3)
 
 ## Matrix multiplication (not optimized for sparsity yet)
 B * B
diff --git a/NDTensors/test/BlockSparseArrays.jl b/NDTensors/test/BlockSparseArrays.jl
new file mode 100644
index 0000000000..5d1345d0e7
--- /dev/null
+++ b/NDTensors/test/BlockSparseArrays.jl
@@ -0,0 +1,4 @@
+using Test
+using NDTensors
+
+include(joinpath(pkgdir(NDTensors), "src", "BlockSparseArrays", "test", "runtests.jl"))
diff --git a/NDTensors/test/Project.toml b/NDTensors/test/Project.toml
index 131682dd8c..c1c309317a 100644
--- a/NDTensors/test/Project.toml
+++ b/NDTensors/test/Project.toml
@@ -1,4 +1,5 @@
 [deps]
+BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
 CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
 Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
 ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
diff --git a/NDTensors/test/arraytensor/blocksparsearray.jl b/NDTensors/test/arraytensor/blocksparsearray.jl
index 1c2accf7ee..5b87dc7b09 100644
--- a/NDTensors/test/arraytensor/blocksparsearray.jl
+++ b/NDTensors/test/arraytensor/blocksparsearray.jl
@@ -1,19 +1,21 @@
 using NDTensors
 using NDTensors.BlockSparseArrays
-using BlockArrays
+using BlockArrays: BlockArrays
 using LinearAlgebra
 using Test
 
-using BlockArrays: Block
-
 using NDTensors: storage, storagetype
 
 @testset "Tensor wrapping BlockSparseArray" begin
   is1 = ([1, 1], [1, 2])
-  D1 = BlockSparseArray([Block(1, 1), Block(2, 2)], [randn(1, 1), randn(1, 2)], is1)
+  D1 = BlockSparseArray(
+    [BlockArrays.Block(1, 1), BlockArrays.Block(2, 2)], [randn(1, 1), randn(1, 2)], is1
+  )
 
   is2 = ([1, 2], [2, 2])
-  D2 = BlockSparseArray([Block(1, 1), Block(2, 2)], [randn(1, 2), randn(2, 2)], is2)
+  D2 = BlockSparseArray(
+    [BlockArrays.Block(1, 1), BlockArrays.Block(2, 2)], [randn(1, 2), randn(2, 2)], is2
+  )
 
   T1 = tensor(D1, is1)
   T2 = tensor(D2, is2)
diff --git a/NDTensors/test/runtests.jl b/NDTensors/test/runtests.jl
index 8e4f1733f8..222353618a 100644
--- a/NDTensors/test/runtests.jl
+++ b/NDTensors/test/runtests.jl
@@ -19,6 +19,8 @@ end
 
 @safetestset "NDTensors" begin
   @testset "$filename" for filename in [
+    "BlockSparseArrays.jl",
     "SetParameters.jl",
     "SmallVectors.jl",
     "linearalgebra.jl",

From 1f2b1638bd65858d7554c443db0290a163a679b0 Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Tue, 3 Oct 2023 20:48:23 -0400
Subject: [PATCH 8/8] Comment out broken SVD test

---
 NDTensors/test/arraytensor/blocksparsearray.jl | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/NDTensors/test/arraytensor/blocksparsearray.jl b/NDTensors/test/arraytensor/blocksparsearray.jl
index 5b87dc7b09..5d508608a5 100644
--- a/NDTensors/test/arraytensor/blocksparsearray.jl
+++ b/NDTensors/test/arraytensor/blocksparsearray.jl
@@ -40,8 +40,9 @@ using NDTensors: storage, storagetype
   @test_broken Array(T1r + T1) ≈ Array(T1r) + Array(T1)
   @test_broken Array(permutedims(T1, (2, 1))) ≈ permutedims(Array(T1), (2, 1))
 
-  U, S, V = svd(T1)
-  @test U * S * V ≈ T1
+  # TODO: Not implemented yet.
+  ## U, S, V = svd(T1)
+  ## @test U * S * V ≈ T1
 
   @test_broken T12 = contract(T1, (1, -1), T2, (-1, 2))
   @test_broken T12 ≈ T1 * T2
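
The core pattern running through these patches is a dictionary-of-keys sparse array whose unstored entries are materialized on read by a callable zero object (`BlockZero` above), since plain `zero(T)` cannot know the shape of a matrix-valued element. As a reading aid, here is a minimal, self-contained Julia sketch of that pattern under stated assumptions; the names `MiniSparse` and `zerofunc` are hypothetical stand-ins for illustration, not part of the NDTensors code:

```julia
using Dictionaries: Dictionary, set!

# Toy dictionary-of-keys array: stored values live in a `Dictionary` keyed by
# `CartesianIndex`, and reads of unstored entries fall back to a caller-supplied
# `zero(T, I)` function, mirroring the role `BlockZero` plays above.
struct MiniSparse{T,N,Zero} <: AbstractArray{T,N}
  data::Dictionary{CartesianIndex{N},T}
  dims::NTuple{N,Int}
  zero::Zero
end

Base.size(a::MiniSparse) = a.dims

function Base.getindex(a::MiniSparse{T,N}, I::Vararg{Int,N}) where {T,N}
  return get(a.data, CartesianIndex(I), a.zero(T, CartesianIndex(I)))
end

function Base.setindex!(a::MiniSparse{T,N}, v, I::Vararg{Int,N}) where {T,N}
  set!(a.data, CartesianIndex(I), v)
  return a
end

# Usage: a 2×2 array of matrix-valued elements where unstored entries
# materialize as zero matrices (of an assumed fixed 2×2 shape) on read.
zerofunc(T, I) = zeros(2, 2)
a = MiniSparse(Dictionary{CartesianIndex{2},Matrix{Float64}}(), (2, 2), zerofunc)
a[1, 1] = ones(2, 2)
@assert a[2, 2] == zeros(2, 2) # structurally zero entry, created on the fly
@assert length(a.data) == 1    # reading the zero entry did not store it
```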