Skip to content

Commit

Permalink
[SparseArrayInterface] [BlockSparseArrays] Sparse and block sparse do…
Browse files Browse the repository at this point in the history
…t products
  • Loading branch information
mtfishman committed Nov 12, 2024
1 parent 99513c3 commit 1764f0a
Show file tree
Hide file tree
Showing 9 changed files with 141 additions and 21 deletions.
Original file line number Diff line number Diff line change
@@ -1,13 +1,23 @@
using ArrayLayouts: ArrayLayouts, MemoryLayout, MulAdd
using ArrayLayouts: ArrayLayouts, DualLayout, MemoryLayout, MulAdd
using BlockArrays: BlockLayout
using ..SparseArrayInterface: SparseLayout
using ..TypeParameterAccessors: similartype
using ..TypeParameterAccessors: parenttype, similartype

# The `ArrayLayouts` layout of a block sparse array combines the layout
# of the sparse array-of-arrays storage (outer layout) with the layout
# of the individual blocks (inner layout).
function ArrayLayouts.MemoryLayout(arraytype::Type{<:BlockSparseArrayLike})
    outer = MemoryLayout(blockstype(arraytype))
    inner = MemoryLayout(blocktype(arraytype))
    return BlockLayout{typeof(outer),typeof(inner)}()
end
# The adjoint of a block sparse vector is a (lazy) row vector, so it gets
# a `DualLayout` wrapping the layout of the parent vector.
function ArrayLayouts.MemoryLayout(
    arraytype::Type{<:Adjoint{<:Any,<:AbstractBlockSparseVector}}
)
    parent_layout = MemoryLayout(parenttype(arraytype))
    return DualLayout{typeof(parent_layout)}()
end
# The transpose of a block sparse vector is a (lazy) row vector, so it gets
# a `DualLayout` wrapping the layout of the parent vector.
function ArrayLayouts.MemoryLayout(
    arraytype::Type{<:Transpose{<:Any,<:AbstractBlockSparseVector}}
)
    parent_layout = MemoryLayout(parenttype(arraytype))
    return DualLayout{typeof(parent_layout)}()
end

function Base.similar(
mul::MulAdd{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout},<:Any,<:Any,A,B},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,13 @@ function Base.similar(
return similar(arraytype, eltype(arraytype), axes)
end

# Fixes an ambiguity error when `similar` is called with a tuple of
# `Base.OneTo` axes (as generic `AbstractArray` code does); forwards to
# the method that takes an explicit element type.
function Base.similar(
arraytype::Type{<:BlockSparseArrayLike}, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}}
)
return similar(arraytype, eltype(arraytype), axes)
end

# Needed by `BlockArrays` matrix multiplication interface
# TODO: This fixes an ambiguity error with `OffsetArrays.jl`, but
# it only appears to be needed in older versions of Julia like v1.6.
Expand Down Expand Up @@ -221,7 +228,7 @@ function Base.similar(
end

# Fixes an ambiguity error for `similar` with an empty axes tuple
# (zero-dimensional output); forwards to `blocksparse_similar`.
# NOTE: the diff scrape duplicated the superseded signature
# (`BlockSparseArrayLike{<:Any,0}`) on the preceding line; this is the
# current definition, which accepts any `BlockSparseArrayLike`.
function Base.similar(a::BlockSparseArrayLike, elt::Type, axes::Tuple{})
    return blocksparse_similar(a, elt, axes)
end

Expand Down
Original file line number Diff line number Diff line change
@@ -1,23 +1,48 @@
using ArrayLayouts: ArrayLayouts, MatMulMatAdd
using ArrayLayouts: ArrayLayouts, Dot, MatMulMatAdd, MatMulVecAdd, MulAdd
using BlockArrays: BlockLayout
using ..SparseArrayInterface: SparseLayout
using LinearAlgebra: mul!
using LinearAlgebra: dot, mul!

# In-place multiply-add for block sparse arrays:
# `a_dest = α * a1 * a2 + β * a_dest`, performed by dispatching `mul!`
# to the block data structures (`blocks`) of the operands.
# NOTE: the diff scrape interleaved the superseded `AbstractMatrix`
# signature with the current `AbstractArray` one; this is the current
# definition, generalized to vectors as well as matrices.
function blocksparse_muladd!(
    α::Number, a1::AbstractArray, a2::AbstractArray, β::Number, a_dest::AbstractArray
)
    mul!(blocks(a_dest), blocks(a1), blocks(a2), α, β)
    return a_dest
end

# Materialize a lazy `MulAdd` over block sparse operands by forwarding
# its factors, scalars, and destination to `blocksparse_muladd!`.
# Returns the destination array `m.C`.
function blocksparse_matmul!(m::MulAdd)
    blocksparse_muladd!(m.α, m.A, m.B, m.β, m.C)
    return m.C
end

# Matrix-matrix multiply-add for block sparse operands: materialize the
# lazy `MulAdd` via `blocksparse_matmul!` and return the destination.
# NOTE: the diff scrape interleaved the superseded body (which inlined
# `blocksparse_muladd!`) with the current one; this is the current
# definition, which delegates to `blocksparse_matmul!`.
function ArrayLayouts.materialize!(
    m::MatMulMatAdd{
        <:BlockLayout{<:SparseLayout},
        <:BlockLayout{<:SparseLayout},
        <:BlockLayout{<:SparseLayout},
    },
)
    blocksparse_matmul!(m)
    return m.C
end
# Matrix-vector multiply-add for block sparse operands: materialize the
# lazy `MulAdd` via `blocksparse_matmul!` and return the destination `m.C`.
function ArrayLayouts.materialize!(
m::MatMulVecAdd{
<:BlockLayout{<:SparseLayout},
<:BlockLayout{<:SparseLayout},
<:BlockLayout{<:SparseLayout},
},
)
blocksparse_matmul!(m)
return m.C
end

# Dot product of two block sparse arrays, computed as the dot product of
# their block data structures (`blocks`).
function blocksparse_dot(a1::AbstractArray, a2::AbstractArray)
# TODO: Add a check that the blocking of `a1` and `a2` are
# the same, or the same up to a reshape.
return dot(blocks(a1), blocks(a2))
end

# Hook into `ArrayLayouts`: evaluate a lazy `Dot` of two block sparse
# arrays with `blocksparse_dot`.
function Base.copy(d::Dot{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout}})
return blocksparse_dot(d.A, d.B)
end
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ reverse_index(index::CartesianIndex) = CartesianIndex(reverse(Tuple(index)))

# Represents the array of arrays of a `Transpose`
# wrapping a block sparse array, i.e. `blocks(array)` where `array` is a `Transpose`.
# NOTE: the diff scrape duplicated the superseded struct line
# (`BlockType<:AbstractMatrix{T}`); this is the current definition, with
# `BlockType` widened to `AbstractArray` so non-matrix blocks are supported.
struct SparseTransposeBlocks{T,BlockType<:AbstractArray{T},Array<:Transpose{T}} <:
       AbstractSparseMatrix{BlockType}
    # The wrapped `Transpose` whose blocks this array-of-arrays exposes.
    array::Array
end
Expand Down Expand Up @@ -219,7 +219,7 @@ end

# Represents the array of arrays of a `Adjoint`
# wrapping a block sparse array, i.e. `blocks(array)` where `array` is an `Adjoint`.
# NOTE: the diff scrape duplicated the superseded struct line
# (`BlockType<:AbstractMatrix{T}`); this is the current definition, with
# `BlockType` widened to `AbstractArray` so non-matrix blocks are supported.
struct SparseAdjointBlocks{T,BlockType<:AbstractArray{T},Array<:Adjoint{T}} <:
       AbstractSparseMatrix{BlockType}
    # The wrapped `Adjoint` whose blocks this array-of-arrays exposes.
    array::Array
end
Expand Down
16 changes: 15 additions & 1 deletion NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ using BlockArrays:
mortar
using Compat: @compat
using GPUArraysCore: @allowscalar
using LinearAlgebra: Adjoint, mul!, norm
using LinearAlgebra: Adjoint, dot, mul!, norm
using NDTensors.BlockSparseArrays:
@view!,
BlockSparseArray,
Expand Down Expand Up @@ -575,7 +575,11 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
a[b] = randn(elt, size(a[b]))
end
@test isassigned(a, 1, 1)
@test isassigned(a, 1, 1, 1)
@test !isassigned(a, 1, 1, 2)
@test isassigned(a, 5, 7)
@test isassigned(a, 5, 7, 1)
@test !isassigned(a, 5, 7, 2)
@test !isassigned(a, 0, 1)
@test !isassigned(a, 5, 8)
@test isassigned(a, Block(1), Block(1))
Expand Down Expand Up @@ -852,6 +856,16 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
@allowscalar @test Array(a_dest) ≈ Array(a1′) * Array(a2′)
end
end
@testset "Dot product" begin
a1 = dev(BlockSparseArray{elt}([2, 3, 4]))
a1[Block(1)] = dev(randn(elt, size(@view(a1[Block(1)]))))
a1[Block(3)] = dev(randn(elt, size(@view(a1[Block(3)]))))
a2 = dev(BlockSparseArray{elt}([2, 3, 4]))
a2[Block(2)] = dev(randn(elt, size(@view(a1[Block(2)]))))
a2[Block(3)] = dev(randn(elt, size(@view(a1[Block(3)]))))
@test a1' * a2 ≈ Array(a1)' * Array(a2)
@test dot(a1, a2) ≈ a1' * a2
end
@testset "TensorAlgebra" begin
a1 = dev(BlockSparseArray{elt}([2, 3], [2, 3]))
a1[Block(1, 1)] = dev(randn(elt, size(@view(a1[Block(1, 1)]))))
Expand Down
Original file line number Diff line number Diff line change
@@ -1,13 +1,28 @@
using ArrayLayouts: ArrayLayouts, MatMulMatAdd
using ArrayLayouts: ArrayLayouts, Dot, MatMulMatAdd, MatMulVecAdd, MulAdd

# Sparse arrays (and wrappers thereof) advertise a `SparseLayout` to the
# `ArrayLayouts` machinery.
ArrayLayouts.MemoryLayout(::Type{<:SparseArrayLike}) = SparseLayout()

function ArrayLayouts.materialize!(
m::MatMulMatAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout}
)
# Materialize a lazy `MulAdd` over sparse operands by forwarding its
# factors, scalars, and destination to `sparse_mul!`. Returns `m.C`.
function sparse_matmul!(m::MulAdd)
    sparse_mul!(m.C, m.A, m.B, m.α, m.β)
    return m.C
end

# Matrix-matrix multiply-add for sparse operands: materialize the lazy
# `MulAdd` via `sparse_matmul!` and return the destination `m.C`.
function ArrayLayouts.materialize!(
m::MatMulMatAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout}
)
sparse_matmul!(m)
return m.C
end
# Matrix-vector multiply-add for sparse operands: materialize the lazy
# `MulAdd` via `sparse_matmul!` and return the destination `m.C`.
function ArrayLayouts.materialize!(
m::MatMulVecAdd{<:AbstractSparseLayout,<:AbstractSparseLayout,<:AbstractSparseLayout}
)
sparse_matmul!(m)
return m.C
end

# Hook into `ArrayLayouts`: evaluate a lazy `Dot` of two sparse arrays
# with `sparse_dot`.
function Base.copy(d::Dot{<:SparseLayout,<:SparseLayout})
return sparse_dot(d.A, d.B)
end
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
using LinearAlgebra: mul!, norm
using LinearAlgebra: dot, mul!, norm

sparse_norm(a::AbstractArray, p::Real=2) = norm(sparse_storage(a))

Expand All @@ -9,6 +9,14 @@ function mul_indices(I1::CartesianIndex{2}, I2::CartesianIndex{2})
return CartesianIndex(I1[1], I2[2])
end

# 1-dimensional analog of `mul_indices`: two stored vector entries only
# contribute to a product when their indices match; otherwise return
# `nothing` to signal "no contribution".
# The scrape dropped the `≠` operator from the comparison; restored here.
# TODO: Is this needed? Maybe when multiplying vectors?
function mul_indices(I1::CartesianIndex{1}, I2::CartesianIndex{1})
    if I1 ≠ I2
        return nothing
    end
    return CartesianIndex(I1)
end

function default_mul!!(
a_dest::AbstractMatrix,
a1::AbstractMatrix,
Expand All @@ -28,9 +36,9 @@ end

# a1 * a2 * α + a_dest * β
function sparse_mul!(
a_dest::AbstractMatrix,
a1::AbstractMatrix,
a2::AbstractMatrix,
a_dest::AbstractArray,
a1::AbstractArray,
a2::AbstractArray,
α::Number=true,
β::Number=false;
(mul!!)=(default_mul!!),
Expand All @@ -45,3 +53,25 @@ function sparse_mul!(
end
return a_dest
end

# Dot product of two sparse arrays: sum `dot` over the entries stored in
# both arrays (all other terms contribute zero).
function sparse_dot(a1::AbstractArray, a2::AbstractArray)
    # This assumes `a1` and `a2` have the same shape.
    # TODO: Generalize (Base supports dot products of
    # arrays with the same length but different sizes).
    # Use a real error (not `@assert`, which may be compiled out) for
    # input validation.
    size(a1) == size(a2) || throw(
        DimensionMismatch(
            "sparse_dot requires equal sizes, got $(size(a1)) and $(size(a2))"
        ),
    )
    # Accumulator typed from the element types so the loop is type stable.
    dot_dest = zero(Base.promote_op(dot, eltype(a1), eltype(a2)))
    # TODO: First check if the number of stored elements (`nstored`, to be renamed
    # `stored_length`) is smaller in `a1` or `a2` and use whichever one is smaller
    # as the outer loop.
    for I1 in stored_indices(a1)
        # TODO: Overload and use `Base.isstored(a, I) = I in stored_indices(a)` instead.
        # TODO: This assumes fast lookup of indices, which may not always be the case.
        # It could be better to loop over `stored_indices(a2)` and check that
        # `I1 == I2` instead (say using `mul_indices(I1, I2)`). We could have a trait
        # `HasFastIsStored(a::AbstractArray)` to choose between the two.
        if I1 in stored_indices(a2)
            dot_dest += dot(a1[I1], a2[I1])
        end
    end
    return dot_dest
end
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,10 @@ end
# `isassigned` with a `CartesianIndex`: splat into the integer-index method.
function sparse_isassigned(a::AbstractArray{<:Any,N}, I::CartesianIndex{N}) where {N}
return sparse_isassigned(a, Tuple(I)...)
end
# `isassigned` with integer indices, allowing more indices than `ndims(a)`
# as long as the trailing ones are `1` (needed by generic `AbstractArray`
# `show` when `a isa AbstractVector`); the leading `ndims(a)` indices must
# lie within the corresponding axes.
# NOTE: the diff scrape duplicated the superseded `Vararg{Integer,N}`
# signature on the line above, and dropped the `∈` operator; this is the
# reconstructed current definition.
function sparse_isassigned(a::AbstractArray, I::Integer...)
    # Check trailing dimensions are one.
    all(d -> isone(I[d]), (ndims(a) + 1):length(I)) || return false
    return all(dim -> I[dim] ∈ axes(a, dim), 1:ndims(a))
end

Expand Down
16 changes: 16 additions & 0 deletions NDTensors/src/lib/TensorAlgebra/src/LinearAlgebraExtensions/qr.jl
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
using ArrayLayouts: LayoutMatrix
using LinearAlgebra: LinearAlgebra, qr
using ..TensorAlgebra:
TensorAlgebra,
Expand All @@ -8,6 +9,8 @@ using ..TensorAlgebra:
fusedims,
splitdims

# TODO: Define as `tensor_qr`.
# TODO: This looks generic but doesn't work for `BlockSparseArrays`.
function _qr(a::AbstractArray, biperm::BlockedPermutation{2})
a_matricized = fusedims(a, biperm)

Expand Down Expand Up @@ -38,6 +41,12 @@ function LinearAlgebra.qr(a::AbstractMatrix, biperm::BlockedPermutation{2})
return _qr(a, biperm)
end

# Fix ambiguity error with `ArrayLayouts`: `LayoutMatrix` types need their
# own method forwarding to the generic `_qr` implementation.
function LinearAlgebra.qr(a::LayoutMatrix, biperm::BlockedPermutation{2})
return _qr(a, biperm)
end

# TODO: Define in terms of an inner function `_qr` or `tensor_qr`.
function LinearAlgebra.qr(
a::AbstractArray, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple
)
Expand All @@ -50,3 +59,10 @@ function LinearAlgebra.qr(
)
return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r))
end

# Fix ambiguity error with `ArrayLayouts`: the labeled-`qr` entry point for
# `LayoutMatrix`, converting the labels to a blocked permutation via
# `blockedperm_indexin` and forwarding to `qr`.
function LinearAlgebra.qr(
a::LayoutMatrix, labels_a::Tuple, labels_q::Tuple, labels_r::Tuple
)
return qr(a, blockedperm_indexin(labels_a, labels_q, labels_r))
end

0 comments on commit 1764f0a

Please sign in to comment.