From 338feb8c2c3a5535370e8ac0b3956c66d6ba6d5a Mon Sep 17 00:00:00 2001
From: Matt Fishman
Date: Fri, 14 Jun 2024 16:40:12 -0400
Subject: [PATCH 1/5] [Docs] Update RunningOnGPUs.md (#1499)

* [Docs] Update RunningOnGPUs.md

* [ITensors] Bump to v0.6.13
---
 Project.toml              |  2 +-
 docs/src/RunningOnGPUs.md | 17 +++++++++++------
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/Project.toml b/Project.toml
index 8ca62a6d3f..3b05f24e1e 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "ITensors"
 uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
 authors = ["Matthew Fishman ", "Miles Stoudenmire "]
-version = "0.6.12"
+version = "0.6.13"
 
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
diff --git a/docs/src/RunningOnGPUs.md b/docs/src/RunningOnGPUs.md
index 45b9d26bed..25a1df4cfc 100644
--- a/docs/src/RunningOnGPUs.md
+++ b/docs/src/RunningOnGPUs.md
@@ -62,9 +62,14 @@ The table below summarizes each backend's current capabilities.
 
 |                              | CUDA | cuTENSOR | ROCm | Metal | oneAPI |
 |------------------------------|------|------------|--------|--------|--------|
-| Contractions (dense)         | ✓ (cuBLAS) | ✓ | ✓ | ✓ | N/A |
-| QR (dense)                   | ✓ (cuSOLVER) | ✓ (cuSOLVER) | On CPU | On CPU | N/A |
-| SVD (dense)                  | ✓ (cuSOLVER) | ✓ (cuSOLVER) | On CPU | On CPU | N/A |
-| Eigendecomposition (dense)   | ✓ (cuSOLVER) | ✓ (cuSOLVER) | On CPU | On CPU | N/A |
-| Double precision (`Float64`) | ✓ (cuSOLVER) | ✓ | ✓ | N/A | N/A |
-| Block sparse                 | In progress | In progress | In progress | In progress | N/A |
+| Contractions (dense)         | ✓ (cuBLAS) | ✓ | ✓ | ✓ | N/A[^oneapi] |
+| QR (dense)                   | ✓ (cuSOLVER) | ✓ (cuSOLVER) | On CPU[^linalg] | On CPU[^linalg] | N/A[^oneapi] |
+| SVD (dense)                  | ✓ (cuSOLVER) | ✓ (cuSOLVER) | On CPU[^linalg] | On CPU[^linalg] | N/A[^oneapi] |
+| Eigendecomposition (dense)   | ✓ (cuSOLVER) | ✓ (cuSOLVER) | On CPU[^linalg] | On CPU[^linalg] | N/A[^oneapi] |
+| Double precision (`Float64`) | ✓ | ✓ | ✓ | N/A[^metal] | N/A[^oneapi] |
+| Block sparse                 | ✓[^blocksparse] | ✓[^blocksparse] | ✓[^blocksparse] | ✓[^blocksparse] | N/A[^oneapi] |
+
+[^linalg]: Some GPU vendors have not implemented certain matrix factorizations, or the implementations they provide are slower than running on CPU. As a workaround, we transfer the data to CPU, perform those operations there, and transfer the results back to GPU. We will add support for running those operations on GPU as implementations become available. If your algorithm's cost is dominated by those operations, you won't see any speedup from running it on those kinds of GPUs.
+[^blocksparse]: Support is experimental. Operations may not be fully optimized and could have bugs.
+[^oneapi]: We plan to add Intel GPU support through Julia's oneAPI.jl interface but don't currently have any Intel GPUs to test on.
+[^metal]: Apple doesn't support double-precision floating-point operations on its GPUs (see Section 2.1 of the [Metal Shading Language Specification](https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf)). Until it does, we can't support double-precision operations on Apple GPUs.
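As a point of reference for the capability table above: the workflow RunningOnGPUs.md documents is to build tensors on CPU and then adapt their storage with the GPU package's conversion function. A minimal sketch for the CUDA column (assuming ITensors v0.6 and a working CUDA.jl installation; the analogous converters `mtl` from Metal.jl and `roc` from AMDGPU.jl play the same role for the other backends):

```julia
using ITensors
using CUDA

i, j = Index.((2, 2))
A = randomITensor(i, j)

# Contract on CPU.
A * A

# Transfer the storage to the GPU; dense contractions then run on cuBLAS.
Acu = cu(A)
Acu * Acu
```

Per the `[^linalg]` footnote, on ROCm and Metal a factorization such as `svd(Acu, i)` would round-trip through the CPU, so GPU speedups only show up when contraction dominates.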
From 9cf53012ee2262872314648d7275da2d4465a913 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Sat, 15 Jun 2024 14:27:50 -0400
Subject: [PATCH 2/5] CompatHelper: bump compat for StridedViews to 0.3 for
 package NDTensors, (keep existing compat) (#1500)

* CompatHelper: bump compat for StridedViews to 0.3 for package NDTensors, (keep existing compat)

* [NDTensors] Bump to v0.3.30
---
 NDTensors/Project.toml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml
index ece7615a0d..133f6c98ae 100644
--- a/NDTensors/Project.toml
+++ b/NDTensors/Project.toml
@@ -1,7 +1,7 @@
 name = "NDTensors"
 uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
 authors = ["Matthew Fishman "]
-version = "0.3.29"
+version = "0.3.30"
 
 [deps]
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
@@ -42,8 +42,8 @@ TBLIS = "48530278-0828-4a49-9772-0f3830dfa1e9"
 cuTENSOR = "011b41b2-24ef-40a8-b3eb-fa098493e9e1"
 
 [extensions]
-NDTensorsAMDGPUExt = ["AMDGPU","GPUArraysCore"]
-NDTensorsCUDAExt = ["CUDA","GPUArraysCore"]
+NDTensorsAMDGPUExt = ["AMDGPU", "GPUArraysCore"]
+NDTensorsCUDAExt = ["CUDA", "GPUArraysCore"]
 NDTensorsGPUArraysCoreExt = "GPUArraysCore"
 NDTensorsHDF5Ext = "HDF5"
 NDTensorsMetalExt = ["GPUArraysCore", "Metal"]
@@ -59,7 +59,6 @@ ArrayLayouts = "1.4"
 BlockArrays = "1"
 CUDA = "5"
 Compat = "4.9"
-cuTENSOR = "2"
 Dictionaries = "0.4"
 EllipsisNotation = "1.8"
 FillArrays = "1"
@@ -81,11 +80,12 @@ SparseArrays = "1.6"
 SplitApplyCombine = "1.2.2"
 StaticArrays = "0.12, 1.0"
 Strided = "2"
-StridedViews = "0.2.2"
+StridedViews = "0.2.2, 0.3"
 TBLIS = "0.2"
 TimerOutputs = "0.5.5"
 TupleTools = "1.2.0"
 VectorInterface = "0.4.2"
+cuTENSOR = "2"
 julia = "1.6"
 
 [extras]

From b739c4066987b427d8b963ab511ccce949ddc78c Mon Sep 17 00:00:00 2001
From: Miles
Date: Sun, 16 Jun 2024 15:14:18 -0600
Subject: [PATCH 3/5] [SiteTypes] Add S2 operator for S=1 and update code
 style (#1502)

---
 src/lib/SiteTypes/src/sitetypes/aliases.jl |  18 +++
 src/lib/SiteTypes/src/sitetypes/qubit.jl   |  30 ++--
 src/lib/SiteTypes/src/sitetypes/spinone.jl | 156 +++++++++------------
 test/base/test_phys_site_types.jl          |   2 +
 4 files changed, 105 insertions(+), 101 deletions(-)

diff --git a/src/lib/SiteTypes/src/sitetypes/aliases.jl b/src/lib/SiteTypes/src/sitetypes/aliases.jl
index 30e395234c..727ad7f990 100644
--- a/src/lib/SiteTypes/src/sitetypes/aliases.jl
+++ b/src/lib/SiteTypes/src/sitetypes/aliases.jl
@@ -20,3 +20,21 @@ alias(::OpName"ntot") = OpName"Ntot"()
 alias(::OpName"F↑") = OpName"Fup"()
 alias(::OpName"F↓") = OpName"Fdn"()
 alias(::OpName"I") = OpName"Id"()
+
+alias(::OpName"S²") = OpName"S2"()
+alias(::OpName"Sᶻ") = OpName"Sz"()
+alias(::OpName"Sʸ") = OpName"Sy"()
+alias(::OpName"iSʸ") = OpName"iSy"()
+alias(::OpName"Sˣ") = OpName"Sx"()
+alias(::OpName"S⁻") = OpName"S-"()
+alias(::OpName"Sminus") = OpName"S-"()
+alias(::OpName"Sm") = OpName"S-"()
+alias(::OpName"S⁺") = OpName"S+"()
+alias(::OpName"Splus") = OpName"S+"()
+alias(::OpName"Sp") = OpName"S+"()
+alias(::OpName"projUp") = OpName"ProjUp"()
+alias(::OpName"projDn") = OpName"ProjDn"()
+
+alias(::OpName"Proj0") = OpName"ProjUp"()
+alias(::OpName"Proj1") = OpName"ProjDn"()
+alias(::OpName"Rn̂") = OpName"Rn"()
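The aliases added above map each alternate or Unicode spelling onto a single canonical `OpName`, so a site type only needs one defining method per operator and a one-line forwarding method per alias. A small sketch of the intended behavior, modeled on the style of the package's tests (not taken from them):

```julia
using ITensors

s = siteind("S=1")
# "S²" is declared above as an alias of "S2", so both names
# should resolve to the same operator matrix.
@assert Array(op("S²", s), s', s) == Array(op("S2", s), s', s)
```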
op(::OpName"Rn", ::SiteType"Qubit"; θ::Real, ϕ::Real, λ::Real) ] end -function op(::OpName"Rn̂", t::SiteType"Qubit"; kwargs...) - return op(OpName("Rn"), t; kwargs...) +function op(on::OpName"Rn̂", t::SiteType"Qubit"; kwargs...) + return op(alias(on), t; kwargs...) end # @@ -435,68 +435,68 @@ op(::OpName"Sz", ::SiteType"Qubit") = [ 0.0 -0.5 ] -op(::OpName"Sᶻ", t::SiteType"Qubit") = op(OpName("Sz"), t) +op(on::OpName"Sᶻ", t::SiteType"Qubit") = op(alias(on), t) op(::OpName"S+", ::SiteType"Qubit") = [ 0 1 0 0 ] -op(::OpName"S⁺", t::SiteType"Qubit") = op(OpName("S+"), t) +op(on::OpName"S⁺", t::SiteType"Qubit") = op(alias(on), t) -op(::OpName"Splus", t::SiteType"Qubit") = op(OpName("S+"), t) +op(on::OpName"Splus", t::SiteType"Qubit") = op(alias(on), t) op(::OpName"S-", ::SiteType"Qubit") = [ 0 0 1 0 ] -op(::OpName"S⁻", t::SiteType"Qubit") = op(OpName("S-"), t) +op(on::OpName"S⁻", t::SiteType"Qubit") = op(alias(on), t) -op(::OpName"Sminus", t::SiteType"Qubit") = op(OpName("S-"), t) +op(on::OpName"Sminus", t::SiteType"Qubit") = op(alias(on), t) op(::OpName"Sx", ::SiteType"Qubit") = [ 0.0 0.5 0.5 0.0 ] -op(::OpName"Sˣ", t::SiteType"Qubit") = op(OpName("Sx"), t) +op(on::OpName"Sˣ", t::SiteType"Qubit") = op(alias(on), t) op(::OpName"iSy", ::SiteType"Qubit") = [ 0.0 0.5 -0.5 0.0 ] -op(::OpName"iSʸ", t::SiteType"Qubit") = op(OpName("iSy"), t) +op(on::OpName"iSʸ", t::SiteType"Qubit") = op(alias(on), t) op(::OpName"Sy", ::SiteType"Qubit") = [ 0.0 -0.5im 0.5im 0.0 ] -op(::OpName"Sʸ", t::SiteType"Qubit") = op(OpName("Sy"), t) +op(on::OpName"Sʸ", t::SiteType"Qubit") = op(alias(on), t) op(::OpName"S2", ::SiteType"Qubit") = [ 0.75 0.0 0.0 0.75 ] -op(::OpName"S²", t::SiteType"Qubit") = op(OpName("S2"), t) +op(on::OpName"S²", t::SiteType"Qubit") = op(alias(on), t) op(::OpName"ProjUp", ::SiteType"Qubit") = [ 1 0 0 0 ] -op(::OpName"projUp", t::SiteType"Qubit") = op(OpName("ProjUp"), t) +op(on::OpName"projUp", t::SiteType"Qubit") = op(alias(on), t) -op(::OpName"Proj0", t::SiteType"Qubit") = op(OpName("ProjUp"), t) +op(on::OpName"Proj0", t::SiteType"Qubit") = op(alias(on), t) op(::OpName"ProjDn", ::SiteType"Qubit") = [ 0 0 0 1 ] -op(::OpName"projDn", t::SiteType"Qubit") = op(OpName("ProjDn"), t) +op(on::OpName"projDn", t::SiteType"Qubit") = op(alias(on), t) -op(::OpName"Proj1", t::SiteType"Qubit") = op(OpName("ProjDn"), t) +op(on::OpName"Proj1", t::SiteType"Qubit") = op(alias(on), t) diff --git a/src/lib/SiteTypes/src/sitetypes/spinone.jl b/src/lib/SiteTypes/src/sitetypes/spinone.jl index bae0b765df..1de6902243 100644 --- a/src/lib/SiteTypes/src/sitetypes/spinone.jl +++ b/src/lib/SiteTypes/src/sitetypes/spinone.jl @@ -1,5 +1,7 @@ using ..ITensors: complex!, QN +alias(::SiteType"SpinOne") = SiteType"S=1"() + """ space(::SiteType"S=1"; conserve_qns = false, @@ -51,109 +53,91 @@ state(::StateName"Y+", ::SiteType"S=1") = [-1 / 2, -im / sqrt(2), 1 / 2] state(::StateName"Y0", ::SiteType"S=1") = [1 / sqrt(2), 0, 1 / sqrt(2)] state(::StateName"Y-", ::SiteType"S=1") = [-1 / 2, im / sqrt(2), 1 / 2] -function op!(Op::ITensor, ::OpName"Sz", ::SiteType"S=1", s::Index) - Op[s' => 1, s => 1] = +1.0 - return Op[s' => 3, s => 3] = -1.0 -end +op(::OpName"Sz", ::SiteType"S=1") = [ + 1.0 0.0 0.0 + 0.0 0.0 0.0 + 0.0 0.0 -1.0 +] -function op!(Op::ITensor, ::OpName"Sᶻ", t::SiteType"S=1", s::Index) - return op!(Op, OpName("Sz"), t, s) -end +op(on::OpName"Sᶻ", t::SiteType"S=1") = op(alias(on), t) -function op!(Op::ITensor, ::OpName"S+", ::SiteType"S=1", s::Index) - Op[s' => 2, s => 3] = sqrt(2) - return Op[s' => 1, s => 2] = sqrt(2) -end 
+op(::OpName"S+", ::SiteType"S=1") = [ + 0.0 √2 0.0 + 0.0 0.0 √2 + 0.0 0.0 0.0 +] -function op!(Op::ITensor, ::OpName"S⁺", t::SiteType"S=1", s::Index) - return op!(Op, OpName("S+"), t, s) -end +op(on::OpName"S⁺", t::SiteType"S=1") = op(alias(on), t) +op(on::OpName"Splus", t::SiteType"S=1") = op(alias(on), t) +op(on::OpName"Sp", t::SiteType"S=1") = op(alias(on), t) -function op!(Op::ITensor, ::OpName"Splus", t::SiteType"S=1", s::Index) - return op!(Op, OpName("S+"), t, s) -end +op(::OpName"S-", ::SiteType"S=1") = [ + 0.0 0.0 0.0 + √2 0.0 0.0 + 0.0 √2 0.0 +] -function op!(Op::ITensor, ::OpName"Sp", t::SiteType"S=1", s::Index) - return op!(Op, OpName("S+"), t, s) -end +op(on::OpName"S⁻", t::SiteType"S=1") = op(alias(on), t) +op(on::OpName"Sminus", t::SiteType"S=1") = op(alias(on), t) +op(on::OpName"Sm", t::SiteType"S=1") = op(alias(on), t) -function op!(Op::ITensor, ::OpName"S-", ::SiteType"S=1", s::Index) - Op[s' => 3, s => 2] = sqrt(2) - return Op[s' => 2, s => 1] = sqrt(2) -end +op(::OpName"Sx", ::SiteType"S=1") = [ + 0.0 1/√2 0.0 + 1/√2 0.0 1/√2 + 0.0 1/√2 0.0 +] -function op!(Op::ITensor, ::OpName"S⁻", t::SiteType"S=1", s::Index) - return op!(Op, OpName("S-"), t, s) -end +op(on::OpName"Sˣ", t::SiteType"S=1") = op(alias(on), t) -function op!(Op::ITensor, ::OpName"Sminus", t::SiteType"S=1", s::Index) - return op!(Op, OpName("S-"), t, s) -end +op(::OpName"iSy", ::SiteType"S=1") = [ + 0.0 1/√2 0.0 + -1/√2 0.0 1/√2 + 0.0 -1/√2 0.0 +] -function op!(Op::ITensor, ::OpName"Sm", t::SiteType"S=1", s::Index) - return op!(Op, OpName("S-"), t, s) -end +op(on::OpName"iSʸ", t::SiteType"S=1") = op(alias(on), t) -function op!(Op::ITensor, ::OpName"Sx", ::SiteType"S=1", s::Index) - Op[s' => 2, s => 1] = 1 / sqrt(2) - Op[s' => 1, s => 2] = 1 / sqrt(2) - Op[s' => 3, s => 2] = 1 / sqrt(2) - return Op[s' => 2, s => 3] = 1 / sqrt(2) -end +op(::OpName"Sy", ::SiteType"S=1") = [ + 0.0 -im/√2 0.0 + im/√2 0.0 -im/√2 + 0.0 im/√2 0.0 +] -function op!(Op::ITensor, ::OpName"Sˣ", t::SiteType"S=1", s::Index) - return op!(Op, OpName("Sx"), t, s) -end +op(on::OpName"Sʸ", t::SiteType"S=1") = op(alias(on), t) -function op!(Op::ITensor, ::OpName"iSy", ::SiteType"S=1", s::Index) - Op[s' => 2, s => 1] = -1 / sqrt(2) - Op[s' => 1, s => 2] = +1 / sqrt(2) - Op[s' => 3, s => 2] = -1 / sqrt(2) - return Op[s' => 2, s => 3] = +1 / sqrt(2) -end +op(::OpName"Sz2", ::SiteType"S=1") = [ + 1.0 0.0 0.0 + 0.0 0.0 0.0 + 0.0 0.0 1.0 +] -function op!(Op::ITensor, ::OpName"iSʸ", t::SiteType"S=1", s::Index) - return op!(Op, OpName("iSy"), t, s) -end +op(::OpName"Sx2", ::SiteType"S=1") = [ + 0.5 0.0 0.5 + 0.0 1.0 0.0 + 0.5 0.0 0.5 +] -function op!(Op::ITensor, ::OpName"Sy", ::SiteType"S=1", s::Index) - complex!(Op) - Op[s' => 2, s => 1] = +1im / sqrt(2) - Op[s' => 1, s => 2] = -1im / sqrt(2) - Op[s' => 3, s => 2] = +1im / sqrt(2) - return Op[s' => 2, s => 3] = -1im / sqrt(2) -end +op(::OpName"Sy2", ::SiteType"S=1") = [ + 0.5 0.0 -0.5 + 0.0 1.0 0.0 + -0.5 0.0 0.5 +] -function op!(Op::ITensor, ::OpName"Sʸ", t::SiteType"S=1", s::Index) - return op!(Op, OpName("Sy"), t, s) -end +op(::OpName"S2", ::SiteType"S=1") = [ + 2.0 0.0 0.0 + 0.0 2.0 0.0 + 0.0 0.0 2.0 +] -function op!(Op::ITensor, ::OpName"Sz2", ::SiteType"S=1", s::Index) - Op[s' => 1, s => 1] = +1.0 - return Op[s' => 3, s => 3] = +1.0 -end +op(on::OpName"S²", t::SiteType"S=1") = op(alias(on), t) -function op!(Op::ITensor, ::OpName"Sx2", ::SiteType"S=1", s::Index) - Op[s' => 1, s => 1] = 0.5 - Op[s' => 3, s => 1] = 0.5 - Op[s' => 2, s => 2] = 1.0 - Op[s' => 1, s => 3] = 0.5 - return Op[s' => 3, s 
diff --git a/test/base/test_phys_site_types.jl b/test/base/test_phys_site_types.jl
index 6ff1154893..21d25d7532 100644
--- a/test/base/test_phys_site_types.jl
+++ b/test/base/test_phys_site_types.jl
@@ -393,6 +393,8 @@ using ITensors, LinearAlgebra, Test
     @test Array(op("Sz2", s, 2), s[2]', s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 +1.0]
     @test Array(op("Sx2", s, 2), s[2]', s[2]) ≈ [0.5 0 0.5; 0 1.0 0; 0.5 0 0.5]
     @test Array(op("Sy2", s, 2), s[2]', s[2]) ≈ [0.5 0 -0.5; 0 1.0 0; -0.5 0 0.5]
+    @test Array(op("S2", s, 2), s[2]', s[2]) ≈ [2.0 0.0 0.0; 0.0 2.0 0.0; 0.0 0.0 2.0]
+    @test Array(op("S²", s, 2), s[2]', s[2]) ≈ [2.0 0.0 0.0; 0.0 2.0 0.0; 0.0 0.0 2.0]
   end
 end

From 35807eedaa9da5d24bba32af6931b637aaceef44 Mon Sep 17 00:00:00 2001
From: Matt Fishman
Date: Sun, 16 Jun 2024 17:16:21 -0400
Subject: [PATCH 4/5] [ITensors] Bump to v0.6.14 [no ci]

---
 Project.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Project.toml b/Project.toml
index 3b05f24e1e..251e846818 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "ITensors"
 uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
 authors = ["Matthew Fishman ", "Miles Stoudenmire "]
-version = "0.6.13"
+version = "0.6.14"
 
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"

From 7b2ada97fe54532bb4410d0ce2cfc29314f98add Mon Sep 17 00:00:00 2001
From: Matt Fishman
Date: Tue, 18 Jun 2024 19:17:44 -0400
Subject: [PATCH 5/5] [BlockSparseArrays] Update to BlockArrays v1.1, fix some
 issues with nested views (#1503)

* [BlockSparseArrays] Update to BlockArrays v1.1, fix some issues with nested views

* [NDTensors] Bump to v0.3.31, BlockArrays v1.1
---
 NDTensors/Project.toml                        |   4 +-
 .../BlockArraysExtensions.jl                  |   4 +-
 .../src/abstractblocksparsearray/view.jl      |   4 -
 .../wrappedabstractblocksparsearray.jl        |  75 +++---
 .../blocksparsearrayinterface.jl              |  23 +-
 .../lib/BlockSparseArrays/test/test_basics.jl | 215 ++++++++++++------
 6 files changed, 218 insertions(+), 107 deletions(-)

diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml
index 133f6c98ae..a4e9bd391b 100644
--- a/NDTensors/Project.toml
+++ b/NDTensors/Project.toml
@@ -1,7 +1,7 @@
 name = "NDTensors"
 uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
 authors = ["Matthew Fishman "]
-version = "0.3.30"
+version = "0.3.31"
 
 [deps]
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
@@ -56,7 +56,7 @@ AMDGPU = "0.9"
 Accessors = "0.1.33"
 Adapt = "3.7, 4"
 ArrayLayouts = "1.4"
-BlockArrays = "1"
+BlockArrays = "1.1"
 CUDA = "5"
 Compat = "4.9"
 Dictionaries = "0.4"
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl
index 1c058a8efa..bb2634ce14 100644
--- a/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/src/BlockArraysExtensions/BlockArraysExtensions.jl
@@ -215,7 +215,7 @@ end
 
 function blockrange(
   axis::AbstractUnitRange,
-  r::BlockVector{BlockIndex{1},<:AbstractVector{<:BlockIndexRange{1}}},
+  r::BlockVector{<:BlockIndex{1},<:AbstractVector{<:BlockIndexRange{1}}},
 )
   return map(b -> Block(b), blocks(r))
 end
@@ -271,7 +271,7 @@ end
 function blockindices(
   a::AbstractUnitRange,
   b::Block,
-  r::BlockVector{BlockIndex{1},<:AbstractVector{<:BlockIndexRange{1}}},
+  r::BlockVector{<:BlockIndex{1},<:AbstractVector{<:BlockIndexRange{1}}},
 )
   # TODO: Change to iterate over `BlockRange(r)`
   # once https://github.com/JuliaArrays/BlockArrays.jl/issues/404
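The signature loosening above (`BlockIndex{1}` to `<:BlockIndex{1}`) matters because block-wise slices built with BlockArrays v1.1 can carry an element type that is a strict subtype of `BlockIndex{1}`. A hedged sketch of the kind of slice object these methods are meant to accept (construction pattern borrowed from the test file updated later in this patch):

```julia
using BlockArrays: Block, BlockIndexRange, BlockVector, mortar

# A block-wise slice: the first two indices of block 1 followed by
# the first three indices of block 2 of some blocked axis.
r = mortar([Block(1)[1:2], Block(2)[1:3]])

# `r` is a BlockVector whose blocks are BlockIndexRange{1}s; its element
# type is (in recent BlockArrays versions) a parametrized subtype of
# BlockIndex{1}, which the `<:BlockIndex{1}` dispatch above now matches.
@assert r isa BlockVector
```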
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/view.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/view.jl
index bbe586d771..61f303384f 100644
--- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/view.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/view.jl
@@ -27,7 +27,3 @@ end
 function Base.view(a::BlockSparseArrayLike{<:Any,1}, index::Block{1})
   return blocksparse_view(a, index)
 end
-
-function Base.view(a::BlockSparseArrayLike, indices::BlockIndexRange)
-  return view(view(a, block(indices)), indices.indices...)
-end
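Deleting the `view` overload above lets `BlockIndexRange` slices go through the generic `SubArray` machinery instead of producing a doubly wrapped view (the nested-view problem described in the test comments touched by this patch). A sketch of the resulting behavior, adapted from the tests added later in this patch:

```julia
using BlockArrays: Block, BlockIndexRange, BlockSlice
using NDTensors.BlockSparseArrays: BlockSparseArray

a = BlockSparseArray{Float64}([2, 3], [3, 4])
b = @view a[Block(2, 2)[1:2, 2:2]]

# A single SubArray wrapper whose parent indices are BlockSlices,
# rather than a SubArray of a SubArray.
@assert all(i -> i isa BlockSlice{<:BlockIndexRange{1}}, parentindices(b))
```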
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl
index 8b3d37bd36..8b51619f99 100644
--- a/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl
@@ -120,19 +120,18 @@ blocktype(a::BlockSparseArrayLike) = eltype(blocks(a))
 blocktype(arraytype::Type{<:BlockSparseArrayLike}) = eltype(blockstype(arraytype))
 
 using ArrayLayouts: ArrayLayouts
-## function Base.getindex(a::BlockSparseArrayLike{<:Any,N}, I::Vararg{Int,N}) where {N}
-##   return ArrayLayouts.layout_getindex(a, I...)
-## end
 function Base.getindex(a::BlockSparseArrayLike{<:Any,N}, I::CartesianIndices{N}) where {N}
   return ArrayLayouts.layout_getindex(a, I)
 end
 function Base.getindex(
-  a::BlockSparseArrayLike{<:Any,N}, I::Vararg{AbstractUnitRange,N}
+  a::BlockSparseArrayLike{<:Any,N}, I::Vararg{AbstractUnitRange{<:Integer},N}
 ) where {N}
   return ArrayLayouts.layout_getindex(a, I...)
 end
 # TODO: Define `AnyBlockSparseMatrix`.
-function Base.getindex(a::BlockSparseArrayLike{<:Any,2}, I::Vararg{AbstractUnitRange,2})
+function Base.getindex(
+  a::BlockSparseArrayLike{<:Any,2}, I::Vararg{AbstractUnitRange{<:Integer},2}
+)
   return ArrayLayouts.layout_getindex(a, I...)
 end
@@ -199,7 +198,7 @@ end
 
 # Needed by `BlockArrays` matrix multiplication interface
 function Base.similar(
-  arraytype::Type{<:BlockSparseArrayLike}, axes::Tuple{Vararg{AbstractUnitRange}}
+  arraytype::Type{<:BlockSparseArrayLike}, axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}}
 )
   return similar(arraytype, eltype(arraytype), axes)
 end
@@ -210,37 +209,26 @@ end
 # Delete once we drop support for older versions of Julia.
 function Base.similar(
   arraytype::Type{<:BlockSparseArrayLike},
-  axes::Tuple{AbstractUnitRange,Vararg{AbstractUnitRange}},
-)
-  return similar(arraytype, eltype(arraytype), axes)
-end
-
-# Needed by `BlockArrays` matrix multiplication interface
-# Fixes ambiguity error with `BlockArrays.jl`.
-function Base.similar(
-  arraytype::Type{<:BlockSparseArrayLike},
-  axes::Tuple{AbstractBlockedUnitRange,Vararg{AbstractUnitRange{Int}}},
+  axes::Tuple{AbstractUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}},
 )
   return similar(arraytype, eltype(arraytype), axes)
 end
 
-# Needed by `BlockArrays` matrix multiplication interface
-# Fixes ambiguity error with `BlockArrays.jl`.
+# Fixes ambiguity error with `BlockArrays`.
 function Base.similar(
   arraytype::Type{<:BlockSparseArrayLike},
-  axes::Tuple{
-    AbstractBlockedUnitRange,AbstractBlockedUnitRange,Vararg{AbstractUnitRange{Int}}
-  },
+  axes::Tuple{AbstractBlockedUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}},
 )
   return similar(arraytype, eltype(arraytype), axes)
 end
 
-# Needed by `BlockArrays` matrix multiplication interface
-# Fixes ambiguity error with `BlockArrays.jl`.
+# Fixes ambiguity error with `BlockArrays`.
 function Base.similar(
   arraytype::Type{<:BlockSparseArrayLike},
   axes::Tuple{
-    AbstractUnitRange{Int},AbstractBlockedUnitRange,Vararg{AbstractUnitRange{Int}}
+    AbstractUnitRange{<:Integer},
+    AbstractBlockedUnitRange{<:Integer},
+    Vararg{AbstractUnitRange{<:Integer}},
   },
 )
   return similar(arraytype, eltype(arraytype), axes)
@@ -248,7 +236,8 @@ end
 
 # Needed for disambiguation
 function Base.similar(
-  arraytype::Type{<:BlockSparseArrayLike}, axes::Tuple{Vararg{AbstractBlockedUnitRange}}
+  arraytype::Type{<:BlockSparseArrayLike},
+  axes::Tuple{Vararg{AbstractBlockedUnitRange{<:Integer}}},
 )
   return similar(arraytype, eltype(arraytype), axes)
 end
@@ -256,7 +245,9 @@ end
 # Needed by `BlockArrays` matrix multiplication interface
 # TODO: Define a `blocksparse_similar` function.
 function Base.similar(
-  arraytype::Type{<:BlockSparseArrayLike}, elt::Type, axes::Tuple{Vararg{AbstractUnitRange}}
+  arraytype::Type{<:BlockSparseArrayLike},
+  elt::Type,
+  axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}},
 )
   # TODO: Make generic for GPU, maybe using `blocktype`.
   # TODO: For non-block axes this should output `Array`.
@@ -265,7 +256,7 @@ end
 
 # TODO: Define a `blocksparse_similar` function.
 function Base.similar(
-  a::BlockSparseArrayLike, elt::Type, axes::Tuple{Vararg{AbstractUnitRange}}
+  a::BlockSparseArrayLike, elt::Type, axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}}
 )
   # TODO: Make generic for GPU, maybe using `blocktype`.
   # TODO: For non-block axes this should output `Array`.
@@ -277,7 +268,9 @@ end
 function Base.similar(
   a::BlockSparseArrayLike,
   elt::Type,
-  axes::Tuple{AbstractBlockedUnitRange,Vararg{AbstractBlockedUnitRange}},
+  axes::Tuple{
+    AbstractBlockedUnitRange{<:Integer},Vararg{AbstractBlockedUnitRange{<:Integer}}
+  },
 )
   # TODO: Make generic for GPU, maybe using `blocktype`.
   # TODO: For non-block axes this should output `Array`.
@@ -289,13 +282,37 @@ end
 function Base.similar(
   a::BlockSparseArrayLike,
   elt::Type,
-  axes::Tuple{AbstractUnitRange,Vararg{AbstractUnitRange}},
+  axes::Tuple{AbstractUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}},
 )
   # TODO: Make generic for GPU, maybe using `blocktype`.
   # TODO: For non-block axes this should output `Array`.
   return BlockSparseArray{elt}(undef, axes)
 end
 
+# Fixes ambiguity error with `BlockArrays`.
+function Base.similar(
+  a::BlockSparseArrayLike,
+  elt::Type,
+  axes::Tuple{AbstractBlockedUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}},
+)
+  # TODO: Make generic for GPU, maybe using `blocktype`.
+  # TODO: For non-block axes this should output `Array`.
+  return BlockSparseArray{elt}(undef, axes)
+end
+
+# Fixes ambiguity errors with BlockArrays.
+function Base.similar(
+  a::BlockSparseArrayLike,
+  elt::Type,
+  axes::Tuple{
+    AbstractUnitRange{<:Integer},
+    AbstractBlockedUnitRange{<:Integer},
+    Vararg{AbstractUnitRange{<:Integer}},
+  },
+)
+  return BlockSparseArray{elt}(undef, axes)
+end
+
 # TODO: Define a `blocksparse_similar` function.
 # Fixes ambiguity error with `StaticArrays`.
 function Base.similar(
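The `similar` methods above exist mostly to win method-ambiguity resolution against `BlockArrays` and `Base`; they all funnel into the same `BlockSparseArray{elt}(undef, axes)` constructor. A hedged usage sketch (the exact dispatch path depends on the `BlockSparseArrayLike` union, but the blocked-axes case should hit one of the methods shown above):

```julia
using BlockArrays: blockedrange
using NDTensors.BlockSparseArrays: BlockSparseArray

a = BlockSparseArray{Float64}([2, 3], [3, 4])

# `similar` with a new element type and a tuple of blocked axes
# should dispatch without ambiguity errors and preserve block structure.
b = similar(a, Float32, (blockedrange([2, 3]), blockedrange([3, 4])))
@assert b isa BlockSparseArray{Float32}
```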
diff --git a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl
index d1fd59ebf5..fe97f326cd 100644
--- a/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/src/blocksparsearrayinterface/blocksparsearrayinterface.jl
@@ -102,7 +102,24 @@ function blocksparse_fill!(a::AbstractArray, value)
     return a
   end
   for b in BlockRange(a)
-    a[b] .= value
+    # We can't use:
+    # ```julia
+    # a[b] .= value
+    # ```
+    # since that would lead to a stack overflow,
+    # because broadcasting calls `fill!`.
+
+    # TODO: Ideally we would use:
+    # ```julia
+    # @view!(a[b]) .= value
+    # ```
+    # but that doesn't work on `SubArray` right now.
+
+    # This line is needed to instantiate blocks
+    # that aren't instantiated yet. Maybe
+    # we can make this work without this line?
+    blocks(a)[Int.(Tuple(b))...] = blocks(a)[Int.(Tuple(b))...]
+    blocks(a)[Int.(Tuple(b))...] .= value
   end
   return a
 end
@@ -268,6 +285,10 @@ function Base.getindex(a::SparseSubArrayBlocks{<:Any,N}, I::CartesianIndex{N}) w
 end
 function Base.setindex!(a::SparseSubArrayBlocks{<:Any,N}, value, I::Vararg{Int,N}) where {N}
   parent_blocks = view(blocks(parent(a.array)), axes(a)...)
+  # TODO: The following line is required to instantiate
+  # uninstantiated blocks, maybe use `@view!` instead,
+  # or some other code pattern.
+  parent_blocks[I...] = parent_blocks[I...]
   return parent_blocks[I...][blockindices(parent(a.array), Block(I), a.array.indices)...] =
     value
 end
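The workaround above first assigns each block to itself to force uninstantiated (implicitly zero) blocks into existence, and only then broadcasts into it; broadcasting `value` over `a[b]` directly would recurse back into `fill!`. A hedged sketch of the user-facing effect, assuming `fill!` on a `BlockSparseArray` routes to `blocksparse_fill!` as shown above (the zero assertions mirror the tests in this patch):

```julia
using NDTensors.BlockSparseArrays: BlockSparseArray, block_nstored

a = BlockSparseArray{Float64}([2, 3], [3, 4])

# Filling with a nonzero value instantiates and fills every block.
fill!(a, 2.0)
@assert block_nstored(a) == 4

# Broadcasting zero takes the early-return path and drops all blocks.
a .= 0
@assert iszero(a)
@assert iszero(block_nstored(a))
```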
diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
index adc54453b8..e844bcb548 100644
--- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
@@ -1,14 +1,17 @@
 @eval module $(gensym())
 using BlockArrays:
   Block,
+  BlockIndexRange,
   BlockRange,
+  BlockSlice,
+  BlockVector,
   BlockedOneTo,
   BlockedUnitRange,
-  BlockVector,
   blockedrange,
   blocklength,
   blocklengths,
   blocksize,
+  blocksizes,
   mortar
 using LinearAlgebra: mul!
 using NDTensors.BlockSparseArrays:
@@ -20,25 +23,22 @@ include("TestBlockSparseArraysUtils.jl")
 @testset "BlockSparseArrays (eltype=$elt)" for elt in
                                                (Float32, Float64, ComplexF32, ComplexF64)
   @testset "Broken" begin
-    # TODO: These are broken, need to fix.
-    a = BlockSparseArray{elt}([2, 3], [2, 3])
-    for I in (Block.(1:2), [Block(1), Block(2)])
-      b = @view a[I, I]
-      x = randn(elt, 2, 2)
-      b[Block(1, 1)] = x
-      # These outputs a block of zeros,
-      # for some reason the block
-      # is not getting set.
-      # I think the issue is that:
-      # ```julia
-      # @view(@view(a[I, I]))[Block(1, 1)]
-      # ```
-      # creates a doubly-wrapped SubArray
-      # instead of flattening down to a
-      # single SubArray wrapper.
-      @test_broken a[Block(1, 1)] == x
-      @test_broken b[Block(1, 1)] == x
+    a = BlockSparseArray{elt}([2, 3], [3, 4])
+    @test_broken a[Block(1, 2)] .= 1
+
+    a = BlockSparseArray{elt}([2, 3], [3, 4])
+    b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]]
+    @test_broken b[2:4, 2:4]
+
+    a = BlockSparseArray{elt}([2, 3], [3, 4])
+    b = @views a[Block(1, 1)][1:2, 1:1]
+    for i in parentindices(b)
+      @test_broken i isa BlockSlice{<:BlockIndexRange{1}}
     end
+
+    a = BlockSparseArray{elt}([2, 3], [3, 4])
+    b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]]
+    @test_broken b[Block(1, 1)] = randn(3, 3)
   end
   @testset "Basics" begin
     a = BlockSparseArray{elt}([2, 3], [2, 3])
@@ -75,15 +75,17 @@ end
   @testset "Tensor algebra" begin
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     @test eltype(a) == elt
     @test block_nstored(a) == 2
     @test nstored(a) == 2 * 4 + 3 * 3
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = similar(a, complex(elt))
     @test eltype(b) == complex(eltype(a))
     @test iszero(b)
     @test block_nstored(b) == 0
     @test nstored(b) == 0
     @test blocksize(b) == blocksize(a)
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = copy(a)
     b[1, 1] = 11
     @test b[1, 1] == 11
     @test a[1, 1] ≠ 11
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = copy(a)
     b .*= 2
     @test b ≈ 2a
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = copy(a)
     b ./= 2
     @test b ≈ a / 2
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = 2 * a
     @test Array(b) ≈ 2 * Array(a)
     @test eltype(b) == elt
     @test block_nstored(b) == 2
     @test nstored(b) == 2 * 4 + 3 * 3
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = (2 + 3im) * a
     @test Array(b) ≈ (2 + 3im) * Array(a)
     @test eltype(b) == complex(elt)
     @test block_nstored(b) == 2
     @test nstored(b) == 2 * 4 + 3 * 3
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = a + a
     @test Array(b) ≈ 2 * Array(a)
     @test eltype(b) == elt
     @test block_nstored(b) == 2
     @test nstored(b) == 2 * 4 + 3 * 3
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     x = BlockSparseArray{elt}(undef, ([3, 4], [2, 3]))
-    x[Block(1, 2)] = randn(elt, size(@view(x[Block(1, 2)])))
-    x[Block(2, 1)] = randn(elt, size(@view(x[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      x[b] = randn(elt, size(x[b]))
+    end
     b = a .+ a .+ 3 .* PermutedDimsArray(x, (2, 1))
     @test Array(b) ≈ 2 * Array(a) + 3 * permutedims(Array(x), (2, 1))
     @test eltype(b) == elt
     @test block_nstored(b) == 2
     @test nstored(b) == 2 * 4 + 3 * 3
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = permutedims(a, (2, 1))
     @test Array(b) ≈ permutedims(Array(a), (2, 1))
     @test eltype(b) == elt
     @test block_nstored(b) == 2
     @test nstored(b) == 2 * 4 + 3 * 3
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = map(x -> 2x, a)
     @test Array(b) ≈ 2 * Array(a)
     @test eltype(b) == elt
     @test size(b) == size(a)
     @test blocksize(b) == (2, 2)
     @test block_nstored(b) == 2
     @test nstored(b) == 2 * 4 + 3 * 3
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = a[[Block(2), Block(1)], [Block(2), Block(1)]]
     @test b[Block(1, 1)] == a[Block(2, 2)]
     @test b[Block(1, 2)] == a[Block(2, 1)]
     @test b[Block(2, 1)] == a[Block(1, 2)]
     @test b[Block(2, 2)] == a[Block(1, 1)]
     @test size(b) == size(a)
     @test blocksize(b) == (2, 2)
     @test nstored(b) == nstored(a)
     @test block_nstored(b) == 2
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = a[Block(1):Block(2), Block(1):Block(2)]
     @test b == a
     @test size(b) == size(a)
     @test blocksize(b) == (2, 2)
     @test nstored(b) == nstored(a)
     @test block_nstored(b) == 2
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = a[Block(1):Block(1), Block(1):Block(2)]
     @test b == Array(a)[1:2, 1:end]
     @test b[Block(1, 1)] == a[Block(1, 1)]
     @test b[Block(1, 2)] == a[Block(1, 2)]
     @test size(b) == (2, 7)
     @test blocksize(b) == (1, 2)
     @test nstored(b) == nstored(a[Block(1, 2)])
     @test block_nstored(b) == 1
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = a[2:4, 2:4]
     @test b == Array(a)[2:4, 2:4]
     @test size(b) == (3, 3)
     @test blocksize(b) == (2, 2)
     @test nstored(b) == 1 * 1 + 2 * 2
     @test block_nstored(b) == 2
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = a[Block(2, 1)[1:2, 2:3]]
     @test b == Array(a)[3:4, 2:3]
     @test size(b) == (2, 2)
     @test blocksize(b) == (1, 1)
     @test nstored(b) == 2 * 2
     @test block_nstored(b) == 1
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = PermutedDimsArray(a, (2, 1))
     @test block_nstored(b) == 2
     @test Array(b) == permutedims(Array(a), (2, 1))
     c = 2 * b
     @test block_nstored(c) == 2
     @test Array(c) == 2 * permutedims(Array(a), (2, 1))
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = a'
     @test block_nstored(b) == 2
     @test Array(b) == Array(a)'
     c = 2 * b
     @test block_nstored(c) == 2
     @test Array(c) == 2 * Array(a)'
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = transpose(a)
     @test block_nstored(b) == 2
     @test Array(b) == transpose(Array(a))
     c = 2 * b
     @test block_nstored(c) == 2
     @test Array(c) == 2 * transpose(Array(a))
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = a[Block(1), Block(1):Block(2)]
     @test size(b) == (2, 7)
     @test blocksize(b) == (1, 2)
     @test b[Block(1, 1)] == a[Block(1, 1)]
     @test b[Block(1, 2)] == a[Block(1, 2)]
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = copy(a)
     x = randn(elt, size(@view(a[Block(2, 2)])))
     b[Block(2), Block(2)] = x
     @test b[Block(2, 2)] == x
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
-    a[Block(1, 2)] = randn(elt, size(@view(a[Block(1, 2)])))
-    a[Block(2, 1)] = randn(elt, size(@view(a[Block(2, 1)])))
+    @views for b in [Block(1, 2), Block(2, 1)]
+      a[b] = randn(elt, size(a[b]))
+    end
     b = copy(a)
     b[Block(1, 1)] .= 1
-    # TODO: Use `blocksizes(b)[1, 1]` once we upgrade to
-    # BlockArrays.jl v1.
-    @test b[Block(1, 1)] == trues(size(@view(b[Block(1, 1)])))
+    @test b[Block(1, 1)] == trues(blocksizes(b)[1, 1])
+
+    a = BlockSparseArray{elt}([2, 3], [3, 4])
+    b = @view a[Block(2, 2)]
+    @test size(b) == (3, 4)
+    for i in parentindices(b)
+      @test i isa BlockSlice{<:Block{1}}
+    end
+    @test parentindices(b)[1] == BlockSlice(Block(2), 3:5)
+    @test parentindices(b)[2] == BlockSlice(Block(2), 4:7)
+
+    a = BlockSparseArray{elt}([2, 3], [3, 4])
+    b = @view a[Block(2, 2)[1:2, 2:2]]
+    @test size(b) == (2, 1)
+    for i in parentindices(b)
+      @test i isa BlockSlice{<:BlockIndexRange{1}}
+    end
+    @test parentindices(b)[1] == BlockSlice(Block(2)[1:2], 3:4)
+    @test parentindices(b)[2] == BlockSlice(Block(2)[2:2], 5:5)
 
     a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))
     x = randn(elt, 1, 2)
@@ -300,7 +339,6 @@ include("TestBlockSparseArraysUtils.jl")
 
     a = BlockSparseArray{elt}([2, 3], [2, 3])
     @views for b in [Block(1, 1), Block(2, 2)]
-      # TODO: Use `blocksizes(a)[Int.(Tuple(b))...]` once available.
       a[b] = randn(elt, size(a[b]))
     end
     for I in (
@@ -432,6 +470,45 @@ include("TestBlockSparseArraysUtils.jl")
     a .= 0
     @test iszero(a)
     @test iszero(block_nstored(a))
+
+    a = BlockSparseArray{elt}([2, 3], [3, 4])
+    for I in (Block.(1:2), [Block(1), Block(2)])
+      b = @view a[I, I]
+      x = randn(elt, 3, 4)
+      b[Block(2, 2)] = x
+      # These used to output a block of zeros:
+      # for some reason the block
+      # was not getting set.
+      # The issue was that
+      # ```julia
+      # @view(@view(a[I, I]))[Block(1, 1)]
+      # ```
+      # created a doubly-wrapped SubArray
+      # instead of flattening down to a
+      # single SubArray wrapper.
+      @test a[Block(2, 2)] == x
+      @test b[Block(2, 2)] == x
+    end
+
+    a = BlockSparseArray{elt}([2, 3], [3, 4])
+    b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]]
+    x = randn(elt, 3, 4)
+    b[Block(1, 1)] .= x
+    @test b[Block(1, 1)] == x
+    @test a[Block(2, 2)] == x
+    @test_throws DimensionMismatch b[Block(1, 1)] .= randn(2, 3)
+
+    a = BlockSparseArray{elt}([2, 3], [3, 4])
+    b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]]
+    for index in parentindices(@view(b[Block(1, 1)]))
+      @test index isa BlockSlice{<:Block{1}}
+    end
+
+    a = BlockSparseArray{elt}([2, 3], [3, 4])
+    b = @view a[Block(1, 1)[1:2, 1:1]]
+    for i in parentindices(b)
+      @test i isa BlockSlice{<:BlockIndexRange{1}}
+    end
   end
   @testset "view!" begin
     for blk in ((Block(2, 2),), (Block(2), Block(2)))