
Commit

Merge commit '66f5d391a6ef01834658ec9d5a9a8ba3d4b0f827' into kmp5/feature/FieldTypes
kmp5VT committed Nov 2, 2023
2 parents c1b834f + 66f5d39 commit e9d8b06
Showing 103 changed files with 2,104 additions and 938 deletions.
1 change: 1 addition & 0 deletions ITensorUnicodePlots/Project.toml
@@ -17,6 +17,7 @@ Graphs = "1.4.1"
ITensorVisualizationBase = "0.1.5"
NetworkLayout = "0.4.3"
Reexport = "1.2.2"
Statistics = "1"
UnicodePlots = "2.5.0, 3"
julia = "1.6"

1 change: 1 addition & 0 deletions ITensorVisualizationBase/Project.toml
@@ -23,6 +23,7 @@ Graphs = "1.4.1"
ITensors = "0.2.12, 0.3"
MetaGraphs = "0.7.1"
NetworkLayout = "0.4.3"
Statistics = "1"
julia = "1.6"

[extras]
2 changes: 1 addition & 1 deletion NDTensors/Project.toml
@@ -1,7 +1,7 @@
name = "NDTensors"
uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
authors = ["Matthew Fishman <[email protected]>"]
version = "0.2.14"
version = "0.2.15"

[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
1 change: 1 addition & 0 deletions NDTensors/ext/NDTensorsCUDAExt/NDTensorsCUDAExt.jl
@@ -2,6 +2,7 @@ module NDTensorsCUDAExt

using NDTensors
using NDTensors.SetParameters
using NDTensors.Unwrap
using Adapt
using Functors
using LinearAlgebra
26 changes: 21 additions & 5 deletions NDTensors/ext/NDTensorsCUDAExt/indexing.jl
@@ -1,8 +1,24 @@
function Base.getindex(::Type{<:CuArray}, T::DenseTensor{<:Number})
return CUDA.@allowscalar data(T)[]
function Base.getindex(E::Exposed{<:CuArray})
return CUDA.@allowscalar unexpose(E)[]
end

function Base.setindex!(::Type{<:CuArray}, T::DenseTensor{<:Number}, x::Number)
CUDA.@allowscalar data(T)[] = x
return T
function setindex!(E::Exposed{<:CuArray}, x::Number)
CUDA.@allowscalar unexpose(E)[] = x
return unexpose(E)
end

function Base.getindex(E::Exposed{<:CuArray,<:Adjoint}, I...)
Ap = parent(E)
return expose(Ap)[I...]
end

function Base.copy(E::Exposed{<:CuArray,<:Base.ReshapedArray})
Ap = parent(E)
return copy(expose(Ap))
end

Base.any(f, E::Exposed{<:CuArray,<:NDTensors.Tensor}) = any(f, data(unexpose(E)))

function Base.print_array(io::IO, E::Exposed{<:CuArray})
return Base.print_array(io, expose(NDTensors.cpu(E)))
end
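
The change above moves scalar indexing from dispatching on the array type (plus a `DenseTensor`) to dispatching on the `Exposed` wrapper from `NDTensors.Unwrap`, with scalar access gated behind `CUDA.@allowscalar`. A minimal sketch of the underlying behavior these methods wrap (assumes CUDA.jl and a CUDA-capable device; the array values are hypothetical):

```julia
using CUDA

a = CuArray([0.0])            # single-element GPU array
CUDA.@allowscalar a[] = 3.0   # scalar writes on CuArrays must be explicitly allowed
x = CUDA.@allowscalar a[]     # same for scalar reads
@show x                       # 3.0
```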
2 changes: 1 addition & 1 deletion NDTensors/ext/NDTensorsCUDAExt/linearalgebra.jl
@@ -5,7 +5,7 @@ function NDTensors.svd_catch_error(A::CuMatrix; alg="JacobiAlgorithm")
alg = CUDA.CUSOLVER.QRAlgorithm()
end
USV = try
svd(A; alg=alg)
svd(expose(A); alg=alg)
catch
return nothing
end
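
`svd_catch_error` now calls `svd` on the `expose`d matrix so the `Exposed`-based methods are used; it still returns `nothing` when the GPU solver throws. A hedged sketch of how a caller might use that contract (the CPU fallback is hypothetical; assumes CUDA.jl and the keyword shown in the hunk above):

```julia
using CUDA, LinearAlgebra
using NDTensors

A = CUDA.randn(Float32, 8, 8)
USV = NDTensors.svd_catch_error(A; alg="JacobiAlgorithm")
if isnothing(USV)
    USV = svd(Array(A))   # fall back to a CPU SVD if the GPU algorithm failed
end
```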
4 changes: 3 additions & 1 deletion NDTensors/ext/NDTensorsMetalExt/NDTensorsMetalExt.jl
@@ -2,7 +2,7 @@ module NDTensorsMetalExt

using Adapt
using Functors
using LinearAlgebra: LinearAlgebra
using LinearAlgebra: LinearAlgebra, Transpose, mul!
using NDTensors
using NDTensors.SetParameters

@@ -18,5 +18,7 @@ include("set_types.jl")
include("indexing.jl")
include("linearalgebra.jl")
include("copyto.jl")
include("append.jl")
include("permutedims.jl")
include("mul.jl")
end
5 changes: 5 additions & 0 deletions NDTensors/ext/NDTensorsMetalExt/append.jl
@@ -0,0 +1,5 @@
# This circumvents an issue where `MtlArray` does not support `resize!`.
# TODO: Raise an issue with Metal.jl.
function NDTensors.append!!(::Type{<:MtlArray}, collection, collections...)
return vcat(collection, collections...)
end
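
Since `resize!` is unavailable for `MtlArray`, `append!!` grows a Metal vector by allocating a new one with `vcat` rather than resizing in place. A rough illustration of the semantics (assumes Metal.jl on Apple-silicon hardware; the vectors are hypothetical):

```julia
using Metal

v = MtlArray(Float32[1, 2, 3])
w = MtlArray(Float32[4, 5])

out = vcat(v, w)      # what the append!! method above effectively does for MtlArray
@show Array(out)      # Float32[1.0, 2.0, 3.0, 4.0, 5.0]
```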
1 change: 1 addition & 0 deletions NDTensors/ext/NDTensorsMetalExt/imports.jl
@@ -1,5 +1,6 @@
import NDTensors: mtl, set_ndims, set_eltype, set_eltype_if_unspecified
import NDTensors.SetParameters: nparameters, get_parameter, set_parameter, default_parameter

using NDTensors.Unwrap: Exposed, unwrap_type, unexpose, expose
using Metal: DefaultStorageMode
using NDTensors: adapt
10 changes: 5 additions & 5 deletions NDTensors/ext/NDTensorsMetalExt/indexing.jl
@@ -1,8 +1,8 @@
function Base.getindex(::Type{<:MtlArray}, T::DenseTensor{<:Number})
return Metal.@allowscalar data(T)[]
function Base.getindex(E::Exposed{<:MtlArray})
return Metal.@allowscalar unexpose(E)[]
end

function Base.setindex!(::Type{<:MtlArray}, T::DenseTensor{<:Number}, x::Number)
Metal.@allowscalar data(T)[] = x
return T
function Base.setindex!(E::Exposed{<:MtlArray}, x::Number)
Metal.@allowscalar unexpose(E)[] = x
return unexpose(E)
end
36 changes: 25 additions & 11 deletions NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl
@@ -1,16 +1,30 @@
function NDTensors.qr(leaf_parenttype::Type{<:MtlArray}, A::AbstractMatrix)
Q, R = NDTensors.qr(NDTensors.cpu(A))
return adapt(leaf_parenttype, Matrix(Q)), adapt(leaf_parenttype, R)
function LinearAlgebra.qr(A::Exposed{<:MtlMatrix})
Q, R = qr(expose(NDTensors.cpu(A)))
return adapt(unwrap_type(A), Matrix(Q)), adapt(unwrap_type(A), R)
end

function NDTensors.eigen(leaf_parenttype::Type{<:MtlArray}, A::AbstractMatrix)
D, U = NDTensors.eigen(NDTensors.cpu(A))
return adapt(set_ndims(leaf_parenttype, ndims(D)), D), adapt(leaf_parenttype, U)
function NDTensors.Unwrap.qr_positive(A::Exposed{<:MtlMatrix})
Q, R = qr_positive(expose(NDTensors.cpu(A)))
return adapt(unwrap_type(A), Matrix(Q)), adapt(unwrap_type(A), R)
end

function NDTensors.svd(leaf_parenttype::Type{<:MtlArray}, A::AbstractMatrix)
U, S, V = NDTensors.svd(NDTensors.cpu(A))
return adapt(leaf_parenttype, U),
adapt(set_ndims(leaf_parenttype, ndims(S)), S),
adapt(leaf_parenttype, V)
function NDTensors.Unwrap.ql(A::Exposed{<:MtlMatrix})
Q, L = ql(expose(NDTensors.cpu(A)))
return adapt(unwrap_type(A), Matrix(Q)), adapt(unwrap_type(A), L)
end
function NDTensors.Unwrap.ql_positive(A::Exposed{<:MtlMatrix})
Q, L = ql_positive(expose(NDTensors.cpu(A)))
return adapt(unwrap_type(A), Matrix(Q)), adapt(unwrap_type(A), L)
end

function LinearAlgebra.eigen(A::Exposed{<:MtlMatrix})
D, U = eigen(expose(NDTensors.cpu(A)))
return adapt(set_ndims(unwrap_type(A), ndims(D)), D), adapt(unwrap_type(A), U)
end

function LinearAlgebra.svd(A::Exposed{<:MtlMatrix}; kwargs...)
U, S, V = svd(expose(NDTensors.cpu(A)); kwargs...)
return adapt(unwrap_type(A), U),
adapt(set_ndims(unwrap_type(A), ndims(S)), S),
adapt(unwrap_type(A), V)
end
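
All of these factorizations follow one pattern: move the exposed matrix to the CPU with `NDTensors.cpu`, factorize there, and `adapt` the factors back to the original array type via `unwrap_type`. A hedged CPU-to-GPU round-trip sketch of that idea (assumes Metal.jl on Apple-silicon hardware; the plain `MtlArray` constructor stands in for `adapt(unwrap_type(A), ...)`):

```julia
using Metal, LinearAlgebra

A = MtlArray(randn(Float32, 4, 4))

F = qr(Array(A))               # factorize on the CPU (no native Metal QR here)
Q = MtlArray(Matrix(F.Q))      # materialize Q, then copy it back to the GPU
R = MtlArray(F.R)
@show Array(Q) * Array(R) ≈ Array(A)
```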
12 changes: 12 additions & 0 deletions NDTensors/ext/NDTensorsMetalExt/mul.jl
@@ -0,0 +1,12 @@
# This was calling generic matrix multiplication.
# TODO: Raise an issue with `Metal.jl`.
function LinearAlgebra.mul!(
CM::Exposed{<:MtlArray,<:Transpose},
AM::Exposed{<:MtlArray},
BM::Exposed{<:MtlArray},
α,
β,
)
mul!(transpose(CM), transpose(BM), transpose(AM), α, β)
return unexpose(CM)
end
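
Per the comment above, `mul!` into a transposed `MtlArray` destination was hitting generic matrix multiplication, so the method rewrites the product against the un-transposed parent using the identity `(α*A*B + β*C)' = α*B'*A' + β*C'`. A CPU sketch of that rewrite (hypothetical shapes; `transpose(CM)` in the method is assumed to unwrap to the exposed parent):

```julia
using LinearAlgebra

A, B = randn(3, 4), randn(4, 5)
Cparent = randn(5, 3)
Ct = transpose(Cparent)              # 3×5 transposed destination, as in the method above
α, β = 2.0, 0.5
expected = α * A * B + β * copy(Ct)

# Multiply into the parent instead of into the Transpose wrapper:
mul!(parent(Ct), transpose(B), transpose(A), α, β)
@show Ct ≈ expected                  # true: the transposed view now holds α*A*B + β*(old Ct)
```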
15 changes: 5 additions & 10 deletions NDTensors/ext/NDTensorsMetalExt/permutedims.jl
@@ -1,12 +1,7 @@
function NDTensors.permutedims!(
::Type{<:MtlArray},
Adest::Base.ReshapedArray{<:Any,<:Any,<:SubArray},
::Type{<:MtlArray},
A,
perm,
function permutedims!(
Edest::Exposed{<:MtlArray,<:Base.ReshapedArray}, Esrc::Exposed{<:MtlArray}, perm
)
Aperm = permutedims(A, perm)
Adest_parent = parent(Adest)
copyto!(Adest_parent, Aperm)
return Adest
Aperm = permutedims(Esrc, perm)
copyto!(expose(parent(Edest)), expose(Aperm))
return unexpose(Edest)
end
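
The replacement dispatches on `Exposed` destinations that wrap a `ReshapedArray`: it permutes the source into a temporary, then copies into the destination's parent. A CPU sketch of the same dance (hypothetical shapes):

```julia
src = randn(2, 3, 4)
buffer = zeros(24)
dest = reshape(view(buffer, 1:24), 3, 2, 4)   # a ReshapedArray wrapping a SubArray

perm = (2, 1, 3)
tmp = permutedims(src, perm)                  # permute into a temporary
copyto!(parent(dest), tmp)                    # copy into the parent, in linear order
@show dest == tmp
```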
1 change: 0 additions & 1 deletion NDTensors/src/BlockSparseArrays/src/blocksparsearray.jl
@@ -65,7 +65,6 @@ function BlockArrays.viewblock(block_arr::BlockSparseArray, block)
# TODO: Make this `Zeros`?
## zero = zeros(eltype(block_arr), block_size)
return block_arr.blocks[blks...] # Fails because zero isn't defined
## return get_nonzero(block_arr.blocks, blks, zero)
end

function Base.getindex(block_arr::BlockSparseArray{T,N}, bi::BlockIndex{N}) where {T,N}
13 changes: 2 additions & 11 deletions NDTensors/src/BlockSparseArrays/src/sparsearray.jl
@@ -1,6 +1,7 @@
# TODO: Define a constructor with a default `zero`.
struct SparseArray{T,N,Zero} <: AbstractArray{T,N}
data::Dictionary{CartesianIndex{N},T}
dims::NTuple{N,Int64}
dims::NTuple{N,Int}
zero::Zero
end

@@ -20,13 +21,3 @@ end
function Base.getindex(a::SparseArray{T,N}, I::Vararg{Int,N}) where {T,N}
return getindex(a, CartesianIndex(I))
end

## # `getindex` but uses a default if the value is
## # structurally zero.
## function get_nonzero(a::SparseArray{T,N}, I::CartesianIndex{N}, zero) where {T,N}
## @boundscheck checkbounds(a, I)
## return get(a.data, I, zero)
## end
## function get_nonzero(a::SparseArray{T,N}, I::NTuple{N,Int}, zero) where {T,N}
## return get_nonzero(a, CartesianIndex(I), zero)
## end
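
`SparseArray` stores its nonzero entries in a `Dictionaries.Dictionary` keyed by `CartesianIndex`, with the `zero` field supplying values for structurally-zero positions (the corresponding `getindex` logic lives in the collapsed part of this diff). A rough, hypothetical sketch of that storage pattern using `Dictionaries` directly:

```julia
using Dictionaries: Dictionary

data = Dictionary{CartesianIndex{2},Float64}()
insert!(data, CartesianIndex(1, 2), 3.5)      # one stored (nonzero) entry

getzero(I) = 0.0                               # stand-in for the `zero` field

# Lookup with a structural-zero default:
val(I) = haskey(data, I) ? data[I] : getzero(I)
@show val(CartesianIndex(1, 2))   # 3.5
@show val(CartesianIndex(2, 2))   # 0.0
```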
45 changes: 45 additions & 0 deletions NDTensors/src/DiagonalArrays/README.md
@@ -0,0 +1,45 @@
# DiagonalArrays.jl

A Julia `DiagonalArray` type.

````julia
using NDTensors.DiagonalArrays:
DiagonalArray,
DiagIndex,
DiagIndices,
densearray

d = DiagonalArray([1.0, 2, 3], 3, 4, 5)
@show d[1, 1, 1] == 1
@show d[2, 2, 2] == 2
@show d[1, 2, 1] == 0

d[2, 2, 2] = 22
@show d[2, 2, 2] == 22

@show length(d[DiagIndices()]) == 3
@show densearray(d) == d
@show d[DiagIndex(2)] == d[2, 2, 2]

d[DiagIndex(2)] = 222
@show d[2, 2, 2] == 222

a = randn(3, 4, 5)
new_diag = randn(3)
a[DiagIndices()] = new_diag
d[DiagIndices()] = a[DiagIndices()]

@show a[DiagIndices()] == new_diag
@show d[DiagIndices()] == new_diag
````

You can generate this README with:
```julia
using Literate
Literate.markdown("examples/README.jl", "."; flavor=Literate.CommonMarkFlavor())
```

---

*This page was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*

34 changes: 34 additions & 0 deletions NDTensors/src/DiagonalArrays/examples/README.jl
@@ -0,0 +1,34 @@
# # DiagonalArrays.jl
#
# A Julia `DiagonalArray` type.

using NDTensors.DiagonalArrays: DiagonalArray, DiagIndex, DiagIndices, densearray

d = DiagonalArray([1.0, 2, 3], 3, 4, 5)
@show d[1, 1, 1] == 1
@show d[2, 2, 2] == 2
@show d[1, 2, 1] == 0

d[2, 2, 2] = 22
@show d[2, 2, 2] == 22

@show length(d[DiagIndices()]) == 3
@show densearray(d) == d
@show d[DiagIndex(2)] == d[2, 2, 2]

d[DiagIndex(2)] = 222
@show d[2, 2, 2] == 222

a = randn(3, 4, 5)
new_diag = randn(3)
a[DiagIndices()] = new_diag
d[DiagIndices()] = a[DiagIndices()]

@show a[DiagIndices()] == new_diag
@show d[DiagIndices()] == new_diag

# You can generate this README with:
# ```julia
# using Literate
# Literate.markdown("examples/README.jl", "."; flavor=Literate.CommonMarkFlavor())
# ```