idea to update UniformDiag to work for different backends
kmp5VT committed May 22, 2024
1 parent 4bb306d commit 21f1b04
Showing 6 changed files with 26 additions and 14 deletions.
3 changes: 3 additions & 0 deletions NDTensors/src/NDTensors.jl
@@ -86,6 +86,9 @@ include("empty/EmptyTensor.jl")
include("empty/tensoralgebra/contract.jl")
include("empty/adapt.jl")

## I am not exactly sure where this should go yet but it
## does work here
include("lib/GPUArraysCoreExtensions/src/contract.jl")
#####################################
# Deprecations
#
7 changes: 4 additions & 3 deletions NDTensors/src/diag/diagtensor.jl
@@ -107,7 +107,7 @@ function dense(::Type{<:Tensor{ElT,N,StoreT,IndsT}}) where {ElT,N,StoreT<:Diag,IndsT}
return Tensor{ElT,N,dense(StoreT),IndsT}
end

using .TypeParameterAccessors: unwrap_array_type
using .TypeParameterAccessors: unspecify_type_parameters, unwrap_array_type
# convert to Dense
function dense(T::DiagTensor)
return dense(unwrap_array_type(T), T)
@@ -123,9 +123,10 @@ function dense(::Type{<:Array}, T::DiagTensor)
end

# GPU version
function dense(::Type{<:AbstractArray}, T::DiagTensor)
## datat must be either fully specified or fully unspecified for this to work
function dense(datat::Type{<:AbstractArray}, T::DiagTensor)
D_cpu = dense(Array, cpu(T))
return adapt(unwrap_array_type(T), D_cpu)
return adapt(datat, D_cpu)
end

# UniformDiag version
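With this change the caller's array type flows straight through to adapt, so one method serves any backend. Below is a minimal usage sketch (an editor's illustration, not part of the commit), assuming CUDA.jl is installed and that Diag, tensor, and dense are importable from NDTensors as they are in the tests further down:

using NDTensors: Diag, dense, tensor
using CUDA: CuVector

## Non-uniform Diag storage on CPU, densified and adapted to CUDA storage.
## Per the note above, datat is fully specified here (CuVector{Float64});
## a fully unspecified CuArray should also work.
D = tensor(Diag{Float64}(randn(3)), (3, 3))
Dgpu = dense(CuVector{Float64}, D)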
16 changes: 8 additions & 8 deletions NDTensors/src/lib/GPUArraysCoreExtensions/src/contract.jl
@@ -1,23 +1,23 @@
using NDTensors: NDTensors, DenseTensor, DiagTensor, contract!, dense, inds, Tensor
using NDTensors.Expose: Exposed, expose, unexpose
using NDTensors.TypeParameterAccessors: parenttype
using CUDA: CuArray
using NDTensors.TypeParameterAccessors: parenttype, set_eltype, set_ndims
using GPUArraysCore: AbstractGPUArray

## In this function we convert the DiagTensor to a dense tensor and
## feed it back into contract!
function NDTensors.contract!(
output_tensor::Exposed{<:CuArray,<:DenseTensor},
output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor},
labelsoutput_tensor,
tensor1::Exposed{<:Any,<:DiagTensor},
labelstensor1,
tensor2::Exposed{<:CuArray,<:DenseTensor},
tensor2::Exposed{<:AbstractGPUArray,<:DenseTensor},
labelstensor2,
α::Number=one(Bool),
β::Number=zero(Bool),
)
tensor1 = unexpose(tensor1)
## convert tensor1 to a dense
tensor1 = adapt(parenttype(typeof(tensor2)), dense(tensor1))
arrayt = set_ndims(parenttype(typeof(tensor2)), 1)
tensor1 = dense(arrayt, unexpose(tensor1))
return contract!(
output_tensor,
labelsoutput_tensor,
@@ -31,9 +31,9 @@ function NDTensors.contract!(
end

function NDTensors.contract!(
output_tensor::Exposed{<:CuArray,<:DenseTensor},
output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor},
labelsoutput_tensor,
tensor1::Exposed{<:CuArray,<:DenseTensor},
tensor1::Exposed{<:AbstractGPUArray,<:DenseTensor},
labelstensor1,
tensor2::Exposed{<:Any,<:DiagTensor},
labelstensor2,
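The motivation for the looser signature (editor's note): every GPU backend's device array subtypes GPUArraysCore.AbstractGPUArray, so replacing CuArray with AbstractGPUArray lets this one method cover CUDA, Metal, and the other GPUArrays.jl backends, while set_ndims(parenttype(typeof(tensor2)), 1) derives the matching backend vector type for the densified diagonal. A quick check of the dispatch claim, assuming CUDA.jl and Metal.jl are installed:

using GPUArraysCore: AbstractGPUArray
using CUDA: CuArray
using Metal: MtlArray

## Both backends' array types dispatch to the AbstractGPUArray methods above.
@assert CuArray{Float32,2} <: AbstractGPUArray
@assert MtlArray{Float32,2} <: AbstractGPUArray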
3 changes: 2 additions & 1 deletion NDTensors/src/tensor/tensor.jl
@@ -274,7 +274,8 @@ function dense(::Type{<:Tensor{ElT,NT,StoreT,IndsT}}) where {ElT,NT,StoreT,IndsT}
return Tensor{ElT,NT,dense(StoreT),IndsT}
end

dense(T::Tensor) = setstorage(T, dense(storage(T)))
dense(T::Tensor) = dense(unwrap_array_type(T), T)
dense(datat::Type{<:AbstractArray}, T::Tensor) = setstorage(T, adapt(datat, dense(storage(T))))

# Convert to Array, avoiding copying if possible
array(T::Tensor) = array(dense(T))
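A hedged sketch of the new call pattern (editor's illustration): the one-argument dense keeps the tensor's current backend via unwrap_array_type, while the two-argument form lets the caller choose the target storage explicitly; on CPU the two should agree:

using NDTensors: Dense, dense, randomTensor

T = randomTensor(Dense{Float64}, (2, 2))
D1 = dense(T)                 ## same backend: unwrap_array_type(T) is a CPU Array type
D2 = dense(Array{Float64}, T) ## explicit target array type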
7 changes: 5 additions & 2 deletions NDTensors/test/test_diag.jl
@@ -4,6 +4,7 @@ using Test: @testset, @test, @test_throws
using GPUArraysCore: @allowscalar
include("NDTensorsTestUtils/NDTensorsTestUtils.jl")
using .NDTensorsTestUtils: devices_list, is_supported_eltype
using LinearAlgebra: dot

@testset "DiagTensor basic functionality" begin
@testset "test device: $dev" for dev in devices_list(copy(ARGS)),
@@ -20,7 +21,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
d = rand(real(elt), 10)
D = dev(Diag{elt}(d))
@test eltype(D) == elt
@test @allowscalar dev(Array(dense(D))) == convert.(elt, d)
@test @allowscalar dense(D) == dev(convert.(elt, d))
simD = similar(D)
@test length(simD) == length(D)
@test eltype(simD) == eltype(D)
@@ -66,7 +67,9 @@ end
@test contract(A, (-2, 1), t, (-2, 3)) == transpose(A)

## Testing sparse contractions on GPU
t = tensor(Diag(one(Float64)), (3, 3))
elt = (dev == NDTensors.mtl ? Float32 : Float64)
A = randomTensor(Dense{elt}, (3, 3))
t = tensor(Diag(one(elt)), (3, 3))
@test contract(t, (-1, -2), dev(A), (-1, -2))[] == dot(t, A)
end
nothing
4 changes: 4 additions & 0 deletions src/itensor.jl
@@ -581,6 +581,10 @@ function dense(A::ITensor)
return setinds(itensor(dense(tensor(A))), removeqns(inds(A)))
end

function dense(datat::Type, A::ITensor)
return setinds(itensor(dense(datat, tensor(A))), removeqns(inds(A)))
end

"""
random_itensor([rng=Random.default_rng()], [ElT=Float64], inds)
random_itensor([rng=Random.default_rng()], [ElT=Float64], inds::Index...)
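An editor's usage sketch for the new ITensor-level method, assuming CUDA.jl is installed and that the uniform-diagonal path densifies like the vector-backed one; delta is the natural example here since the commit message targets UniformDiag, and datat is forwarded to the tensor-level dense so the densified storage is adapted to the requested backend:

using ITensors: Index, delta, dense
using CUDA: CuVector

i, j = Index(2), Index(2)
A = delta(i, j)                    ## uniform Diag storage, the case this commit targets
Agpu = dense(CuVector{Float64}, A) ## densify on CPU, then adapt storage to CUDA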
