From 21f1b04b6999e860805c2446f079b330494e9960 Mon Sep 17 00:00:00 2001
From: Karl Pierce
Date: Wed, 22 May 2024 14:44:39 -0400
Subject: [PATCH] idea to update UniformDiag to work for different backends

---
 NDTensors/src/NDTensors.jl                       |  3 +++
 NDTensors/src/diag/diagtensor.jl                 |  7 ++++---
 .../lib/GPUArraysCoreExtensions/src}/contract.jl | 16 ++++++++--------
 NDTensors/src/tensor/tensor.jl                   |  3 ++-
 NDTensors/test/test_diag.jl                      |  7 +++++--
 src/itensor.jl                                   |  4 ++++
 6 files changed, 26 insertions(+), 14 deletions(-)
 rename NDTensors/{ext/NDTensorsCUDAExt => src/lib/GPUArraysCoreExtensions/src}/contract.jl (67%)

diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl
index 7e7cffb8dc..6ccc081668 100644
--- a/NDTensors/src/NDTensors.jl
+++ b/NDTensors/src/NDTensors.jl
@@ -86,6 +86,9 @@ include("empty/EmptyTensor.jl")
 include("empty/tensoralgebra/contract.jl")
 include("empty/adapt.jl")
+## I am not exactly sure where this should go yet, but it
+## does work here
+include("lib/GPUArraysCoreExtensions/src/contract.jl")
 
 #####################################
 # Deprecations
 #
diff --git a/NDTensors/src/diag/diagtensor.jl b/NDTensors/src/diag/diagtensor.jl
index 7a5cddff0e..4e05b4f472 100644
--- a/NDTensors/src/diag/diagtensor.jl
+++ b/NDTensors/src/diag/diagtensor.jl
@@ -107,7 +107,7 @@ function dense(::Type{<:Tensor{ElT,N,StoreT,IndsT}}) where {ElT,N,StoreT<:Diag,I
   return Tensor{ElT,N,dense(StoreT),IndsT}
 end
 
-using .TypeParameterAccessors: unwrap_array_type
+using .TypeParameterAccessors: unspecify_type_parameters, unwrap_array_type
 # convert to Dense
 function dense(T::DiagTensor)
   return dense(unwrap_array_type(T), T)
@@ -123,9 +123,10 @@ function dense(::Type{<:Array}, T::DiagTensor)
 end
 
 # GPU version
-function dense(::Type{<:AbstractArray}, T::DiagTensor)
+## `datat` must be either fully specified or fully unspecified for this to work
+function dense(datat::Type{<:AbstractArray}, T::DiagTensor)
   D_cpu = dense(Array, cpu(T))
-  return adapt(unwrap_array_type(T), D_cpu)
+  return adapt(datat, D_cpu)
 end
 
 # UniformDiag version
diff --git a/NDTensors/ext/NDTensorsCUDAExt/contract.jl b/NDTensors/src/lib/GPUArraysCoreExtensions/src/contract.jl
similarity index 67%
rename from NDTensors/ext/NDTensorsCUDAExt/contract.jl
rename to NDTensors/src/lib/GPUArraysCoreExtensions/src/contract.jl
index 6f2678580c..8067cdb98d 100644
--- a/NDTensors/ext/NDTensorsCUDAExt/contract.jl
+++ b/NDTensors/src/lib/GPUArraysCoreExtensions/src/contract.jl
@@ -1,23 +1,23 @@
 using NDTensors: NDTensors, DenseTensor, DiagTensor, contract!, dense, inds, Tensor
 using NDTensors.Expose: Exposed, expose, unexpose
-using NDTensors.TypeParameterAccessors: parenttype
-using CUDA: CuArray
+using NDTensors.TypeParameterAccessors: parenttype, set_eltype, set_ndims
+using GPUArraysCore: AbstractGPUArray
 
 ## In this function we convert the DiagTensor to a dense tensor and
 ## Feed it back into contract
 function NDTensors.contract!(
-  output_tensor::Exposed{<:CuArray,<:DenseTensor},
+  output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor},
   labelsoutput_tensor,
   tensor1::Exposed{<:Any,<:DiagTensor},
   labelstensor1,
-  tensor2::Exposed{<:CuArray,<:DenseTensor},
+  tensor2::Exposed{<:AbstractGPUArray,<:DenseTensor},
   labelstensor2,
   α::Number=one(Bool),
   β::Number=zero(Bool),
 )
-  tensor1 = unexpose(tensor1)
   ## convert tensor1 to a dense
-  tensor1 = adapt(parenttype(typeof(tensor2)), dense(tensor1))
+  arrayt = set_ndims(parenttype(typeof(tensor2)), 1)
+  tensor1 = dense(arrayt, unexpose(tensor1))
   return contract!(
     output_tensor,
     labelsoutput_tensor,
@@ -31,9 +31,9 @@ function NDTensors.contract!(
 end
 
 function NDTensors.contract!(
-  output_tensor::Exposed{<:CuArray,<:DenseTensor},
+  output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor},
   labelsoutput_tensor,
-  tensor1::Exposed{<:CuArray,<:DenseTensor},
+  tensor1::Exposed{<:AbstractGPUArray,<:DenseTensor},
   labelstensor1,
   tensor2::Exposed{<:Any,<:DiagTensor},
   labelstensor2,
diff --git a/NDTensors/src/tensor/tensor.jl b/NDTensors/src/tensor/tensor.jl
index 4dfa483030..5fbc9ccc8f 100644
--- a/NDTensors/src/tensor/tensor.jl
+++ b/NDTensors/src/tensor/tensor.jl
@@ -274,7 +274,8 @@ function dense(::Type{<:Tensor{ElT,NT,StoreT,IndsT}}) where {ElT,NT,StoreT,IndsT
   return Tensor{ElT,NT,dense(StoreT),IndsT}
 end
 
-dense(T::Tensor) = setstorage(T, dense(storage(T)))
+dense(T::Tensor) = dense(unwrap_array_type(T), T)
+dense(datat::Type{<:AbstractArray}, T::Tensor) = setstorage(T, adapt(datat, dense(storage(T))))
 
 # Convert to Array, avoiding copying if possible
 array(T::Tensor) = array(dense(T))
diff --git a/NDTensors/test/test_diag.jl b/NDTensors/test/test_diag.jl
index 46a4ab15ac..c7f0758441 100644
--- a/NDTensors/test/test_diag.jl
+++ b/NDTensors/test/test_diag.jl
@@ -4,6 +4,7 @@ using Test: @testset, @test, @test_throws
 using GPUArraysCore: @allowscalar
 include("NDTensorsTestUtils/NDTensorsTestUtils.jl")
 using .NDTensorsTestUtils: devices_list, is_supported_eltype
+using LinearAlgebra: dot
 
 @testset "DiagTensor basic functionality" begin
   @testset "test device: $dev" for dev in devices_list(copy(ARGS)),
@@ -20,7 +21,7 @@
     d = rand(real(elt), 10)
     D = dev(Diag{elt}(d))
     @test eltype(D) == elt
-    @test @allowscalar dev(Array(dense(D))) == convert.(elt, d)
+    @test @allowscalar dense(D) == dev(convert.(elt, d))
     simD = similar(D)
     @test length(simD) == length(D)
     @test eltype(simD) == eltype(D)
@@ -66,7 +67,9 @@ end
   @test contract(A, (-2, 1), t, (-2, 3)) == transpose(A)
 
   ## Testing sparse contractions on GPU
-  t = tensor(Diag(one(Float64)), (3, 3))
+  elt = (dev == NDTensors.mtl ? Float32 : Float64)
+  A = randomTensor(Dense{elt}, (3, 3))
+  t = tensor(Diag(one(elt)), (3, 3))
   @test contract(t, (-1, -2), dev(A), (-1, -2))[] == dot(t, A)
 end
 nothing
diff --git a/src/itensor.jl b/src/itensor.jl
index ea73d9353f..3237ce52f1 100644
--- a/src/itensor.jl
+++ b/src/itensor.jl
@@ -581,6 +581,10 @@ function dense(A::ITensor)
   return setinds(itensor(dense(tensor(A))), removeqns(inds(A)))
 end
 
+function dense(datat::Type, A::ITensor)
+  return setinds(itensor(dense(datat, tensor(A))), removeqns(inds(A)))
+end
+
 """
     random_itensor([rng=Random.default_rng()], [ElT=Float64], inds)
     random_itensor([rng=Random.default_rng()], [ElT=Float64], inds::Index...)
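
Usage sketch (not part of the patch): the snippet below shows the call pattern these changes enable, mirroring the new GPU test in test_diag.jl. It is a minimal sketch assuming CUDA.jl is installed and the NDTensorsCUDAExt package extension is active; `NDTensors.cu` is the same device function the test suite's devices_list uses.

    using NDTensors: NDTensors, Dense, Diag, contract, dense, randomTensor, tensor
    using CUDA: CuArray
    using LinearAlgebra: dot

    # A uniform diagonal tensor: a single scalar stands in for every diagonal
    # element, so there is no backend-specific array data to move yet.
    t = tensor(Diag(one(Float32)), (3, 3))

    # New in this patch: `dense` accepts a target data type and adapts the
    # densified tensor to it. Per the comment in diagtensor.jl, the type must
    # be either fully specified or fully unspecified (`CuArray` is the latter).
    t_gpu = dense(CuArray, t)

    # The rewritten contract! methods dispatch on AbstractGPUArray, so the same
    # densify-then-contract path now serves any GPU backend, not just CuArray.
    A = randomTensor(Dense{Float32}, (3, 3))
    @assert contract(t, (-1, -2), NDTensors.cu(A), (-1, -2))[] ≈ dot(t, A)

The same entry point is mirrored at the ITensor level by the new dense(datat::Type, A::ITensor) method, so an ITensor can be densified directly onto a chosen backend's storage.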