Skip to content

Commit

Permalink
Merge branch 'main' into kmp5/feature/dagger_ext
Browse files Browse the repository at this point in the history
  • Loading branch information
kmp5VT authored Oct 20, 2024
2 parents 0c59245 + 5fb4696 commit 11f0bf2
Show file tree
Hide file tree
Showing 25 changed files with 197 additions and 50 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ jobs:
strategy:
matrix:
version:
- '1.6'
- 'lts'
- '1'
os:
# - windows-latest # windows tests are failing for an unknown reason, disable for now
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/test_itensormps_ubuntu.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ jobs:
strategy:
matrix:
version:
- '1.6'
- 'lts'
- '1'
os:
- ubuntu-latest
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/test_itensors_base_ubuntu.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ jobs:
strategy:
matrix:
version:
- '1.6'
- 'lts'
- '1'
os:
- ubuntu-latest
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/test_ndtensors.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ jobs:
strategy:
matrix:
version:
- '1.6'
- 'lts'
- '1'
os:
- ubuntu-latest
Expand Down
6 changes: 3 additions & 3 deletions NDTensors/Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "NDTensors"
uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
authors = ["Matthew Fishman <[email protected]>"]
version = "0.3.43"
version = "0.3.46"

[deps]
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
Expand Down Expand Up @@ -68,7 +68,7 @@ EllipsisNotation = "1.8"
FillArrays = "1"
Folds = "0.2.8"
Functors = "0.2, 0.3, 0.4"
GPUArraysCore = "0.1"
GPUArraysCore = "0.1, 0.2"
HDF5 = "0.14, 0.15, 0.16, 0.17"
HalfIntegers = "1"
InlineStrings = "1"
Expand All @@ -91,7 +91,7 @@ TimerOutputs = "0.5.5"
TupleTools = "1.2.0"
VectorInterface = "0.4.2"
cuTENSOR = "2"
julia = "1.6"
julia = "1.10"

[extras]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
Expand Down
2 changes: 0 additions & 2 deletions NDTensors/src/empty/EmptyTensor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -152,8 +152,6 @@ end

setindex!!(T::EmptyTensor, x, I...) = setindex(T, x, I...)

promote_rule(::Type{EmptyNumber}, ::Type{T}) where {T<:Number} = T

function promote_rule(
::Type{T1}, ::Type{T2}
) where {T1<:EmptyStorage{EmptyNumber},T2<:TensorStorage}
Expand Down
8 changes: 8 additions & 0 deletions NDTensors/src/emptynumber.jl
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,14 @@ convert(::Type{T}, x::EmptyNumber) where {T<:Number} = T(zero(T))
# This helps with defining `norm` of `EmptyStorage{EmptyNumber}`.
AbstractFloat(::EmptyNumber) = zero(AbstractFloat)

# Extra definitions fix ambiguity errors.
Base.promote_rule(::Type{EmptyNumber}, T::Type{<:Number}) = T
Base.promote_rule(T::Type{<:Number}, ::Type{EmptyNumber}) = T
Base.promote_rule(::Type{EmptyNumber}, ::Type{Bool}) = Bool
Base.promote_rule(::Type{Bool}, ::Type{EmptyNumber}) = Bool
Base.promote_rule(::Type{EmptyNumber}, T::Type{Complex{R}}) where {R<:Real} = T
Base.promote_rule(T::Type{Complex{R}}, ::Type{EmptyNumber}) where {R<:Real} = T

# Basic arithmetic
(::EmptyNumber + ::EmptyNumber) = EmptyNumber()
(::EmptyNumber - ::EmptyNumber) = EmptyNumber()
Expand Down
51 changes: 34 additions & 17 deletions NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
Original file line number Diff line number Diff line change
Expand Up @@ -597,21 +597,31 @@ include("TestBlockSparseArraysUtils.jl")
c = @view b[4:8, 4:8]
@test c isa SubArray{<:Any,<:Any,<:BlockSparseArray}
@test size(c) == (5, 5)
@test block_nstored(c) == 2
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test block_nstored(c) == 2 broken = VERSION > v"1.11-"
@test blocksize(c) == (2, 2)
@test blocklengths.(axes(c)) == ([2, 3], [2, 3])
@test size(c[Block(1, 1)]) == (2, 2)
@test c[Block(1, 1)] == a[Block(2, 2)[2:3, 2:3]]
@test size(c[Block(2, 2)]) == (3, 3)
@test c[Block(2, 2)] == a[Block(1, 1)[1:3, 1:3]]
@test size(c[Block(2, 1)]) == (3, 2)
@test iszero(c[Block(2, 1)])
@test size(c[Block(1, 2)]) == (2, 3)
@test iszero(c[Block(1, 2)])
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test size(c[Block(1, 1)]) == (2, 2) broken = VERSION ≥ v"1.11-"
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test c[Block(1, 1)] == a[Block(2, 2)[2:3, 2:3]] broken = VERSION ≥ v"1.11-"
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test size(c[Block(2, 2)]) == (3, 3) broken = VERSION ≥ v"1.11-"
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test c[Block(2, 2)] == a[Block(1, 1)[1:3, 1:3]] broken = VERSION ≥ v"1.11-"
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test size(c[Block(2, 1)]) == (3, 2) broken = VERSION ≥ v"1.11-"
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test iszero(c[Block(2, 1)]) broken = VERSION ≥ v"1.11-"
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test size(c[Block(1, 2)]) == (2, 3) broken = VERSION ≥ v"1.11-"
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test iszero(c[Block(1, 2)]) broken = VERSION ≥ v"1.11-"

x = randn(elt, 3, 3)
c[Block(2, 2)] = x
@test c[Block(2, 2)] == x
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test c[Block(2, 2)] == x broken = VERSION ≥ v"1.11-"
@test a[Block(1, 1)[1:3, 1:3]] == x

a = BlockSparseArray{elt}([2, 3], [3, 4])
Expand All @@ -637,10 +647,13 @@ include("TestBlockSparseArraysUtils.jl")
@test copy(b) == a
@test blocksize(b) == (2, 2)
@test blocklengths.(axes(b)) == ([4, 4], [4, 4])
@test b[Block(1, 1)] == a[Block.(1:2), Block.(1:2)]
@test b[Block(2, 1)] == a[Block.(3:4), Block.(1:2)]
@test b[Block(1, 2)] == a[Block.(1:2), Block.(3:4)]
@test b[Block(2, 2)] == a[Block.(3:4), Block.(3:4)]
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
if VERSION < v"1.11-"
@test b[Block(1, 1)] == a[Block.(1:2), Block.(1:2)]
@test b[Block(2, 1)] == a[Block.(3:4), Block.(1:2)]
@test b[Block(1, 2)] == a[Block.(1:2), Block.(3:4)]
@test b[Block(2, 2)] == a[Block.(3:4), Block.(3:4)]
end
c = @view b[Block(2, 2)]
@test blocksize(c) == (1, 1)
@test c == a[Block.(3:4), Block.(3:4)]
Expand Down Expand Up @@ -669,13 +682,17 @@ include("TestBlockSparseArraysUtils.jl")
@test copy(b) == a[J, J]
@test blocksize(b) == (2, 2)
@test blocklengths.(axes(b)) == ([4, 4], [4, 4])
@test b[Block(1, 1)] == Array(a)[[7, 8, 5, 6], [7, 8, 5, 6]]
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test b[Block(1, 1)] == Array(a)[[7, 8, 5, 6], [7, 8, 5, 6]] broken =
VERSION ≥ v"1.11-"
c = @views b[Block(1, 1)][2:3, 2:3]
@test c == Array(a)[[8, 5], [8, 5]]
@test copy(c) == Array(a)[[8, 5], [8, 5]]
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test copy(c) == Array(a)[[8, 5], [8, 5]] broken = VERSION ≥ v"1.11-"
c = @view b[Block(1, 1)[2:3, 2:3]]
@test c == Array(a)[[8, 5], [8, 5]]
@test copy(c) == Array(a)[[8, 5], [8, 5]]
# TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539).
@test copy(c) == Array(a)[[8, 5], [8, 5]] broken = VERSION ≥ v"1.11-"
end

# TODO: Add more tests of this, it may
Expand Down
11 changes: 11 additions & 0 deletions NDTensors/src/lib/LabelledNumbers/src/labelledunitrange.jl
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,17 @@ end

labelled_getindex(a, index) = labelled(unlabel(a)[index], label(a))

# This is required in Julia 1.11 and above since
# the generic `axes(a::AbstractRange)` definition was removed
# and replaced with a generic `axes(a)` definition that
# is written in terms of `Base.unchecked_oneto`, i.e.:
# ```julia
# map(Base.unchecked_oneto, size(A))
# ```
# which returns a `Base.OneTo` instead of a `LabelledUnitRange`.
Base.axes(a::LabelledUnitRange) = Base.oneto.(size(a))

# TODO: Delete this definition, this should output a `Base.OneTo`.
Base.OneTo(stop::LabelledInteger) = labelled(Base.OneTo(unlabel(stop)), label(stop))

# Fix ambiguity error with `AbstractRange` definition in `Base`.
Expand Down
4 changes: 2 additions & 2 deletions NDTensors/test/Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
StridedViews = "4db3bf67-4bd7-4b4e-b153-31dc3fb37143"
TBLIS = "48530278-0828-4a49-9772-0f3830dfa1e9"
TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
Expand All @@ -30,5 +29,6 @@ Metal = "1.1.0"
[extras]
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
cuTENSOR = "011b41b2-24ef-40a8-b3eb-fa098493e9e1"
Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
TBLIS = "48530278-0828-4a49-9772-0f3830dfa1e9"
14 changes: 14 additions & 0 deletions NDTensors/test/test_dense.jl
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,20 @@ NDTensors.dim(i::MyInd) = i.dim
@test A[2, 2] == Aview[1, 1]
end

## Testing A .= α .* B .+ β .* A
C = copy(A)
@allowscalar fill!(B, zero(elt))
β = elt(2)
α = elt(1)
permutedims!!(A, B, (1, 2), (a, b) -> +(*(β, a), *(α, b)))
@allowscalar 2 .* C == A
randn!(B)
C = copy(A)
A = permutedims!!(A, B, (1, 2), (a, b) -> +(*(β, a), *(α, b)))
@allowscalar for i in 1:3, j in 1:4
@test A[i, j] == α * B[i, j] + β * C[i, j]
end

## add elt around 2.0 to preserve the eltype of A.
@test data(A * elt(2.0)) == data(elt(2.0) * A)

Expand Down
13 changes: 10 additions & 3 deletions NDTensors/test/test_emptynumber.jl
Original file line number Diff line number Diff line change
@@ -1,14 +1,21 @@
@eval module $(gensym())
using NDTensors
using LinearAlgebra: norm
using NDTensors: EmptyNumber
using Test: @testset, @test, @test_throws

const 𝟎 = NDTensors.EmptyNumber()
const 𝟎 = EmptyNumber()

@testset "NDTensors.EmptyNumber" begin
x = 2.3

@test complex(𝟎) == 𝟎
@test complex(NDTensors.EmptyNumber) == Complex{NDTensors.EmptyNumber}
@test complex(EmptyNumber) == Complex{EmptyNumber}

# Promotion
for T in (Bool, Float32, Float64, Complex{Float32}, Complex{Float64})
@test promote_type(EmptyNumber, T) === T
@test promote_type(T, EmptyNumber) === T
end

# Basic arithmetic
@test 𝟎 + 𝟎 == 𝟎
Expand Down
4 changes: 2 additions & 2 deletions Project.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
name = "ITensors"
uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
authors = ["Matthew Fishman <[email protected]>", "Miles Stoudenmire <[email protected]>"]
version = "0.6.19"
version = "0.6.22"

[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
Expand Down Expand Up @@ -73,7 +73,7 @@ TupleTools = "1.2"
VectorInterface = "0.4"
Zeros = "0.3.0"
ZygoteRules = "0.2.2"
julia = "1.6"
julia = "1.10"

[extras]
ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
module ITensorsPackageCompilerExt
include("compile.jl")
include("precompile_itensors.jl")
end
2 changes: 1 addition & 1 deletion jenkins/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,4 @@ RUN apt-get update && \

ARG JULIA=1.6
RUN curl -s -L https://julialang-s3.julialang.org/bin/linux/x64/${JULIA}/julia-${JULIA}-latest-linux-x86_64.tar.gz | \
tar -C /usr/local -x -z --strip-components=1 -f -
tar -C /usr/local -x -z --strip-components=1 -f -
78 changes: 74 additions & 4 deletions jenkins/Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ pipeline {
stages {
stage('GPU Testing') {
parallel {
stage('NDTensorsCUDAExt julia-1.10') {
stage('NDTensorsCUDAExt julia-lts') {
options {
timeout(time: 45, unit: 'MINUTES')
}
Expand All @@ -31,7 +31,31 @@ pipeline {
'''
}
}
stage('NDTensorscuTENSORExt julia-1.10') {
stage('NDTensorsCUDAExt julia-1') {
options {
timeout(time: 45, unit: 'MINUTES')
}
agent {
dockerfile {
label 'gpu&&v100'
filename 'Dockerfile'
dir 'jenkins'
additionalBuildArgs '--build-arg JULIA=1.11'
args '--gpus "device=1"'
}
}
environment {
HOME = pwd(tmp:true)
OMP_NUM_THREADS = 4
JULIA_NUM_THREADS = 4
}
steps {
sh '''
julia -e 'using Pkg; Pkg.activate(temp=true); Pkg.Registry.update(); Pkg.update(); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["cuda"])'
'''
}
}
stage('NDTensorscuTENSORExt julia-lts') {
options {
timeout(time: 45, unit: 'MINUTES')
}
Expand All @@ -55,7 +79,52 @@ pipeline {
'''
}
}
stage('NDTensorsMetalExt Julia-1.10'){
stage('NDTensorscuTENSORExt julia-1') {
options {
timeout(time: 45, unit: 'MINUTES')
}
agent {
dockerfile {
label 'gpu&&v100'
filename 'Dockerfile'
dir 'jenkins'
additionalBuildArgs '--build-arg JULIA=1.11'
args '--gpus "device=1"'
}
}
environment {
HOME = pwd(tmp:true)
OMP_NUM_THREADS = 4
JULIA_NUM_THREADS = 4
}
steps {
sh '''
julia -e 'using Pkg; Pkg.activate(temp=true); Pkg.Registry.update(); Pkg.update(); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["cutensor"])'
'''
}
}
stage('NDTensorsMetalExt Julia-lts'){
options {
timeout(time: 45, unit: 'MINUTES')
}
agent {
label 'm1'
}
environment{
PATH="${env.HOME}/.juliaup/bin:${env.PATH}"
PLATFORM = 'macos'
}
steps{
sh '''
juliaup add lts
juliaup default lts
'''
sh '''
julia -e 'using Pkg; Pkg.activate(temp=true); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["metal"])'
'''
}
}
stage('NDTensorsMetalExt Julia-1'){
options {
timeout(time: 45, unit: 'MINUTES')
}
Expand All @@ -69,6 +138,7 @@ pipeline {
steps{
sh '''
juliaup update
juliaup default release
'''
sh '''
julia -e 'using Pkg; Pkg.activate(temp=true); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["metal"])'
Expand All @@ -78,4 +148,4 @@ pipeline {
}
}
}
}
}
Loading

0 comments on commit 11f0bf2

Please sign in to comment.