diff --git a/.github/workflows/main_test_itensors_base_macos_windows.yml b/.github/workflows/main_test_itensors_base_macos_windows.yml index a92e581dd6..f9f39d16fd 100644 --- a/.github/workflows/main_test_itensors_base_macos_windows.yml +++ b/.github/workflows/main_test_itensors_base_macos_windows.yml @@ -14,7 +14,7 @@ jobs: strategy: matrix: version: - - '1.6' + - 'lts' - '1' os: # - windows-latest # windows tests are failing for an unknow reason, disable for now diff --git a/.github/workflows/test_itensormps_ubuntu.yml b/.github/workflows/test_itensormps_ubuntu.yml index fb81c37280..cf80a2a8b3 100644 --- a/.github/workflows/test_itensormps_ubuntu.yml +++ b/.github/workflows/test_itensormps_ubuntu.yml @@ -16,7 +16,7 @@ jobs: strategy: matrix: version: - - '1.6' + - 'lts' - '1' os: - ubuntu-latest diff --git a/.github/workflows/test_itensors_base_ubuntu.yml b/.github/workflows/test_itensors_base_ubuntu.yml index d185c5de56..7004dae0f7 100644 --- a/.github/workflows/test_itensors_base_ubuntu.yml +++ b/.github/workflows/test_itensors_base_ubuntu.yml @@ -16,7 +16,7 @@ jobs: strategy: matrix: version: - - '1.6' + - 'lts' - '1' os: - ubuntu-latest diff --git a/.github/workflows/test_ndtensors.yml b/.github/workflows/test_ndtensors.yml index 67a46f39b0..1f3f4c8be1 100644 --- a/.github/workflows/test_ndtensors.yml +++ b/.github/workflows/test_ndtensors.yml @@ -16,7 +16,7 @@ jobs: strategy: matrix: version: - - '1.6' + - 'lts' - '1' os: - ubuntu-latest diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index 65771b0c5a..cdfd6102bb 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.3.43" +version = "0.3.46" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" @@ -68,7 +68,7 @@ EllipsisNotation = "1.8" FillArrays = "1" Folds = "0.2.8" Functors = "0.2, 0.3, 0.4" -GPUArraysCore = "0.1" +GPUArraysCore = "0.1, 0.2" HDF5 = "0.14, 0.15, 0.16, 0.17" HalfIntegers = "1" InlineStrings = "1" @@ -91,7 +91,7 @@ TimerOutputs = "0.5.5" TupleTools = "1.2.0" VectorInterface = "0.4.2" cuTENSOR = "2" -julia = "1.6" +julia = "1.10" [extras] AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" diff --git a/NDTensors/src/empty/EmptyTensor.jl b/NDTensors/src/empty/EmptyTensor.jl index 3fcbf4ed63..f5e04e0652 100644 --- a/NDTensors/src/empty/EmptyTensor.jl +++ b/NDTensors/src/empty/EmptyTensor.jl @@ -152,8 +152,6 @@ end setindex!!(T::EmptyTensor, x, I...) = setindex(T, x, I...) -promote_rule(::Type{EmptyNumber}, ::Type{T}) where {T<:Number} = T - function promote_rule( ::Type{T1}, ::Type{T2} ) where {T1<:EmptyStorage{EmptyNumber},T2<:TensorStorage} diff --git a/NDTensors/src/emptynumber.jl b/NDTensors/src/emptynumber.jl index d84abb52ca..1d9799b740 100644 --- a/NDTensors/src/emptynumber.jl +++ b/NDTensors/src/emptynumber.jl @@ -18,6 +18,14 @@ convert(::Type{T}, x::EmptyNumber) where {T<:Number} = T(zero(T)) # This helps with defining `norm` of `EmptyStorage{EmptyNumber}`. AbstractFloat(::EmptyNumber) = zero(AbstractFloat) +# Extra definitions fix ambiguity errors. +Base.promote_rule(::Type{EmptyNumber}, T::Type{<:Number}) = T +Base.promote_rule(T::Type{<:Number}, ::Type{EmptyNumber}) = T +Base.promote_rule(::Type{EmptyNumber}, ::Type{Bool}) = Bool +Base.promote_rule(::Type{Bool}, ::Type{EmptyNumber}) = Bool +Base.promote_rule(::Type{EmptyNumber}, T::Type{Complex{R}}) where {R<:Real} = T +Base.promote_rule(T::Type{Complex{R}}, ::Type{EmptyNumber}) where {R<:Real} = T + # Basic arithmetic (::EmptyNumber + ::EmptyNumber) = EmptyNumber() (::EmptyNumber - ::EmptyNumber) = EmptyNumber() diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl index 10f8d6e35d..a7797766d4 100644 --- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl +++ b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl @@ -597,21 +597,31 @@ include("TestBlockSparseArraysUtils.jl") c = @view b[4:8, 4:8] @test c isa SubArray{<:Any,<:Any,<:BlockSparseArray} @test size(c) == (5, 5) - @test block_nstored(c) == 2 + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test block_nstored(c) == 2 broken = VERSION > v"1.11-" @test blocksize(c) == (2, 2) @test blocklengths.(axes(c)) == ([2, 3], [2, 3]) - @test size(c[Block(1, 1)]) == (2, 2) - @test c[Block(1, 1)] == a[Block(2, 2)[2:3, 2:3]] - @test size(c[Block(2, 2)]) == (3, 3) - @test c[Block(2, 2)] == a[Block(1, 1)[1:3, 1:3]] - @test size(c[Block(2, 1)]) == (3, 2) - @test iszero(c[Block(2, 1)]) - @test size(c[Block(1, 2)]) == (2, 3) - @test iszero(c[Block(1, 2)]) + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test size(c[Block(1, 1)]) == (2, 2) broken = VERSION ≥ v"1.11-" + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test c[Block(1, 1)] == a[Block(2, 2)[2:3, 2:3]] broken = VERSION ≥ v"1.11-" + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test size(c[Block(2, 2)]) == (3, 3) broken = VERSION ≥ v"1.11-" + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test c[Block(2, 2)] == a[Block(1, 1)[1:3, 1:3]] broken = VERSION ≥ v"1.11-" + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test size(c[Block(2, 1)]) == (3, 2) broken = VERSION ≥ v"1.11-" + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test iszero(c[Block(2, 1)]) broken = VERSION ≥ v"1.11-" + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test size(c[Block(1, 2)]) == (2, 3) broken = VERSION ≥ v"1.11-" + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test iszero(c[Block(1, 2)]) broken = VERSION ≥ v"1.11-" x = randn(elt, 3, 3) c[Block(2, 2)] = x - @test c[Block(2, 2)] == x + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test c[Block(2, 2)] == x broken = VERSION ≥ v"1.11-" @test a[Block(1, 1)[1:3, 1:3]] == x a = BlockSparseArray{elt}([2, 3], [3, 4]) @@ -637,10 +647,13 @@ include("TestBlockSparseArraysUtils.jl") @test copy(b) == a @test blocksize(b) == (2, 2) @test blocklengths.(axes(b)) == ([4, 4], [4, 4]) - @test b[Block(1, 1)] == a[Block.(1:2), Block.(1:2)] - @test b[Block(2, 1)] == a[Block.(3:4), Block.(1:2)] - @test b[Block(1, 2)] == a[Block.(1:2), Block.(3:4)] - @test b[Block(2, 2)] == a[Block.(3:4), Block.(3:4)] + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + if VERSION < v"1.11-" + @test b[Block(1, 1)] == a[Block.(1:2), Block.(1:2)] + @test b[Block(2, 1)] == a[Block.(3:4), Block.(1:2)] + @test b[Block(1, 2)] == a[Block.(1:2), Block.(3:4)] + @test b[Block(2, 2)] == a[Block.(3:4), Block.(3:4)] + end c = @view b[Block(2, 2)] @test blocksize(c) == (1, 1) @test c == a[Block.(3:4), Block.(3:4)] @@ -669,13 +682,17 @@ include("TestBlockSparseArraysUtils.jl") @test copy(b) == a[J, J] @test blocksize(b) == (2, 2) @test blocklengths.(axes(b)) == ([4, 4], [4, 4]) - @test b[Block(1, 1)] == Array(a)[[7, 8, 5, 6], [7, 8, 5, 6]] + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test b[Block(1, 1)] == Array(a)[[7, 8, 5, 6], [7, 8, 5, 6]] broken = + VERSION ≥ v"1.11-" c = @views b[Block(1, 1)][2:3, 2:3] @test c == Array(a)[[8, 5], [8, 5]] - @test copy(c) == Array(a)[[8, 5], [8, 5]] + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test copy(c) == Array(a)[[8, 5], [8, 5]] broken = VERSION ≥ v"1.11-" c = @view b[Block(1, 1)[2:3, 2:3]] @test c == Array(a)[[8, 5], [8, 5]] - @test copy(c) == Array(a)[[8, 5], [8, 5]] + # TODO: Fix in Julia 1.11 (https://github.com/ITensor/ITensors.jl/pull/1539). + @test copy(c) == Array(a)[[8, 5], [8, 5]] broken = VERSION ≥ v"1.11-" end # TODO: Add more tests of this, it may diff --git a/NDTensors/src/lib/LabelledNumbers/src/labelledunitrange.jl b/NDTensors/src/lib/LabelledNumbers/src/labelledunitrange.jl index 2e4379daba..a82c666987 100644 --- a/NDTensors/src/lib/LabelledNumbers/src/labelledunitrange.jl +++ b/NDTensors/src/lib/LabelledNumbers/src/labelledunitrange.jl @@ -37,6 +37,17 @@ end labelled_getindex(a, index) = labelled(unlabel(a)[index], label(a)) +# This is required in Julia 1.11 and above since +# the generic `axes(a::AbstractRange)` definition was removed +# and replace with a generic `axes(a)` definition that +# is written in terms of `Base.unchecked_oneto`, i.e.: +# ```julia +# map(Base.unchecked_oneto, size(A)) +# ``` +# which returns a `Base.OneTo` instead of a `LabelledUnitRange`. +Base.axes(a::LabelledUnitRange) = Base.oneto.(size(a)) + +# TODO: Delete this definition, this should output a `Base.OneTo`. Base.OneTo(stop::LabelledInteger) = labelled(Base.OneTo(unlabel(stop)), label(stop)) # Fix ambiguity error with `AbstractRange` definition in `Base`. diff --git a/NDTensors/test/Project.toml b/NDTensors/test/Project.toml index 5244d98895..15787f7fbb 100644 --- a/NDTensors/test/Project.toml +++ b/NDTensors/test/Project.toml @@ -18,7 +18,6 @@ Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" StridedViews = "4db3bf67-4bd7-4b4e-b153-31dc3fb37143" -TBLIS = "48530278-0828-4a49-9772-0f3830dfa1e9" TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" @@ -30,5 +29,6 @@ Metal = "1.1.0" [extras] AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e" CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba" -Metal = "dde4c033-4e86-420c-a63e-0dd931031962" cuTENSOR = "011b41b2-24ef-40a8-b3eb-fa098493e9e1" +Metal = "dde4c033-4e86-420c-a63e-0dd931031962" +TBLIS = "48530278-0828-4a49-9772-0f3830dfa1e9" diff --git a/NDTensors/test/test_dense.jl b/NDTensors/test/test_dense.jl index 94c52f4132..c2b327811a 100644 --- a/NDTensors/test/test_dense.jl +++ b/NDTensors/test/test_dense.jl @@ -83,6 +83,20 @@ NDTensors.dim(i::MyInd) = i.dim @test A[2, 2] == Aview[1, 1] end + ## Testing A .= α .* B .+ β .* A + C = copy(A) + @allowscalar fill!(B, zero(elt)) + β = elt(2) + α = elt(1) + permutedims!!(A, B, (1, 2), (a, b) -> +(*(β, a), *(α, b))) + @allowscalar 2 .* C == A + randn!(B) + C = copy(A) + A = permutedims!!(A, B, (1, 2), (a, b) -> +(*(β, a), *(α, b))) + @allowscalar for i in 1:3, j in 1:4 + @test A[i, j] == α * B[i, j] + β * C[i, j] + end + ## add elt around 2.0 to preserve the eltype of A. @test data(A * elt(2.0)) == data(elt(2.0) * A) diff --git a/NDTensors/test/test_emptynumber.jl b/NDTensors/test/test_emptynumber.jl index dc8357a115..73d82117f5 100644 --- a/NDTensors/test/test_emptynumber.jl +++ b/NDTensors/test/test_emptynumber.jl @@ -1,14 +1,21 @@ @eval module $(gensym()) -using NDTensors +using LinearAlgebra: norm +using NDTensors: EmptyNumber using Test: @testset, @test, @test_throws -const 𝟎 = NDTensors.EmptyNumber() +const 𝟎 = EmptyNumber() @testset "NDTensors.EmptyNumber" begin x = 2.3 @test complex(𝟎) == 𝟎 - @test complex(NDTensors.EmptyNumber) == Complex{NDTensors.EmptyNumber} + @test complex(EmptyNumber) == Complex{EmptyNumber} + + # Promotion + for T in (Bool, Float32, Float64, Complex{Float32}, Complex{Float64}) + @test promote_type(EmptyNumber, T) === T + @test promote_type(T, EmptyNumber) === T + end # Basic arithmetic @test 𝟎 + 𝟎 == 𝟎 diff --git a/Project.toml b/Project.toml index 0e9b91a39e..f7f9e50b0a 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensors" uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5" authors = ["Matthew Fishman ", "Miles Stoudenmire "] -version = "0.6.19" +version = "0.6.22" [deps] Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" @@ -73,7 +73,7 @@ TupleTools = "1.2" VectorInterface = "0.4" Zeros = "0.3.0" ZygoteRules = "0.2.2" -julia = "1.6" +julia = "1.10" [extras] ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" diff --git a/ext/ITensorsPackageCompilerExt/ITensorsPackageCompilerExt.jl b/ext/ITensorsPackageCompilerExt/ITensorsPackageCompilerExt.jl index 78b7166e74..3bb7b57ac1 100644 --- a/ext/ITensorsPackageCompilerExt/ITensorsPackageCompilerExt.jl +++ b/ext/ITensorsPackageCompilerExt/ITensorsPackageCompilerExt.jl @@ -1,4 +1,3 @@ module ITensorsPackageCompilerExt include("compile.jl") -include("precompile_itensors.jl") end diff --git a/jenkins/Dockerfile b/jenkins/Dockerfile index c677086f2f..e2367a8c01 100644 --- a/jenkins/Dockerfile +++ b/jenkins/Dockerfile @@ -13,4 +13,4 @@ RUN apt-get update && \ ARG JULIA=1.6 RUN curl -s -L https://julialang-s3.julialang.org/bin/linux/x64/${JULIA}/julia-${JULIA}-latest-linux-x86_64.tar.gz | \ - tar -C /usr/local -x -z --strip-components=1 -f - + tar -C /usr/local -x -z --strip-components=1 -f - \ No newline at end of file diff --git a/jenkins/Jenkinsfile b/jenkins/Jenkinsfile index 5ac7f6a62b..4fd308d59a 100644 --- a/jenkins/Jenkinsfile +++ b/jenkins/Jenkinsfile @@ -7,7 +7,7 @@ pipeline { stages { stage('GPU Testing') { parallel { - stage('NDTensorsCUDAExt julia-1.10') { + stage('NDTensorsCUDAExt julia-lts') { options { timeout(time: 45, unit: 'MINUTES') } @@ -31,7 +31,31 @@ pipeline { ''' } } - stage('NDTensorscuTENSORExt julia-1.10') { + stage('NDTensorsCUDAExt julia-1') { + options { + timeout(time: 45, unit: 'MINUTES') + } + agent { + dockerfile { + label 'gpu&&v100' + filename 'Dockerfile' + dir 'jenkins' + additionalBuildArgs '--build-arg JULIA=1.11' + args '--gpus "device=1"' + } + } + environment { + HOME = pwd(tmp:true) + OMP_NUM_THREADS = 4 + JULIA_NUM_THREADS = 4 + } + steps { + sh ''' + julia -e 'using Pkg; Pkg.activate(temp=true); Pkg.Registry.update(); Pkg.update(); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["cuda"])' + ''' + } + } + stage('NDTensorscuTENSORExt julia-lts') { options { timeout(time: 45, unit: 'MINUTES') } @@ -55,7 +79,52 @@ pipeline { ''' } } - stage('NDTensorsMetalExt Julia-1.10'){ + stage('NDTensorscuTENSORExt julia-1') { + options { + timeout(time: 45, unit: 'MINUTES') + } + agent { + dockerfile { + label 'gpu&&v100' + filename 'Dockerfile' + dir 'jenkins' + additionalBuildArgs '--build-arg JULIA=1.11' + args '--gpus "device=1"' + } + } + environment { + HOME = pwd(tmp:true) + OMP_NUM_THREADS = 4 + JULIA_NUM_THREADS = 4 + } + steps { + sh ''' + julia -e 'using Pkg; Pkg.activate(temp=true); Pkg.Registry.update(); Pkg.update(); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["cutensor"])' + ''' + } + } + stage('NDTensorsMetalExt Julia-lts'){ + options { + timeout(time: 45, unit: 'MINUTES') + } + agent { + label 'm1' + } + environment{ + PATH="${env.HOME}/.juliaup/bin:${env.PATH}" + PLATFORM = 'macos' + } + steps{ + sh ''' + juliaup add lts + juliaup default lts + ''' + sh ''' + julia -e 'using Pkg; Pkg.activate(temp=true); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["metal"])' + ''' + } + } + stage('NDTensorsMetalExt Julia-1'){ options { timeout(time: 45, unit: 'MINUTES') } @@ -69,6 +138,7 @@ pipeline { steps{ sh ''' juliaup update + juliaup default release ''' sh ''' julia -e 'using Pkg; Pkg.activate(temp=true); Pkg.develop(path="./NDTensors"); Pkg.develop(path="."); Pkg.test("NDTensors"; test_args=["metal"])' @@ -78,4 +148,4 @@ pipeline { } } } -} +} \ No newline at end of file diff --git a/src/broadcast.jl b/src/broadcast.jl index 3e91d64714..4f0848d580 100644 --- a/src/broadcast.jl +++ b/src/broadcast.jl @@ -395,6 +395,13 @@ end # C .= β .* C .+ α .* A .* B # +struct axpby{Alpha,Beta} <: Function + alpha::Alpha + beta::Beta +end + +(f::axpby)(y, x) = x * f.alpha + y * f.beta + ## TODO this code doesn't actually get called function Base.copyto!( T::ITensor, @@ -414,7 +421,9 @@ function Base.copyto!( A, C = C, A end if !isnothing(A) && !isnothing(C) && !isnothing(α) && !isnothing(β) - map!((r, t) -> β * r + α * t, T, T, A) + # The following fails to compile on some GPU backends. + # map!((r, t) -> β * r + α * t, T, T, A) + map!(axpby(α, β), T, T, A) else bc_bc_α = find_type(Broadcasted, bc_α.args) if isnothing(α) diff --git a/src/lib/ITensorMPS/test/Project.toml b/src/lib/ITensorMPS/test/Project.toml index 127ce9e065..7345ba4285 100644 --- a/src/lib/ITensorMPS/test/Project.toml +++ b/src/lib/ITensorMPS/test/Project.toml @@ -6,5 +6,6 @@ ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" OptimKit = "77e91f04-9b3b-57a6-a776-40b61faaebe0" +StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb" Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f" diff --git a/src/lib/ITensorMPS/test/base/Project.toml b/src/lib/ITensorMPS/test/base/Project.toml index 895d21daab..0522110b73 100644 --- a/src/lib/ITensorMPS/test/base/Project.toml +++ b/src/lib/ITensorMPS/test/base/Project.toml @@ -4,4 +4,5 @@ HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819" NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" +StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb" diff --git a/src/lib/ITensorMPS/test/base/test_mpo.jl b/src/lib/ITensorMPS/test/base/test_mpo.jl index 32746a752b..929eaa2d7b 100644 --- a/src/lib/ITensorMPS/test/base/test_mpo.jl +++ b/src/lib/ITensorMPS/test/base/test_mpo.jl @@ -2,6 +2,7 @@ using Combinatorics using ITensors using NDTensors: scalartype +using StableRNGs: StableRNG using Test include(joinpath(@__DIR__, "utils", "util.jl")) @@ -812,11 +813,11 @@ end N = 6 sites = [Index(2, "Site,n=$n") for n in 1:N] seed = 623 - mt = MersenneTwister(seed) - K = random_mps(mt, sites) + rng = StableRNG(seed) + K = random_mps(rng, sites) L = MPO(K) - result = sample(mt, L) - @test result ≈ [1, 2, 1, 1, 2, 2] + result = sample(rng, L) + @test result ≈ [1, 1, 2, 1, 1, 1] end @testset "MPO+MPO sum (directsum)" begin diff --git a/src/packagecompile/compile.jl b/src/packagecompile/compile.jl index 0034ab0971..093e73dac3 100644 --- a/src/packagecompile/compile.jl +++ b/src/packagecompile/compile.jl @@ -40,12 +40,6 @@ function compile(; backend=Algorithm"PackageCompiler"(), kwargs...) return compile(backend; kwargs...) end -function compile(::Algorithm; kwargs...) - return error( - "As of ITensors v0.5, you must install `PackageCompiler.jl` (`using Pkg: Pkg; Pkg.add(\"PackageCompiler\")`) and execute `using PackageCompiler` to use `ITensors.compile`.", - ) -end - @doc """ ITensors.compile(; dir = "$(default_compile_dir())", filename = "$(default_compile_filename())") diff --git a/test/Project.toml b/test/Project.toml index 34fe930acb..509e3a49f0 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -12,9 +12,11 @@ KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" OptimKit = "77e91f04-9b3b-57a6-a776-40b61faaebe0" +PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8" diff --git a/test/ext/ITensorsPackageCompilerExt/Project.toml b/test/ext/ITensorsPackageCompilerExt/Project.toml new file mode 100644 index 0000000000..9b60c20322 --- /dev/null +++ b/test/ext/ITensorsPackageCompilerExt/Project.toml @@ -0,0 +1,4 @@ +[deps] +ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" +PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d" +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/test/ext/ITensorsPackageCompilerExt/runtests.jl b/test/ext/ITensorsPackageCompilerExt/runtests.jl new file mode 100644 index 0000000000..08cae7400b --- /dev/null +++ b/test/ext/ITensorsPackageCompilerExt/runtests.jl @@ -0,0 +1,10 @@ +@eval module $(gensym()) +using ITensors: ITensors +using PackageCompiler: PackageCompiler +using Test: @testset, @test +@testset "ITensorsPackageCompilerExt" begin + # Testing `ITensors.compile` would take too long so we just check + # that `ITensorsPackageCompilerExt` overloads `ITensors.compile`. + @test hasmethod(ITensors.compile, Tuple{ITensors.Algorithm"PackageCompiler"}) +end +end diff --git a/test/runtests.jl b/test/runtests.jl index 60782dca54..88af73a005 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -20,6 +20,7 @@ ITensors.disable_threaded_blocksparse() "threading", "lib/ContractionSequenceOptimization", "ext/ITensorsChainRulesCoreExt", + "ext/ITensorsPackageCompilerExt", "ext/ITensorsVectorInterfaceExt", ] @time for dir in dirs