Remove Weierstrass example for now. Add Laplace solver example
JoeyT1994 committed Apr 4, 2024
1 parent 60526c7 commit ae165fb
Showing 7 changed files with 112 additions and 170 deletions.
58 changes: 58 additions & 0 deletions examples/2d_laplace_solver.jl
@@ -0,0 +1,58 @@
using Test
using TensorNetworkFunctionals

using Graphs: SimpleGraph, uniform_tree, binary_tree, random_regular_graph, is_tree
using NamedGraphs:
NamedGraph,
named_grid,
vertices,
named_comb_tree,
rename_vertices,
random_bfs_tree,
undirected_graph
using ITensors: ITensors, Index, siteinds, dim, tags, replaceprime!, MPO, MPS, inner
using ITensorNetworks:
ITensorNetwork,
dmrg,
TTN,
maxlinkdim
using Dictionaries: Dictionary
using SplitApplyCombine: group
using Random: seed!
using Distributions: Uniform

using UnicodePlots

#Solve the 2D Laplace equation on a random tree
seed!(1234)
L = 14
g = NamedGraph(SimpleGraph(uniform_tree(L)))
s = siteinds("S=1/2", g)

vertex_to_dimension_map = Dictionary(vertices(g), [(v[1] % 2) + 1 for v in vertices(g)])
vertex_to_bit_map = Dictionary(vertices(g), [ceil(Int64, v[1] * 0.5) for v in vertices(g)])
bit_map = BitMap(vertex_to_bit_map, vertex_to_dimension_map)

ψ_fxy = 0.1 * rand_itn(s, bit_map; link_space=2)
∇ = laplacian_operator(s, bit_map; scale=false)
∇ = truncate(∇; cutoff=1e-12)
@show maxlinkdim(∇)

dmrg_kwargs = (nsweeps=25, normalize=true, maxdim=20, cutoff=1e-12, outputlevel=1, nsites=2)
ϕ_fxy = dmrg(∇, TTN(itensornetwork(ψ_fxy)); dmrg_kwargs...)
ϕ_fxy = ITensorNetworkFunction(ITensorNetwork(ϕ_fxy), bit_map)

final_energy = inner(TTN(itensornetwork(ϕ_fxy))', ∇, TTN(itensornetwork(ϕ_fxy)))
#Smallest eigenvalue in this case should be -8
@show final_energy

n_grid = 100
x_vals, y_vals = grid_points(bit_map, n_grid, 1), grid_points(bit_map, n_grid, 2)
vals = zeros((length(x_vals), length(y_vals)))
for (i, x) in enumerate(x_vals)
for (j, y) in enumerate(y_vals)
vals[i, j] = real(calculate_fxyz(ϕ_fxy, [x, y]))
end
end

show(heatmap(vals))
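One informal way to read the "should be -8" expectation in the example (a sketch, assuming the unscaled [1.0, -2.0, 1.0] stencil acts along each dimension like the standard tridiagonal second-difference matrix with zero boundary values): on a single dimension of N = 2^7 grid points its eigenvalues are

$$ \lambda_k = -2 + 2\cos\!\left(\frac{k\pi}{N+1}\right), \qquad k = 1, \dots, N, $$

which fill the interval (-4, 0). The operator built here sums one such stencil per dimension, so its smallest eigenvalue approaches -4 - 4 = -8 from above as the grid is refined, and DMRG with normalize=true should report a final_energy slightly above -8.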
150 changes: 0 additions & 150 deletions examples/weierstrass.jl

This file was deleted.

14 changes: 9 additions & 5 deletions src/TensorNetworkFunctionals.jl
@@ -6,15 +6,16 @@ include("itensornetworkfunction.jl")
include("itensornetworks_elementary_functions.jl")
include("itensornetworks_elementary_operators.jl")

export ITensorNetworkFunction
export ITensorNetworkFunction, itensornetwork
export BitMap,
default_dimension_map,
vertex,
calculate_xyz,
calculate_x,
calculate_bit_values,
dimension,
base
base,
grid_points
export const_itensornetwork,
exp_itensornetwork,
cosh_itensornetwork,
@@ -24,10 +25,13 @@ export const_itensornetwork,
sin_itensornetwork,
get_edge_toward_root,
polynomial_itensornetwork,
random_itensornetworkfunction,
laplacian_operator,
derivative_operator
export const_itn, poly_itn, cosh_itn, sinh_itn, tanh_itn, exp_itn, sin_itn, cos_itn
derivative_operator,
identity_operator
export const_itn,
poly_itn, cosh_itn, sinh_itn, tanh_itn, exp_itn, sin_itn, cos_itn, rand_itn
export calculate_fx, calculate_fxyz
export operate, apply_gx_operator, multiply
export operate, operator, multiply

end
8 changes: 8 additions & 0 deletions src/bitmaps.jl
@@ -125,3 +125,11 @@ end
function calculate_bit_values(bm::BitMap, x::Float64; kwargs...)
return calculate_bit_values(bm, [x], [1]; kwargs...)
end

function grid_points(bm::BitMap, N::Int64, dimension::Int64)
vals = Vector{Float64}
L = length(vertices(bm, dimension))
a = round(base(bm)^L / N)
grid_points = [i * (a / base(bm)^L) for i in 0:(N + 1)]
return filter(x -> x <= 1, grid_points)
end
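A small worked sketch of what the new grid_points helper produces (illustrative values only: base-2 encoding with 7 bits along the requested dimension, as in the Laplace example above, and N = 100); the spacing is an integer multiple of the lattice resolution base(bm)^(-L) and the list is clipped to the unit interval:

```julia
# Mirrors the body of grid_points for L = 7 bits, base b = 2, N = 100.
L, N, b = 7, 100, 2
a = round(b^L / N)                        # round(128 / 100) = 1.0
pts = [i * (a / b^L) for i in 0:(N + 1)]  # multiples of the spacing 1/128
pts = filter(x -> x <= 1, pts)            # keep points inside [0, 1]
length(pts), last(pts)                    # (102, 0.7890625) for these values
```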
5 changes: 5 additions & 0 deletions src/itensornetworks_elementary_functions.jl
@@ -248,6 +248,10 @@ function polynomial_itensornetwork(
return ITensorNetworkFunction(ψ, bit_map)
end

function random_itensornetwork(s::IndsNetwork, bit_map; kwargs...)
return ITensorNetworkFunction(randomITensorNetwork(s; kwargs...), bit_map)
end

const const_itn = const_itensornetwork
const poly_itn = polynomial_itensornetwork
const cosh_itn = cosh_itensornetwork
@@ -256,3 +260,4 @@ const tanh_itn = tanh_itensornetwork
const exp_itn = exp_itensornetwork
const sin_itn = sin_itensornetwork
const cos_itn = cos_itensornetwork
const rand_itn = random_itensornetwork
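A minimal usage sketch for the new random constructor via its rand_itn alias (assuming site indices s and a bit_map set up as in the Laplace example; link_space sets the initial bond dimension):

```julia
# Random initial guess for a function on the bits described by bit_map
# (values illustrative, mirroring the new example file).
ψ_init = 0.1 * rand_itn(s, bit_map; link_space=2)
```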
35 changes: 26 additions & 9 deletions src/itensornetworks_elementary_operators.jl
@@ -14,7 +14,9 @@ using ITensors:
contract
using ITensorNetworks: IndsNetwork, ITensorNetwork, TTN, TreeTensorNetwork, combine_linkinds

function plus_shift_ttn(s::IndsNetwork, bit_map; dimension=default_dimension())
function plus_shift_ttn(
s::IndsNetwork, bit_map; dimension=default_dimension(), boundary_value=[0.0]
)
@assert is_tree(s)
ttn_op = OpSum()
dim_vertices = vertices(bit_map, dimension)
@@ -63,6 +65,7 @@ function stencil(
shifts::Vector{Float64},
delta_power::Int64;
dimension=default_dimension(),
scale=true,
truncate_kwargs...,
)
@assert length(shifts) == 3
@@ -73,24 +76,38 @@
stencil_op = plus_shift + minus_shift + no_shift
stencil_op = truncate(stencil_op; truncate_kwargs...)

for v in vertices(bit_map, dimension)
stencil_op[v] = (base(bit_map)^delta_power) * stencil_op[v]
if scale
for v in vertices(bit_map, dimension)
stencil_op[v] = (base(bit_map)^delta_power) * stencil_op[v]
end
end

return truncate(stencil_op; truncate_kwargs...)
return stencil_op
end

function laplacian_operator(s::IndsNetwork, bit_map; kwargs...)
return stencil(s, bit_map, [1.0, -2.0, 1.0], 2; kwargs...)
function laplacian_operator(
s::IndsNetwork, bit_map; dimensions=[i for i in 1:dimension(bit_map)], kwargs...
)
remaining_dims = copy(dimensions)
∇ = stencil(s, bit_map, [1.0, -2.0, 1.0], 2; dimension=first(remaining_dims), kwargs...)
popfirst!(remaining_dims)
for rd in remaining_dims
∇ += stencil(s, bit_map, [1.0, -2.0, 1.0], 2; dimension=rd, kwargs...)
end
return ∇
end

function derivative_operator(s::IndsNetwork, bit_map; kwargs...)
return 0.5 * stencil(s, bit_map, [1.0, 0.0, -1.0], 1; kwargs...)
end

function apply_gx_operator(gx::ITensorNetworkFunction)
gx = copy(gx)
operator = itensornetwork(gx)
function identity_operator(s::IndsNetwork, bit_map; kwargs...)
return stencil(s, bit_map, [0.0, 1.0, 0.0], 0; kwargs...)
end

function operator(fx::ITensorNetworkFunction)
fx = copy(fx)
operator = itensornetwork(fx)
s = siteinds(operator)
for v in vertices(operator)
sind = s[v]
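For orientation, the finite-difference identities these stencil weights encode (a sketch, assuming the grid spacing along a dimension carrying L bits in base b is Δ = b^{-L}):

$$ f''(x) \approx \frac{f(x+\Delta) - 2f(x) + f(x-\Delta)}{\Delta^{2}}, \qquad f'(x) \approx \frac{f(x+\Delta) - f(x-\Delta)}{2\Delta}. $$

With scale=true, each of the L vertices of that dimension is multiplied by base^delta_power, i.e. an overall factor b^{L·delta_power} = Δ^{-delta_power}, while scale=false (used in the new example) keeps the raw weights; identity_operator is the no-shift stencil [0.0, 1.0, 0.0] with delta_power = 0.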
12 changes: 6 additions & 6 deletions src/itensornetworksutils.jl
@@ -2,13 +2,13 @@ using ITensors: Index, dim, inds
using ITensorNetworks: randomITensorNetwork, IndsNetwork

"""Build the order L tensor corresponding to fx(x): x ∈ [0,1]."""
function build_full_rank_tensor(L::Int64, fx::Function)
inds = [Index(2, "$i") for i in 1:L]
dims = Tuple([2 for i in 1:L])
function build_full_rank_tensor(L::Int64, fx::Function; base::Int64=2)
inds = [Index(base, "$i") for i in 1:L]
dims = Tuple([base for i in 1:L])
array = zeros(dims)
for i in 0:(2^(L) - 1)
xis = digits(i; base=2, pad=L)
x = sum([xis[i] / (2^i) for i in 1:L])
for i in 0:(base^(L) - 1)
xis = digits(i; base, pad=L)
x = sum([xis[i] / (base^i) for i in 1:L])
array[Tuple(xis + ones(Int64, (L)))...] = fx(x)
end

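The loop in build_full_rank_tensor enumerates every length-L digit string and stores f at the corresponding grid point x = Σ_{i=1}^{L} x_i b^{-i}, so the tensor holds f on a uniform base-b grid of [0, 1). A standalone sketch of that digit-to-coordinate map (illustrative values b = 2, L = 3):

```julia
# Reproduces the coordinate encoding used in build_full_rank_tensor.
b, L = 2, 3
for i in 0:(b^L - 1)
    xis = digits(i; base=b, pad=L)      # little-endian digits of i
    x = sum(xis[k] / b^k for k in 1:L)  # x = Σ_k x_k * b^(-k) ∈ [0, 1)
    println((i, xis, x))
end
```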
