From f99e8d68a55254207d024e0372a005e67df6af87 Mon Sep 17 00:00:00 2001 From: Sathvik Bhagavan Date: Wed, 13 Dec 2023 05:11:57 +0000 Subject: [PATCH 1/5] docs: update makedocs syntax to Documenter@1 --- docs/make.jl | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/docs/make.jl b/docs/make.jl index 5c7f3dc41..968b2b651 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -10,14 +10,8 @@ using Plots include("pages.jl") makedocs(sitename = "Surrogates.jl", - strict = [ - :doctest, - :linkcheck, - :parse_error, - :example_block, - # Other available options are - # :autodocs_block, :cross_references, :docs_block, :eval_block, :example_block, :footnote, :meta_block, :missing_docs, :setup_block - ], + linkcheck = true, + warnonly = [:missing_docs], format = Documenter.HTML(analytics = "UA-90474609-3", assets = ["assets/favicon.ico"], canonical = "https://docs.sciml.ai/Surrogates/stable/"), From 9224fd87d4530159dd6570e8ab9e5b54eced0338 Mon Sep 17 00:00:00 2001 From: Sathvik Bhagavan Date: Wed, 13 Dec 2023 05:12:40 +0000 Subject: [PATCH 2/5] chore: format the repo --- docs/make.jl | 12 +- docs/pages.jl | 76 ++++----- lib/SurrogatesAbstractGPs/test/runtests.jl | 4 +- lib/SurrogatesFlux/src/SurrogatesFlux.jl | 6 +- lib/SurrogatesFlux/test/runtests.jl | 16 +- lib/SurrogatesMOE/X-QMC2.csv | 151 ++++++++++++++++++ lib/SurrogatesMOE/X-QMC3.csv | 151 ++++++++++++++++++ lib/SurrogatesMOE/src/SurrogatesMOE.jl | 48 +++--- lib/SurrogatesMOE/test/runtests.jl | 16 +- .../src/SurrogatesPolyChaos.jl | 14 +- lib/SurrogatesPolyChaos/test/runtests.jl | 2 +- .../src/SurrogatesRandomForest.jl | 3 +- lib/SurrogatesSVM/src/SurrogatesSVM.jl | 2 +- src/Earth.jl | 22 +-- src/GEKPLS.jl | 54 +++---- src/Kriging.jl | 30 ++-- src/Lobachevsky.jl | 6 +- src/Optimization.jl | 119 +++++++------- src/PolynomialChaos.jl | 14 +- src/Radials.jl | 24 +-- src/Sampling.jl | 38 +++-- src/Surrogates.jl | 18 ++- src/VariableFidelity.jl | 62 +++---- src/VirtualStrategy.jl | 17 +- test/GEKPLS.jl | 10 +- test/SectionSampleTests.jl | 14 +- test/VariableFidelity.jl | 16 +- test/optimization.jl | 28 ++-- test/parallel.jl | 46 ++++-- test/runtests.jl | 60 +++++-- 30 files changed, 730 insertions(+), 349 deletions(-) create mode 100644 lib/SurrogatesMOE/X-QMC2.csv create mode 100644 lib/SurrogatesMOE/X-QMC3.csv diff --git a/docs/make.jl b/docs/make.jl index 968b2b651..2025f4ba3 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -10,11 +10,11 @@ using Plots include("pages.jl") makedocs(sitename = "Surrogates.jl", - linkcheck = true, - warnonly = [:missing_docs], - format = Documenter.HTML(analytics = "UA-90474609-3", - assets = ["assets/favicon.ico"], - canonical = "https://docs.sciml.ai/Surrogates/stable/"), - pages = pages) + linkcheck = true, + warnonly = [:missing_docs], + format = Documenter.HTML(analytics = "UA-90474609-3", + assets = ["assets/favicon.ico"], + canonical = "https://docs.sciml.ai/Surrogates/stable/"), + pages = pages) deploydocs(repo = "github.com/SciML/Surrogates.jl.git") diff --git a/docs/pages.jl b/docs/pages.jl index ce76c3fdb..1574aff5a 100644 --- a/docs/pages.jl +++ b/docs/pages.jl @@ -1,39 +1,39 @@ pages = ["index.md" - "Tutorials" => [ - "Basics" => "tutorials.md", - "Radials" => "radials.md", - "Kriging" => "kriging.md", - "Gaussian Process" => "abstractgps.md", - "Lobachevsky" => "lobachevsky.md", - "Linear" => "LinearSurrogate.md", - "InverseDistance" => "InverseDistance.md", - "RandomForest" => "randomforest.md", - "SecondOrderPolynomial" => "secondorderpoly.md", - "NeuralSurrogate" => "neural.md", 
- "Wendland" => "wendland.md", - "Polynomial Chaos" => "polychaos.md", - "Variable Fidelity" => "variablefidelity.md", - "Gradient Enhanced Kriging" => "gek.md", - "GEKPLS" => "gekpls.md", - "MOE" => "moe.md", - "Parallel Optimization" => "parallel.md" - ] - "User guide" => [ - "Samples" => "samples.md", - "Surrogates" => "surrogate.md", - "Optimization" => "optimizations.md", - ] - "Benchmarks" => [ - "Sphere function" => "sphere_function.md", - "Lp norm" => "lp.md", - "Rosenbrock" => "rosenbrock.md", - "Tensor product" => "tensor_prod.md", - "Cantilever beam" => "cantilever.md", - "Water Flow function" => "water_flow.md", - "Welded beam function" => "welded_beam.md", - "Branin function" => "BraninFunction.md", - "Ackley function" => "ackley.md", - "Gramacy & Lee Function" => "gramacylee.md", - "Salustowicz Benchmark" => "Salustowicz.md", - "Multi objective optimization" => "multi_objective_opt.md", - ]] + "Tutorials" => [ + "Basics" => "tutorials.md", + "Radials" => "radials.md", + "Kriging" => "kriging.md", + "Gaussian Process" => "abstractgps.md", + "Lobachevsky" => "lobachevsky.md", + "Linear" => "LinearSurrogate.md", + "InverseDistance" => "InverseDistance.md", + "RandomForest" => "randomforest.md", + "SecondOrderPolynomial" => "secondorderpoly.md", + "NeuralSurrogate" => "neural.md", + "Wendland" => "wendland.md", + "Polynomial Chaos" => "polychaos.md", + "Variable Fidelity" => "variablefidelity.md", + "Gradient Enhanced Kriging" => "gek.md", + "GEKPLS" => "gekpls.md", + "MOE" => "moe.md", + "Parallel Optimization" => "parallel.md", +] + "User guide" => [ + "Samples" => "samples.md", + "Surrogates" => "surrogate.md", + "Optimization" => "optimizations.md", +] + "Benchmarks" => [ + "Sphere function" => "sphere_function.md", + "Lp norm" => "lp.md", + "Rosenbrock" => "rosenbrock.md", + "Tensor product" => "tensor_prod.md", + "Cantilever beam" => "cantilever.md", + "Water Flow function" => "water_flow.md", + "Welded beam function" => "welded_beam.md", + "Branin function" => "BraninFunction.md", + "Ackley function" => "ackley.md", + "Gramacy & Lee Function" => "gramacylee.md", + "Salustowicz Benchmark" => "Salustowicz.md", + "Multi objective optimization" => "multi_objective_opt.md", +]] diff --git a/lib/SurrogatesAbstractGPs/test/runtests.jl b/lib/SurrogatesAbstractGPs/test/runtests.jl index 33f0d5aa9..fb553f0e0 100644 --- a/lib/SurrogatesAbstractGPs/test/runtests.jl +++ b/lib/SurrogatesAbstractGPs/test/runtests.jl @@ -27,7 +27,7 @@ using Surrogates: sample, SobolSample x_points = sample(5, lb, ub, SobolSample()) y_points = f.(x_points) agp1D = AbstractGPSurrogate([x_points[1]], [y_points[1]], - gp = GP(SqExponentialKernel()), Σy = 0.05) + gp = GP(SqExponentialKernel()), Σy = 0.05) x_new = 2.5 y_actual = f.(x_new) for i in 2:length(x_points) @@ -88,7 +88,7 @@ using Surrogates: sample, SobolSample b = 6 my_k_EI1 = AbstractGPSurrogate(x, y) surrogate_optimize(objective_function, EI(), a, b, my_k_EI1, RandomSample(), - maxiters = 200, num_new_samples = 155) + maxiters = 200, num_new_samples = 155) end @testset "Optimization ND" begin diff --git a/lib/SurrogatesFlux/src/SurrogatesFlux.jl b/lib/SurrogatesFlux/src/SurrogatesFlux.jl index a078b473b..f97085fb3 100644 --- a/lib/SurrogatesFlux/src/SurrogatesFlux.jl +++ b/lib/SurrogatesFlux/src/SurrogatesFlux.jl @@ -26,8 +26,8 @@ NeuralSurrogate(x,y,lb,ub,model,loss,opt,n_echos) """ function NeuralSurrogate(x, y, lb, ub; model = Chain(Dense(length(x[1]), 1), first), - loss = (x, y) -> Flux.mse(model(x), y), opt = Descent(0.01), - n_echos::Int = 1) 
+ loss = (x, y) -> Flux.mse(model(x), y), opt = Descent(0.01), + n_echos::Int = 1) X = vec.(collect.(x)) data = zip(X, y) ps = Flux.params(model) @@ -59,7 +59,7 @@ function add_point!(my_n::NeuralSurrogate, x_new, y_new) end X = vec.(collect.(my_n.x)) data = zip(X, my_n.y) - for epoch in 1:my_n.n_echos + for epoch in 1:(my_n.n_echos) Flux.train!(my_n.loss, my_n.ps, data, my_n.opt) end nothing diff --git a/lib/SurrogatesFlux/test/runtests.jl b/lib/SurrogatesFlux/test/runtests.jl index 3e0c1d7ae..eaea94910 100644 --- a/lib/SurrogatesFlux/test/runtests.jl +++ b/lib/SurrogatesFlux/test/runtests.jl @@ -19,7 +19,7 @@ using SafeTestsets my_opt = Descent(0.01) n_echos = 1 my_neural = NeuralSurrogate(x, y, a, b, model = my_model, loss = my_loss, opt = my_opt, - n_echos = 1) + n_echos = 1) my_neural_kwargs = NeuralSurrogate(x, y, a, b) add_point!(my_neural, 8.5, 20.0) add_point!(my_neural, [3.2, 3.5], [7.4, 8.0]) @@ -37,7 +37,7 @@ using SafeTestsets my_opt = Descent(0.01) n_echos = 1 my_neural = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss, - opt = my_opt, n_echos = 1) + opt = my_opt, n_echos = 1) my_neural_kwargs = NeuralSurrogate(x, y, lb, ub) my_neural((3.5, 1.49)) my_neural([3.4, 1.4]) @@ -54,7 +54,7 @@ using SafeTestsets my_model = Chain(Dense(1, 2)) my_loss(x, y) = Flux.mse(my_model(x), y) surrogate = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss, - opt = my_opt, n_echos = 1) + opt = my_opt, n_echos = 1) surr_kwargs = NeuralSurrogate(x, y, lb, ub) f = x -> [x[1], x[2]^2] @@ -66,7 +66,7 @@ using SafeTestsets my_model = Chain(Dense(2, 2)) my_loss(x, y) = Flux.mse(my_model(x), y) surrogate = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss, - opt = my_opt, n_echos = 1) + opt = my_opt, n_echos = 1) surrogate_kwargs = NeuralSurrogate(x, y, lb, ub) surrogate((1.0, 2.0)) x_new = (2.0, 2.0) @@ -85,7 +85,7 @@ using SafeTestsets n_echos = 1 my_neural_ND_neural = NeuralSurrogate(x, y, lb, ub) surrogate_optimize(objective_function_ND, SRBF(), lb, ub, my_neural_ND_neural, - SobolSample(), maxiters = 15) + SobolSample(), maxiters = 15) # AD Compatibility lb = 0.0 @@ -101,7 +101,7 @@ using SafeTestsets my_opt = Descent(0.01) n_echos = 1 my_neural = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss, - opt = my_opt, n_echos = 1) + opt = my_opt, n_echos = 1) g = x -> my_neural'(x) g(3.4) end @@ -120,7 +120,7 @@ using SafeTestsets my_opt = Descent(0.01) n_echos = 1 my_neural = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss, - opt = my_opt, n_echos = 1) + opt = my_opt, n_echos = 1) g = x -> Zygote.gradient(my_neural, x) g((2.0, 5.0)) end @@ -141,7 +141,7 @@ using SafeTestsets my_opt = Descent(0.01) n_echos = 1 my_neural = NeuralSurrogate(x, y, lb, ub, model = my_model, loss = my_loss, - opt = my_opt, n_echos = 1) + opt = my_opt, n_echos = 1) Zygote.gradient(x -> sum(my_neural(x)), (2.0, 5.0)) my_rad = RadialBasis(x, y, lb, ub, rad = linearRadial()) diff --git a/lib/SurrogatesMOE/X-QMC2.csv b/lib/SurrogatesMOE/X-QMC2.csv new file mode 100644 index 000000000..026e09ab5 --- /dev/null +++ b/lib/SurrogatesMOE/X-QMC2.csv @@ -0,0 +1,151 @@ +1,2 +-0.9765625,-0.3359375 +0.0234375,0.6640625 +0.5234375,-0.8359375 +-0.4765625,0.1640625 +-0.2265625,-0.5859375 +0.7734375,0.4140625 +0.2734375,-0.0859375 +-0.7265625,0.9140625 +-0.6015625,-0.9609375 +0.3984375,0.0390625 +0.8984375,-0.4609375 +-0.1015625,0.5390625 +-0.3515625,-0.2109375 +0.6484375,0.7890625 +0.1484375,-0.7109375 +-0.8515625,0.2890625 +-0.7890625,-0.6484375 +0.2109375,0.3515625 
+0.7109375,-0.1484375 +-0.2890625,0.8515625 +-0.0390625,-0.3984375 +0.9609375,0.6015625 +0.4609375,-0.8984375 +-0.5390625,0.1015625 +-0.6640625,-0.0234375 +0.3359375,0.9765625 +0.8359375,-0.5234375 +-0.1640625,0.4765625 +-0.4140625,-0.7734375 +0.5859375,0.2265625 +0.0859375,-0.2734375 +-0.9140625,0.7265625 +-0.8828125,-0.8671875 +0.1171875,0.1328125 +0.6171875,-0.3671875 +-0.3828125,0.6328125 +-0.1328125,-0.1171875 +0.8671875,0.8828125 +0.3671875,-0.6171875 +-0.6328125,0.3828125 +-0.5078125,-0.4921875 +0.4921875,0.5078125 +0.9921875,-0.9921875 +-0.0078125,0.0078125 +-0.2578125,-0.7421875 +0.7421875,0.2578125 +0.2421875,-0.2421875 +-0.7578125,0.7578125 +-0.8203125,-0.1796875 +0.1796875,0.8203125 +0.6796875,-0.6796875 +-0.3203125,0.3203125 +-0.0703125,-0.9296875 +0.9296875,0.0703125 +0.4296875,-0.4296875 +-0.5703125,0.5703125 +-0.6953125,-0.5546875 +0.3046875,0.4453125 +0.8046875,-0.0546875 +-0.1953125,0.9453125 +-0.4453125,-0.3046875 +0.5546875,0.6953125 +0.0546875,-0.8046875 +-0.9453125,0.1953125 +-0.9609375,-0.6015625 +0.0390625,0.3984375 +0.5390625,-0.1015625 +-0.4609375,0.8984375 +-0.2109375,-0.3515625 +0.7890625,0.6484375 +0.2890625,-0.8515625 +-0.7109375,0.1484375 +-0.5859375,-0.2265625 +0.4140625,0.7734375 +0.9140625,-0.7265625 +-0.0859375,0.2734375 +-0.3359375,-0.9765625 +0.6640625,0.0234375 +0.1640625,-0.4765625 +-0.8359375,0.5234375 +-0.7734375,-0.4140625 +0.2265625,0.5859375 +0.7265625,-0.9140625 +-0.2734375,0.0859375 +-0.0234375,-0.6640625 +0.9765625,0.3359375 +0.4765625,-0.1640625 +-0.5234375,0.8359375 +-0.6484375,-0.7890625 +0.3515625,0.2109375 +0.8515625,-0.2890625 +-0.1484375,0.7109375 +-0.3984375,-0.0390625 +0.6015625,0.9609375 +0.1015625,-0.5390625 +-0.8984375,0.4609375 +-0.9296875,-0.0703125 +0.0703125,0.9296875 +0.5703125,-0.5703125 +-0.4296875,0.4296875 +-0.1796875,-0.8203125 +0.8203125,0.1796875 +0.3203125,-0.3203125 +-0.6796875,0.6796875 +-0.5546875,-0.6953125 +0.4453125,0.3046875 +0.9453125,-0.1953125 +-0.0546875,0.8046875 +-0.3046875,-0.4453125 +0.6953125,0.5546875 +0.1953125,-0.9453125 +-0.8046875,0.0546875 +-0.8671875,-0.8828125 +0.1328125,0.1171875 +0.6328125,-0.3828125 +-0.3671875,0.6171875 +-0.1171875,-0.1328125 +0.8828125,0.8671875 +0.3828125,-0.6328125 +-0.6171875,0.3671875 +-0.7421875,-0.2578125 +0.2578125,0.7421875 +0.7578125,-0.7578125 +-0.2421875,0.2421875 +-0.4921875,-0.5078125 +0.5078125,0.4921875 +0.0078125,-0.0078125 +-0.9921875,0.9921875 +-0.98828125,-0.00390625 +0.01171875,0.99609375 +0.51171875,-0.50390625 +-0.48828125,0.49609375 +-0.23828125,-0.75390625 +0.76171875,0.24609375 +0.26171875,-0.25390625 +-0.73828125,0.74609375 +-0.61328125,-0.62890625 +0.38671875,0.37109375 +0.88671875,-0.12890625 +-0.11328125,0.87109375 +-0.36328125,-0.37890625 +0.63671875,0.62109375 +0.13671875,-0.87890625 +-0.86328125,0.12109375 +-0.80078125,-0.94140625 +0.19921875,0.05859375 +0.69921875,-0.44140625 +-0.30078125,0.55859375 +-0.05078125,-0.19140625 +0.94921875,0.80859375 diff --git a/lib/SurrogatesMOE/X-QMC3.csv b/lib/SurrogatesMOE/X-QMC3.csv new file mode 100644 index 000000000..026e09ab5 --- /dev/null +++ b/lib/SurrogatesMOE/X-QMC3.csv @@ -0,0 +1,151 @@ +1,2 +-0.9765625,-0.3359375 +0.0234375,0.6640625 +0.5234375,-0.8359375 +-0.4765625,0.1640625 +-0.2265625,-0.5859375 +0.7734375,0.4140625 +0.2734375,-0.0859375 +-0.7265625,0.9140625 +-0.6015625,-0.9609375 +0.3984375,0.0390625 +0.8984375,-0.4609375 +-0.1015625,0.5390625 +-0.3515625,-0.2109375 +0.6484375,0.7890625 +0.1484375,-0.7109375 +-0.8515625,0.2890625 +-0.7890625,-0.6484375 +0.2109375,0.3515625 
+0.7109375,-0.1484375 +-0.2890625,0.8515625 +-0.0390625,-0.3984375 +0.9609375,0.6015625 +0.4609375,-0.8984375 +-0.5390625,0.1015625 +-0.6640625,-0.0234375 +0.3359375,0.9765625 +0.8359375,-0.5234375 +-0.1640625,0.4765625 +-0.4140625,-0.7734375 +0.5859375,0.2265625 +0.0859375,-0.2734375 +-0.9140625,0.7265625 +-0.8828125,-0.8671875 +0.1171875,0.1328125 +0.6171875,-0.3671875 +-0.3828125,0.6328125 +-0.1328125,-0.1171875 +0.8671875,0.8828125 +0.3671875,-0.6171875 +-0.6328125,0.3828125 +-0.5078125,-0.4921875 +0.4921875,0.5078125 +0.9921875,-0.9921875 +-0.0078125,0.0078125 +-0.2578125,-0.7421875 +0.7421875,0.2578125 +0.2421875,-0.2421875 +-0.7578125,0.7578125 +-0.8203125,-0.1796875 +0.1796875,0.8203125 +0.6796875,-0.6796875 +-0.3203125,0.3203125 +-0.0703125,-0.9296875 +0.9296875,0.0703125 +0.4296875,-0.4296875 +-0.5703125,0.5703125 +-0.6953125,-0.5546875 +0.3046875,0.4453125 +0.8046875,-0.0546875 +-0.1953125,0.9453125 +-0.4453125,-0.3046875 +0.5546875,0.6953125 +0.0546875,-0.8046875 +-0.9453125,0.1953125 +-0.9609375,-0.6015625 +0.0390625,0.3984375 +0.5390625,-0.1015625 +-0.4609375,0.8984375 +-0.2109375,-0.3515625 +0.7890625,0.6484375 +0.2890625,-0.8515625 +-0.7109375,0.1484375 +-0.5859375,-0.2265625 +0.4140625,0.7734375 +0.9140625,-0.7265625 +-0.0859375,0.2734375 +-0.3359375,-0.9765625 +0.6640625,0.0234375 +0.1640625,-0.4765625 +-0.8359375,0.5234375 +-0.7734375,-0.4140625 +0.2265625,0.5859375 +0.7265625,-0.9140625 +-0.2734375,0.0859375 +-0.0234375,-0.6640625 +0.9765625,0.3359375 +0.4765625,-0.1640625 +-0.5234375,0.8359375 +-0.6484375,-0.7890625 +0.3515625,0.2109375 +0.8515625,-0.2890625 +-0.1484375,0.7109375 +-0.3984375,-0.0390625 +0.6015625,0.9609375 +0.1015625,-0.5390625 +-0.8984375,0.4609375 +-0.9296875,-0.0703125 +0.0703125,0.9296875 +0.5703125,-0.5703125 +-0.4296875,0.4296875 +-0.1796875,-0.8203125 +0.8203125,0.1796875 +0.3203125,-0.3203125 +-0.6796875,0.6796875 +-0.5546875,-0.6953125 +0.4453125,0.3046875 +0.9453125,-0.1953125 +-0.0546875,0.8046875 +-0.3046875,-0.4453125 +0.6953125,0.5546875 +0.1953125,-0.9453125 +-0.8046875,0.0546875 +-0.8671875,-0.8828125 +0.1328125,0.1171875 +0.6328125,-0.3828125 +-0.3671875,0.6171875 +-0.1171875,-0.1328125 +0.8828125,0.8671875 +0.3828125,-0.6328125 +-0.6171875,0.3671875 +-0.7421875,-0.2578125 +0.2578125,0.7421875 +0.7578125,-0.7578125 +-0.2421875,0.2421875 +-0.4921875,-0.5078125 +0.5078125,0.4921875 +0.0078125,-0.0078125 +-0.9921875,0.9921875 +-0.98828125,-0.00390625 +0.01171875,0.99609375 +0.51171875,-0.50390625 +-0.48828125,0.49609375 +-0.23828125,-0.75390625 +0.76171875,0.24609375 +0.26171875,-0.25390625 +-0.73828125,0.74609375 +-0.61328125,-0.62890625 +0.38671875,0.37109375 +0.88671875,-0.12890625 +-0.11328125,0.87109375 +-0.36328125,-0.37890625 +0.63671875,0.62109375 +0.13671875,-0.87890625 +-0.86328125,0.12109375 +-0.80078125,-0.94140625 +0.19921875,0.05859375 +0.69921875,-0.44140625 +-0.30078125,0.55859375 +-0.05078125,-0.19140625 +0.94921875,0.80859375 diff --git a/lib/SurrogatesMOE/src/SurrogatesMOE.jl b/lib/SurrogatesMOE/src/SurrogatesMOE.jl index 1833290ab..a2b8df0cc 100644 --- a/lib/SurrogatesMOE/src/SurrogatesMOE.jl +++ b/lib/SurrogatesMOE/src/SurrogatesMOE.jl @@ -1,10 +1,10 @@ module SurrogatesMOE import Surrogates: AbstractSurrogate, linearRadial, cubicRadial, multiquadricRadial, - thinplateRadial, RadialBasisStructure, RadialBasis, - InverseDistanceSurrogate, Kriging, LobachevskyStructure, - LobachevskySurrogate, NeuralStructure, PolyChaosStructure, - LinearSurrogate, add_point! 
+ thinplateRadial, RadialBasisStructure, RadialBasis, + InverseDistanceSurrogate, Kriging, LobachevskyStructure, + LobachevskySurrogate, NeuralStructure, PolyChaosStructure, + LinearSurrogate, add_point! export MOE @@ -46,7 +46,7 @@ function MOE(x, y, expert_types; ndim = 1, n_clusters = 2, quantile = 10) # https://github.com/davidavdav/GaussianMixtures.jl/issues/21 jitter_vals = ((rand(eltype(x_and_y_train), size(x_and_y_train))) ./ 10000) gm_cluster = GMM(n_clusters, x_and_y_train + jitter_vals, kind = :full, nInit = 50, - nIter = 20) + nIter = 20) mvn_distributions = _create_clusters_distributions(gm_cluster, ndim, n_clusters) cluster_classifier_train = _cluster_predict(gm_cluster, x_and_y_train) clusters_train = _cluster_values(x_and_y_train, cluster_classifier_train, n_clusters) @@ -55,7 +55,7 @@ function MOE(x, y, expert_types; ndim = 1, n_clusters = 2, quantile = 10) best_models = [] for i in 1:n_clusters best_model = _find_best_model(clusters_train[i], clusters_test[i], ndim, - expert_types) + expert_types) push!(best_models, best_model) end # X = values[:, 1:ndim] @@ -63,7 +63,7 @@ function MOE(x, y, expert_types; ndim = 1, n_clusters = 2, quantile = 10) #return MOE(X, y, gm_cluster, mvn_distributions, best_models) return MOE(x, y, gm_cluster, mvn_distributions, best_models, expert_types, ndim, - n_clusters) + n_clusters) end """ @@ -224,7 +224,7 @@ finds best model for each set of clustered values by validating against the clus """ function _find_best_model(clustered_train_values, clustered_test_values, dim, - enabled_expert_types) + enabled_expert_types) # find upper and lower bounds for clustered_train and test values concatenated x_vec = [a[1:dim] for a in clustered_train_values] @@ -247,7 +247,7 @@ function _find_best_model(clustered_train_values, clustered_test_values, dim, # call on _surrogate_builder with clustered_train_vals, enabled expert types, lb, ub surr_vec = _surrogate_builder(enabled_expert_types, length(enabled_expert_types), x_vec, - y_vec, lb, ub) + y_vec, lb, ub) # use the models to find best model after validating against test data and return best model best_rmse = Inf @@ -274,9 +274,9 @@ function _surrogate_builder(local_kind, k, x, y, lb, ub) if local_kind[i][1] == "RadialBasis" #fit and append to local_surr my_local_i = RadialBasis(x, y, lb, ub, - rad = local_kind[i].radial_function, - scale_factor = local_kind[i].scale_factor, - sparse = local_kind[i].sparse) + rad = local_kind[i].radial_function, + scale_factor = local_kind[i].scale_factor, + sparse = local_kind[i].sparse) push!(local_surr, my_local_i) elseif local_kind[i][1] == "Kriging" @@ -286,12 +286,12 @@ function _surrogate_builder(local_kind, k, x, y, lb, ub) end my_local_i = Kriging(x, y, lb, ub, p = local_kind[i].p, - theta = local_kind[i].theta) + theta = local_kind[i].theta) push!(local_surr, my_local_i) elseif local_kind[i][1] == "GEK" my_local_i = GEK(x, y, lb, ub, p = local_kind[i].p, - theta = local_kind[i].theta) + theta = local_kind[i].theta) push!(local_surr, my_local_i) elseif local_kind[i] == "LinearSurrogate" @@ -304,21 +304,21 @@ function _surrogate_builder(local_kind, k, x, y, lb, ub) elseif local_kind[i][1] == "LobachevskySurrogate" my_local_i = LobachevskyStructure(x, y, lb, ub, - alpha = local_kind[i].alpha, - n = local_kind[i].n, - sparse = local_kind[i].sparse) + alpha = local_kind[i].alpha, + n = local_kind[i].n, + sparse = local_kind[i].sparse) push!(local_surr, my_local_i) elseif local_kind[i][1] == "NeuralSurrogate" my_local_i = NeuralSurrogate(x, y, lb, ub, - model = 
local_kind[i].model, - loss = local_kind[i].loss, opt = local_kind[i].opt, - n_echos = local_kind[i].n_echos) + model = local_kind[i].model, + loss = local_kind[i].loss, opt = local_kind[i].opt, + n_echos = local_kind[i].n_echos) push!(local_surr, my_local_i) elseif local_kind[i][1] == "RandomForestSurrogate" my_local_i = RandomForestSurrogate(x, y, lb, ub, - num_round = local_kind[i].num_round) + num_round = local_kind[i].num_round) push!(local_surr, my_local_i) elseif local_kind[i] == "SecondOrderPolynomialSurrogate" @@ -327,7 +327,7 @@ function _surrogate_builder(local_kind, k, x, y, lb, ub) elseif local_kind[i][1] == "Wendland" my_local_i = Wendand(x, y, lb, ub, eps = local_kind[i].eps, - maxiters = local_kind[i].maxiters, tol = local_kind[i].tol) + maxiters = local_kind[i].maxiters, tol = local_kind[i].tol) push!(local_surr, my_local_i) elseif local_kind[i][1] == "PolynomialChaosSurrogate" @@ -363,7 +363,7 @@ function add_point!(m::MOE, x, y) # https://github.com/davidavdav/GaussianMixtures.jl/issues/21 jitter_vals = ((rand(eltype(x_and_y_train), size(x_and_y_train))) ./ 10000) gm_cluster = GMM(m.nc, x_and_y_train + jitter_vals, kind = :full, nInit = 50, - nIter = 20) + nIter = 20) mvn_distributions = _create_clusters_distributions(gm_cluster, m.nd, m.nc) cluster_classifier_train = _cluster_predict(gm_cluster, x_and_y_train) clusters_train = _cluster_values(x_and_y_train, cluster_classifier_train, m.nc) @@ -372,7 +372,7 @@ function add_point!(m::MOE, x, y) best_models = [] for i in 1:(m.nc) best_model = _find_best_model(clusters_train[i], clusters_test[i], m.nd, - m.e) + m.e) push!(best_models, best_model) end m.c = gm_cluster diff --git a/lib/SurrogatesMOE/test/runtests.jl b/lib/SurrogatesMOE/test/runtests.jl index a6aa4c4c9..f47ea8f63 100644 --- a/lib/SurrogatesMOE/test/runtests.jl +++ b/lib/SurrogatesMOE/test/runtests.jl @@ -23,12 +23,12 @@ Random.seed!(StableRNG(SEED), SEED) # Radials vs MOE RAD_1D = RadialBasis(x, y, lb, ub, rad = linearRadial(), scale_factor = 1.0, - sparse = false) + sparse = false) expert_types = [ RadialBasisStructure(radial_function = linearRadial(), scale_factor = 1.0, - sparse = false), + sparse = false), RadialBasisStructure(radial_function = cubicRadial(), scale_factor = 1.0, - sparse = false), + sparse = false), ] MOE_1D_RAD_RAD = MOE(x, y, expert_types) @@ -83,7 +83,7 @@ end expert_types = [ KrigingStructure(p = [1.0, 1.0], theta = [1.0, 1.0]), RadialBasisStructure(radial_function = linearRadial(), scale_factor = 1.0, - sparse = false), + sparse = false), ] moe_nd_krig_rad = MOE(x, y, expert_types, ndim = 2, quantile = 5) moe_pred_vals = moe_nd_krig_rad.(x_test) @@ -123,7 +123,7 @@ end # test if MOE handles 3 experts including SurrogatesFlux expert_types = [ RadialBasisStructure(radial_function = linearRadial(), scale_factor = 1.0, - sparse = false), + sparse = false), LinearStructure(), InverseDistanceStructure(p = 1.0), ] @@ -161,9 +161,9 @@ end expert_types = [ RadialBasisStructure(radial_function = linearRadial(), scale_factor = 1.0, - sparse = false), + sparse = false), RadialBasisStructure(radial_function = cubicRadial(), scale_factor = 1.0, - sparse = false), + sparse = false), ] moe = MOE(x, y, expert_types) add_point!(moe, 0.5, 5.0) @@ -188,7 +188,7 @@ end y = discont_NDIM.(x) expert_types = [InverseDistanceStructure(p = 1.0), RadialBasisStructure(radial_function = linearRadial(), scale_factor = 1.0, - sparse = false), + sparse = false), ] moe_nd_inv_rad = MOE(x, y, expert_types, ndim = 2) add_point!(moe_nd_inv_rad, (0.5, 0.5), sum((0.5, 0.5) 
.^ 2) + 5) diff --git a/lib/SurrogatesPolyChaos/src/SurrogatesPolyChaos.jl b/lib/SurrogatesPolyChaos/src/SurrogatesPolyChaos.jl index 634b885f9..27f962dd6 100644 --- a/lib/SurrogatesPolyChaos/src/SurrogatesPolyChaos.jl +++ b/lib/SurrogatesPolyChaos/src/SurrogatesPolyChaos.jl @@ -25,7 +25,7 @@ function _calculatepce_coeff(x, y, num_of_multi_indexes, op::AbstractCanonicalOr end function PolynomialChaosSurrogate(x, y, lb::Number, ub::Number; - op::AbstractCanonicalOrthoPoly = GaussOrthoPoly(2)) + op::AbstractCanonicalOrthoPoly = GaussOrthoPoly(2)) n = length(x) poly_degree = op.deg num_of_multi_indexes = 1 + poly_degree @@ -59,9 +59,9 @@ function _calculatepce_coeff(x, y, num_of_multi_indexes, op::MultiOrthoPoly) end function PolynomialChaosSurrogate(x, y, lb, ub; - op::MultiOrthoPoly = MultiOrthoPoly([GaussOrthoPoly(2) - for j in 1:length(lb)], - 2)) + op::MultiOrthoPoly = MultiOrthoPoly([GaussOrthoPoly(2) + for j in 1:length(lb)], + 2)) n = length(x) d = length(lb) poly_degree = op.deg @@ -82,7 +82,7 @@ function (pcND::PolynomialChaosSurrogate)(val) sum = sum + pcND.coeff[i] * first(PolyChaos.evaluate(pcND.ortopolys.ind[i, :], collect(val), - pcND.ortopolys)) + pcND.ortopolys)) end return sum end @@ -93,12 +93,12 @@ function add_point!(polych::PolynomialChaosSurrogate, x_new, y_new) polych.x = vcat(polych.x, x_new) polych.y = vcat(polych.y, y_new) polych.coeff = _calculatepce_coeff(polych.x, polych.y, polych.num_of_multi_indexes, - polych.ortopolys) + polych.ortopolys) else polych.x = vcat(polych.x, x_new) polych.y = vcat(polych.y, y_new) polych.coeff = _calculatepce_coeff(polych.x, polych.y, polych.num_of_multi_indexes, - polych.ortopolys) + polych.ortopolys) end nothing end diff --git a/lib/SurrogatesPolyChaos/test/runtests.jl b/lib/SurrogatesPolyChaos/test/runtests.jl index 3a7f3d57a..767b4b19e 100644 --- a/lib/SurrogatesPolyChaos/test/runtests.jl +++ b/lib/SurrogatesPolyChaos/test/runtests.jl @@ -49,7 +49,7 @@ using SafeTestsets y = objective_function.(x) my_poly1d = PolynomialChaosSurrogate(x, y, lb, ub) @test_broken surrogate_optimize(objective_function, SRBF(), a, b, my_poly1d, - LowDiscrepancySample(; base = 2)) + LowDiscrepancySample(; base = 2)) lb = [0.0, 0.0] ub = [10.0, 10.0] diff --git a/lib/SurrogatesRandomForest/src/SurrogatesRandomForest.jl b/lib/SurrogatesRandomForest/src/SurrogatesRandomForest.jl index 69532f999..b3db175d4 100644 --- a/lib/SurrogatesRandomForest/src/SurrogatesRandomForest.jl +++ b/lib/SurrogatesRandomForest/src/SurrogatesRandomForest.jl @@ -50,7 +50,8 @@ function add_point!(rndfor::RandomForestSurrogate, x_new, y_new) #1D rndfor.x = vcat(rndfor.x, x_new) rndfor.y = vcat(rndfor.y, y_new) - rndfor.bst = xgboost((reshape(rndfor.x, length(rndfor.x), 1), rndfor.y); num_round = rndfor.num_round) + rndfor.bst = xgboost((reshape(rndfor.x, length(rndfor.x), 1), rndfor.y); + num_round = rndfor.num_round) else n_previous = length(rndfor.x) a = vcat(rndfor.x, x_new) diff --git a/lib/SurrogatesSVM/src/SurrogatesSVM.jl b/lib/SurrogatesSVM/src/SurrogatesSVM.jl index 65da3087f..6cc23f388 100644 --- a/lib/SurrogatesSVM/src/SurrogatesSVM.jl +++ b/lib/SurrogatesSVM/src/SurrogatesSVM.jl @@ -48,7 +48,7 @@ function add_point!(svmsurr::SVMSurrogate, x_new, y_new) svmsurr.x = vcat(svmsurr.x, x_new) svmsurr.y = vcat(svmsurr.y, y_new) svmsurr.model = LIBSVM.fit!(SVC(), reshape(svmsurr.x, length(svmsurr.x), 1), - svmsurr.y) + svmsurr.y) else n_previous = length(svmsurr.x) a = vcat(svmsurr.x, x_new) diff --git a/src/Earth.jl b/src/Earth.jl index 0a4d79490..fb620b745 100644 --- 
a/src/Earth.jl +++ b/src/Earth.jl @@ -149,15 +149,15 @@ function _backward_pass_1d(x, y, n_min_terms, basis, penalty, rel_GCV) end function EarthSurrogate(x, y, lb::Number, ub::Number; penalty::Number = 2.0, - n_min_terms::Int = 2, n_max_terms::Int = 10, - rel_res_error::Number = 1e-2, rel_GCV::Number = 1e-2, - maxiters = 100) + n_min_terms::Int = 2, n_max_terms::Int = 10, + rel_res_error::Number = 1e-2, rel_GCV::Number = 1e-2, + maxiters = 100) intercept = sum([y[i] for i in 1:length(y)]) / length(y) basis_after_forward = _forward_pass_1d(x, y, n_max_terms, rel_res_error, maxiters) basis = _backward_pass_1d(x, y, n_min_terms, basis_after_forward, penalty, rel_GCV) coeff = _coeff_1d(x, y, basis) return EarthSurrogate(x, y, lb, ub, basis, coeff, penalty, n_min_terms, n_max_terms, - rel_res_error, rel_GCV, intercept, maxiters) + rel_res_error, rel_GCV, intercept, maxiters) end function (earth::EarthSurrogate)(val::Number) @@ -319,14 +319,14 @@ function _backward_pass_nd(x, y, n_min_terms, basis, penalty, rel_GCV) end function EarthSurrogate(x, y, lb, ub; penalty::Number = 2.0, n_min_terms::Int = 2, - n_max_terms::Int = 10, rel_res_error::Number = 1e-2, - rel_GCV::Number = 1e-2, maxiters = 100) + n_max_terms::Int = 10, rel_res_error::Number = 1e-2, + rel_GCV::Number = 1e-2, maxiters = 100) intercept = sum([y[i] for i in 1:length(y)]) / length(y) basis_after_forward = _forward_pass_nd(x, y, n_max_terms, rel_res_error, maxiters) basis = _backward_pass_nd(x, y, n_min_terms, basis_after_forward, penalty, rel_GCV) coeff = _coeff_nd(x, y, basis) return EarthSurrogate(x, y, lb, ub, basis, coeff, penalty, n_min_terms, n_max_terms, - rel_res_error, rel_GCV, intercept, maxiters) + rel_res_error, rel_GCV, intercept, maxiters) end function (earth::EarthSurrogate)(val) @@ -343,9 +343,9 @@ function add_point!(earth::EarthSurrogate, x_new, y_new) earth.y = vcat(earth.y, y_new) earth.intercept = sum([earth.y[i] for i in 1:length(earth.y)]) / length(earth.y) basis_after_forward = _forward_pass_1d(earth.x, earth.y, earth.n_max_terms, - earth.rel_res_error, earth.maxiters) + earth.rel_res_error, earth.maxiters) earth.basis = _backward_pass_1d(earth.x, earth.y, earth.n_min_terms, - basis_after_forward, earth.penalty, earth.rel_GCV) + basis_after_forward, earth.penalty, earth.rel_GCV) earth.coeff = _coeff_1d(earth.x, earth.y, earth.basis) nothing else @@ -354,9 +354,9 @@ function add_point!(earth::EarthSurrogate, x_new, y_new) earth.y = vcat(earth.y, y_new) earth.intercept = sum([earth.y[i] for i in 1:length(earth.y)]) / length(earth.y) basis_after_forward = _forward_pass_nd(earth.x, earth.y, earth.n_max_terms, - earth.rel_res_error, earth.maxiters) + earth.rel_res_error, earth.maxiters) earth.basis = _backward_pass_nd(earth.x, earth.y, earth.n_min_terms, - basis_after_forward, earth.penalty, earth.rel_GCV) + basis_after_forward, earth.penalty, earth.rel_GCV) earth.coeff = _coeff_nd(earth.x, earth.y, earth.basis) nothing end diff --git a/src/GEKPLS.jl b/src/GEKPLS.jl index 3d3246ac0..1e26bf26f 100644 --- a/src/GEKPLS.jl +++ b/src/GEKPLS.jl @@ -72,21 +72,21 @@ function GEKPLS(x_vec, y_vec, grads_vec, n_comp, delta_x, lb, ub, extra_points, end pls_mean, X_after_PLS, y_after_PLS = _ge_compute_pls(X, y, n_comp, grads, delta_x, - xlimits, extra_points) + xlimits, extra_points) X_after_std, y_after_std, X_offset, y_mean, X_scale, y_std = standardization(X_after_PLS, - y_after_PLS) + y_after_PLS) D, ij = cross_distances(X_after_std) pls_mean_reshaped = reshape(pls_mean, (size(X, 2), n_comp)) d = 
componentwise_distance_PLS(D, "squar_exp", n_comp, pls_mean_reshaped) nt, nd = size(X_after_PLS) beta, gamma, reduced_likelihood_function_value = _reduced_likelihood_function(theta, - "squar_exp", - d, nt, ij, - y_after_std) + "squar_exp", + d, nt, ij, + y_after_std) return GEKPLS(x_vec, y_vec, X, y, grads, xlimits, delta_x, extra_points, n_comp, beta, - gamma, theta, - reduced_likelihood_function_value, - X_offset, X_scale, X_after_std, pls_mean_reshaped, y_mean, y_std) + gamma, theta, + reduced_likelihood_function_value, + X_offset, X_scale, X_after_std, pls_mean_reshaped, y_mean, y_std) end """ @@ -134,21 +134,21 @@ function add_point!(g::GEKPLS, x_tup, y_val, grad_tup) g.y_matrix = vcat(g.y_matrix, y_val) g.grads = vcat(g.grads, new_grads) pls_mean, X_after_PLS, y_after_PLS = _ge_compute_pls(g.x_matrix, g.y_matrix, - g.num_components, - g.grads, g.delta, g.xl, - g.extra_points) + g.num_components, + g.grads, g.delta, g.xl, + g.extra_points) g.X_after_std, y_after_std, g.X_offset, g.y_mean, g.X_scale, g.y_std = standardization(X_after_PLS, - y_after_PLS) + y_after_PLS) D, ij = cross_distances(g.X_after_std) g.pls_mean = reshape(pls_mean, (size(g.x_matrix, 2), g.num_components)) d = componentwise_distance_PLS(D, "squar_exp", g.num_components, g.pls_mean) nt, nd = size(X_after_PLS) g.beta, g.gamma, g.reduced_likelihood_function_value = _reduced_likelihood_function(g.theta, - "squar_exp", - d, - nt, - ij, - y_after_std) + "squar_exp", + d, + nt, + ij, + y_after_std) end """ @@ -185,14 +185,14 @@ function _ge_compute_pls(X, y, n_comp, grads, delta_x, xlimits, extra_points) bb_vals = circshift(boxbehnken(dim, 1), 1) else bb_vals = [0.0 0.0; #center - 1.0 0.0; #right - 0.0 1.0; #up - -1.0 0.0; #left - 0.0 -1.0; #down - 1.0 1.0; #right up - -1.0 1.0; #left up - -1.0 -1.0; #left down - 1.0 -1.0] + 1.0 0.0; #right + 0.0 1.0; #up + -1.0 0.0; #left + 0.0 -1.0; #down + 1.0 1.0; #right up + -1.0 1.0; #left up + -1.0 -1.0; #left down + 1.0 -1.0] end _X = zeros((size(bb_vals)[1], dim)) _y = zeros((size(bb_vals)[1], 1)) @@ -273,9 +273,9 @@ function boxbehnken(matrix_size::Int, center::Int) for j in (i + 1):matrix_size l = l + 1 A[(max(0, (l - 1) * size(A_fact)[1]) + 1):(l * size(A_fact)[1]), i] = A_fact[:, - 1] + 1] A[(max(0, (l - 1) * size(A_fact)[1]) + 1):(l * size(A_fact)[1]), j] = A_fact[:, - 2] + 2] end end diff --git a/src/Kriging.jl b/src/Kriging.jl index 57500a62e..f48306069 100644 --- a/src/Kriging.jl +++ b/src/Kriging.jl @@ -46,12 +46,12 @@ function std_error_at_point(k::Kriging, val) d = length(k.x[1]) r = zeros(eltype(k.x[1]), n, 1) r = [let - sum = zero(eltype(k.x[1])) - for l in 1:d - sum = sum + k.theta[l] * norm(val[l] - k.x[i][l])^(k.p[l]) - end - exp(-sum) - end + sum = zero(eltype(k.x[1])) + for l in 1:d + sum = sum + k.theta[l] * norm(val[l] - k.x[i][l])^(k.p[l]) + end + exp(-sum) + end for i in 1:n] one = ones(eltype(k.x[1]), n, 1) @@ -102,7 +102,7 @@ Constructor for type Kriging. - theta: value > 0 modeling how much the function is changing in the i-th variable. """ function Kriging(x, y, lb::Number, ub::Number; p = 2.0, - theta = 0.5 / max(1e-6 * abs(ub - lb), std(x))^p) + theta = 0.5 / max(1e-6 * abs(ub - lb), std(x))^p) if length(x) != length(unique(x)) println("There exists a repetition in the samples, cannot build Kriging.") return @@ -168,8 +168,8 @@ Constructor for Kriging surrogate. changing in the i-th variable. 
""" function Kriging(x, y, lb, ub; p = 2.0 .* collect(one.(x[1])), - theta = [0.5 / max(1e-6 * norm(ub .- lb), std(x_i[i] for x_i in x))^p[i] - for i in 1:length(x[1])]) + theta = [0.5 / max(1e-6 * norm(ub .- lb), std(x_i[i] for x_i in x))^p[i] + for i in 1:length(x[1])]) if length(x) != length(unique(x)) println("There exists a repetition in the samples, cannot build Kriging.") return @@ -194,12 +194,12 @@ function _calc_kriging_coeffs(x, y, p, theta) d = length(x[1]) R = [let - sum = zero(eltype(x[1])) - for l in 1:d - sum = sum + theta[l] * norm(x[i][l] - x[j][l])^p[l] - end - exp(-sum) - end + sum = zero(eltype(x[1])) + for l in 1:d + sum = sum + theta[l] * norm(x[i][l] - x[j][l])^p[l] + end + exp(-sum) + end for j in 1:n, i in 1:n] # Estimate nugget based on maximum allowed condition number diff --git a/src/Lobachevsky.jl b/src/Lobachevsky.jl index 52fb0a123..30c890afb 100644 --- a/src/Lobachevsky.jl +++ b/src/Lobachevsky.jl @@ -45,7 +45,7 @@ end Lobachevsky interpolation, suggested parameters: 0 <= alpha <= 4, n must be even. """ function LobachevskySurrogate(x, y, lb::Number, ub::Number; alpha::Number = 1.0, n::Int = 4, - sparse = false) + sparse = false) if alpha > 4 || alpha < 0 error("Alpha must be between 0 and 4") end @@ -89,7 +89,7 @@ LobachevskySurrogate(x,y,alpha,n::Int,lb,ub,sparse = false) Build the Lobachevsky surrogate with parameters alpha and n. """ function LobachevskySurrogate(x, y, lb, ub; alpha = collect(one.(x[1])), n::Int = 4, - sparse = false) + sparse = false) if n % 2 != 0 error("Parameter n must be even") end @@ -199,5 +199,5 @@ function lobachevsky_integrate_dimension(loba::LobachevskySurrogate, lb, ub, dim new_ub = deleteat!(ub, dim) new_loba = deleteat!(loba.alpha, dim) return LobachevskySurrogate(new_x, loba.y, loba.alpha, loba.n, new_lb, new_ub, - new_coeff, loba.sparse) + new_coeff, loba.sparse) end diff --git a/src/Optimization.jl b/src/Optimization.jl index 90e05a2db..cc615a983 100755 --- a/src/Optimization.jl +++ b/src/Optimization.jl @@ -32,7 +32,7 @@ struct RTEA{K, Z, P, N, S} <: SurrogateOptimizationAlgorithm end function merit_function(point, w, surr::AbstractSurrogate, s_max, s_min, d_max, d_min, - box_size) + box_size) if length(point) == 1 D_x = box_size + 1 for i in 1:length(surr.x) @@ -84,8 +84,8 @@ a few values to achieve both exploitation and exploration. When w is close to zero, we do pure exploration, while w close to 1 corresponds to exploitation. 
""" function surrogate_optimize(obj::Function, ::SRBF, lb, ub, surr::AbstractSurrogate, - sample_type::SamplingAlgorithm; maxiters = 100, - num_new_samples = 100, needs_gradient = false) + sample_type::SamplingAlgorithm; maxiters = 100, + num_new_samples = 100, needs_gradient = false) scale = 0.2 success = 0 failure = 0 @@ -139,8 +139,8 @@ function surrogate_optimize(obj::Function, ::SRBF, lb, ub, surr::AbstractSurroga evaluation_of_merit_function = zeros(float(eltype(surr.x[1])), num_new_samples) @inbounds for r in 1:num_new_samples evaluation_of_merit_function[r] = merit_function(new_sample[r], w, surr, - s_max, s_min, d_max, d_min, - box_size) + s_max, s_min, d_max, d_min, + box_size) end new_addition = false adaptive_point_x = Tuple{} @@ -235,8 +235,8 @@ SRBF 1D: surrogate_optimize(obj::Function,::SRBF,lb::Number,ub::Number,surr::AbstractSurrogate,sample_type::SamplingAlgorithm;maxiters=100,num_new_samples=100) """ function surrogate_optimize(obj::Function, ::SRBF, lb::Number, ub::Number, - surr::AbstractSurrogate, sample_type::SamplingAlgorithm; - maxiters = 100, num_new_samples = 100) + surr::AbstractSurrogate, sample_type::SamplingAlgorithm; + maxiters = 100, num_new_samples = 100) #Suggested by: #https://www.mathworks.com/help/gads/surrogate-optimization-algorithm.html scale = 0.2 @@ -292,7 +292,7 @@ function surrogate_optimize(obj::Function, ::SRBF, lb::Number, ub::Number, end #3) Evaluate merit function at the sampled points evaluation_of_merit_function = merit_function.(new_sample, w, surr, s_max, - s_min, d_max, d_min, box_size) + s_min, d_max, d_min, box_size) new_addition = false adaptive_point_x = zero(eltype(new_sample[1])) @@ -374,9 +374,9 @@ function surrogate_optimize(obj::Function, ::SRBF, lb::Number, ub::Number, end # Ask SRBF ND -function potential_optimal_points(::SRBF, strategy, lb, ub, surr::AbstractSurrogate, sample_type::SamplingAlgorithm, n_parallel; - num_new_samples = 500) - +function potential_optimal_points(::SRBF, strategy, lb, ub, surr::AbstractSurrogate, + sample_type::SamplingAlgorithm, n_parallel; + num_new_samples = 500) scale = 0.2 w_range = [0.3, 0.5, 0.7, 0.95] w_cycle = Iterators.cycle(w_range) @@ -427,7 +427,6 @@ function potential_optimal_points(::SRBF, strategy, lb, ub, surr::AbstractSurrog tmp_surr = deepcopy(surr) - new_addition = 0 diff_x = zeros(eltype(surr.x[1]), d) @@ -481,9 +480,10 @@ function potential_optimal_points(::SRBF, strategy, lb, ub, surr::AbstractSurrog end # Ask SRBF 1D -function potential_optimal_points(::SRBF, strategy, lb::Number, ub::Number, surr::AbstractSurrogate, - sample_type::SamplingAlgorithm, n_parallel; - num_new_samples = 500) +function potential_optimal_points(::SRBF, strategy, lb::Number, ub::Number, + surr::AbstractSurrogate, + sample_type::SamplingAlgorithm, n_parallel; + num_new_samples = 500) scale = 0.2 success = 0 w_range = [0.3, 0.5, 0.7, 0.95] @@ -580,7 +580,6 @@ function potential_optimal_points(::SRBF, strategy, lb::Number, ub::Number, surr return (proposed_points_x, merit_of_proposed_points) end - """ This is an implementation of Lower Confidence Bound (LCB), a popular acquisition function in Bayesian optimization. @@ -589,8 +588,8 @@ Under a Gaussian process (GP) prior, the goal is to minimize: default value ``k = 2``. 
""" function surrogate_optimize(obj::Function, ::LCBS, lb::Number, ub::Number, krig, - sample_type::SamplingAlgorithm; maxiters = 100, - num_new_samples = 100, k = 2.0) + sample_type::SamplingAlgorithm; maxiters = 100, + num_new_samples = 100, k = 2.0) dtol = 1e-3 * norm(ub - lb) for i in 1:maxiters new_sample = sample(num_new_samples, lb, ub, sample_type) @@ -650,8 +649,8 @@ Under a Gaussian process (GP) prior, the goal is to minimize: default value ``k = 2``. """ function surrogate_optimize(obj::Function, ::LCBS, lb, ub, krig, - sample_type::SamplingAlgorithm; maxiters = 100, - num_new_samples = 100, k = 2.0) + sample_type::SamplingAlgorithm; maxiters = 100, + num_new_samples = 100, k = 2.0) dtol = 1e-3 * norm(ub - lb) for i in 1:maxiters d = length(krig.x) @@ -710,8 +709,8 @@ end Expected improvement method 1D """ function surrogate_optimize(obj::Function, ::EI, lb::Number, ub::Number, krig, - sample_type::SamplingAlgorithm; maxiters = 100, - num_new_samples = 100) + sample_type::SamplingAlgorithm; maxiters = 100, + num_new_samples = 100) dtol = 1e-3 * norm(ub - lb) eps = 0.01 for i in 1:maxiters @@ -776,9 +775,9 @@ function surrogate_optimize(obj::Function, ::EI, lb::Number, ub::Number, krig, end # Ask EI 1D & ND -function potential_optimal_points(::EI, strategy, lb, ub, krig, sample_type::SamplingAlgorithm, n_parallel::Number; - num_new_samples = 100) - +function potential_optimal_points(::EI, strategy, lb, ub, krig, + sample_type::SamplingAlgorithm, n_parallel::Number; + num_new_samples = 100) lb = krig.lb ub = krig.ub @@ -853,8 +852,8 @@ maximize expected improvement: """ function surrogate_optimize(obj::Function, ::EI, lb, ub, krig, - sample_type::SamplingAlgorithm; maxiters = 100, - num_new_samples = 100) + sample_type::SamplingAlgorithm; maxiters = 100, + num_new_samples = 100) dtol = 1e-3 * norm(ub - lb) eps = 0.01 for i in 1:maxiters @@ -935,7 +934,7 @@ function adjust_step_size(sigma_n, sigma_min, C_success, t_success, C_fail, t_fa end function select_evaluation_point_1D(new_points1, surr1::AbstractSurrogate, numb_iters, - maxiters) + maxiters) v = [0.3, 0.5, 0.8, 0.95] k = 4 n = length(surr1.x) @@ -996,8 +995,8 @@ surrogates and dynamic coordinate search in high-dimensional expensive black-box """ function surrogate_optimize(obj::Function, ::DYCORS, lb::Number, ub::Number, - surr1::AbstractSurrogate, sample_type::SamplingAlgorithm; - maxiters = 100, num_new_samples = 100) + surr1::AbstractSurrogate, sample_type::SamplingAlgorithm; + maxiters = 100, num_new_samples = 100) x_best = argmin(surr1.y) y_best = minimum(surr1.y) sigma_n = 0.2 * norm(ub - lb) @@ -1019,14 +1018,14 @@ function surrogate_optimize(obj::Function, ::DYCORS, lb::Number, ub::Number, if new_points[i] > ub #reflection new_points[i] = max(lb, - maximum(surr1.x) - - norm(new_points[i] - maximum(surr1.x))) + maximum(surr1.x) - + norm(new_points[i] - maximum(surr1.x))) end if new_points[i] < lb #reflection new_points[i] = min(ub, - minimum(surr1.x) + - norm(new_points[i] - minimum(surr1.x))) + minimum(surr1.x) + + norm(new_points[i] - minimum(surr1.x))) end end end @@ -1043,7 +1042,7 @@ function surrogate_optimize(obj::Function, ::DYCORS, lb::Number, ub::Number, end sigma_n, C_success, C_fail = adjust_step_size(sigma_n, sigma_min, C_success, - t_success, C_fail, t_fail) + t_success, C_fail, t_fail) if f_new < y_best x_best = x_new @@ -1056,7 +1055,7 @@ function surrogate_optimize(obj::Function, ::DYCORS, lb::Number, ub::Number, end function select_evaluation_point_ND(new_points, surrn::AbstractSurrogate, 
numb_iters, - maxiters) + maxiters) v = [0.3, 0.5, 0.8, 0.95] k = 4 n = size(surrn.x, 1) @@ -1124,8 +1123,8 @@ to perturb a given coordinate and decrease this probability after each function evaluation so fewer coordinates are perturbed later in the optimization. """ function surrogate_optimize(obj::Function, ::DYCORS, lb, ub, surrn::AbstractSurrogate, - sample_type::SamplingAlgorithm; maxiters = 100, - num_new_samples = 100) + sample_type::SamplingAlgorithm; maxiters = 100, + num_new_samples = 100) x_best = collect(surrn.x[argmin(surrn.y)]) y_best = minimum(surrn.y) sigma_n = 0.2 * norm(ub - lb) @@ -1160,13 +1159,13 @@ function surrogate_optimize(obj::Function, ::DYCORS, lb, ub, surrn::AbstractSurr while new_points[i, j] < lb[j] || new_points[i, j] > ub[j] if new_points[i, j] > ub[j] new_points[i, j] = max(lb[j], - maximum(surrn.x)[j] - - norm(new_points[i, j] - maximum(surrn.x)[j])) + maximum(surrn.x)[j] - + norm(new_points[i, j] - maximum(surrn.x)[j])) end if new_points[i, j] < lb[j] new_points[i, j] = min(ub[j], - minimum(surrn.x)[j] + - norm(new_points[i] - minimum(surrn.x)[j])) + minimum(surrn.x)[j] + + norm(new_points[i] - minimum(surrn.x)[j])) end end end @@ -1185,7 +1184,7 @@ function surrogate_optimize(obj::Function, ::DYCORS, lb, ub, surrn::AbstractSurr end sigma_n, C_success, C_fail = adjust_step_size(sigma_n, sigma_min, C_success, - t_success, C_fail, t_fail) + t_success, C_fail, t_fail) if f_new < y_best x_best = x_new @@ -1311,8 +1310,8 @@ SOP Surrogate optimization method, following closely the following papers: #Suggested number of new_samples = min(500*d,5000) """ function surrogate_optimize(obj::Function, sop1::SOP, lb::Number, ub::Number, - surrSOP::AbstractSurrogate, sample_type::SamplingAlgorithm; - maxiters = 100, num_new_samples = min(500 * 1, 5000)) + surrSOP::AbstractSurrogate, sample_type::SamplingAlgorithm; + maxiters = 100, num_new_samples = min(500 * 1, 5000)) d = length(lb) N_fail = 3 N_tenure = 5 @@ -1558,8 +1557,8 @@ function II_tier_ranking_ND(D::Dict, srgD::AbstractSurrogate) end function surrogate_optimize(obj::Function, sopd::SOP, lb, ub, surrSOPD::AbstractSurrogate, - sample_type::SamplingAlgorithm; maxiters = 100, - num_new_samples = min(500 * length(lb), 5000)) + sample_type::SamplingAlgorithm; maxiters = 100, + num_new_samples = min(500 * length(lb), 5000)) d = length(lb) N_fail = 3 N_tenure = 5 @@ -1741,7 +1740,7 @@ function _nonDominatedSorting(arr::Array{Float64, 2}) while !isempty(arr) s = size(arr, 1) red = dropdims(sum([_dominates(arr[i, :], arr[j, :]) for i in 1:s, j in 1:s], - dims = 1) .== 0, dims = 1) + dims = 1) .== 0, dims = 1) a = 1:s sel::Array{Int64, 1} = a[red] push!(fronts, ind[sel]) @@ -1753,8 +1752,8 @@ function _nonDominatedSorting(arr::Array{Float64, 2}) end function surrogate_optimize(obj::Function, sbm::SMB, lb::Number, ub::Number, - surrSMB::AbstractSurrogate, sample_type::SamplingAlgorithm; - maxiters = 100, n_new_look = 1000) + surrSMB::AbstractSurrogate, sample_type::SamplingAlgorithm; + maxiters = 100, n_new_look = 1000) #obj contains a function for each output dimension dim_out = length(surrSMB.y[1]) d = 1 @@ -1793,8 +1792,8 @@ function surrogate_optimize(obj::Function, sbm::SMB, lb::Number, ub::Number, end function surrogate_optimize(obj::Function, smb::SMB, lb, ub, surrSMBND::AbstractSurrogate, - sample_type::SamplingAlgorithm; maxiters = 100, - n_new_look = 1000) + sample_type::SamplingAlgorithm; maxiters = 100, + n_new_look = 1000) #obj contains a function for each output dimension dim_out = length(surrSMBND.y[1]) d 
= length(lb) @@ -1834,8 +1833,8 @@ end # RTEA (Noisy model based multi objective optimization + standard rtea by fieldsen), use this for very noisy objective functions because there are a lot of re-evaluations function surrogate_optimize(obj, rtea::RTEA, lb::Number, ub::Number, - surrRTEA::AbstractSurrogate, sample_type::SamplingAlgorithm; - maxiters = 100, n_new_look = 1000) + surrRTEA::AbstractSurrogate, sample_type::SamplingAlgorithm; + maxiters = 100, n_new_look = 1000) Z = rtea.z K = rtea.k p_cross = rtea.p @@ -1940,8 +1939,8 @@ function surrogate_optimize(obj, rtea::RTEA, lb::Number, ub::Number, end function surrogate_optimize(obj, rtea::RTEA, lb, ub, surrRTEAND::AbstractSurrogate, - sample_type::SamplingAlgorithm; maxiters = 100, - n_new_look = 1000) + sample_type::SamplingAlgorithm; maxiters = 100, + n_new_look = 1000) Z = rtea.z K = rtea.k p_cross = rtea.p @@ -2047,7 +2046,7 @@ function surrogate_optimize(obj, rtea::RTEA, lb, ub, surrRTEAND::AbstractSurroga end function surrogate_optimize(obj::Function, ::EI, lb, ub, krig, sample_type::SectionSample; - maxiters = 100, num_new_samples = 100) + maxiters = 100, num_new_samples = 100) dtol = 1e-3 * norm(ub - lb) eps = 0.01 for i in 1:maxiters @@ -2095,7 +2094,7 @@ function surrogate_optimize(obj::Function, ::EI, lb, ub, krig, sample_type::Sect if length(new_sample) == 0 println("Out of sampling points.") return section_sampler_returner(sample_type, krig.x, krig.y, lb, ub, - krig) + krig) end else point_found = true @@ -2115,12 +2114,12 @@ function surrogate_optimize(obj::Function, ::EI, lb, ub, krig, sample_type::Sect end function section_sampler_returner(sample_type::SectionSample, surrn_x, surrn_y, - lb, ub, surrn) + lb, ub, surrn) d_fixed = fixed_dimensions(sample_type) @assert length(surrn_y) == size(surrn_x)[1] surrn_xy = [(surrn_x[y], surrn_y[y]) for y in 1:length(surrn_y)] section_surr1_xy = filter(xyz -> xyz[1][d_fixed] == Tuple(sample_type.x0[d_fixed]), - surrn_xy) + surrn_xy) section_surr1_x = [xy[1] for xy in section_surr1_xy] section_surr1_y = [xy[2] for xy in section_surr1_xy] if length(section_surr1_xy) == 0 diff --git a/src/PolynomialChaos.jl b/src/PolynomialChaos.jl index 76e89e1df..81e72600b 100644 --- a/src/PolynomialChaos.jl +++ b/src/PolynomialChaos.jl @@ -20,7 +20,7 @@ function _calculatepce_coeff(x, y, num_of_multi_indexes, op::AbstractCanonicalOr end function PolynomialChaosSurrogate(x, y, lb::Number, ub::Number; - op::AbstractCanonicalOrthoPoly = GaussOrthoPoly(2)) + op::AbstractCanonicalOrthoPoly = GaussOrthoPoly(2)) n = length(x) poly_degree = op.deg num_of_multi_indexes = 1 + poly_degree @@ -53,9 +53,9 @@ function _calculatepce_coeff(x, y, num_of_multi_indexes, op::MultiOrthoPoly) end function PolynomialChaosSurrogate(x, y, lb, ub; - op::MultiOrthoPoly = MultiOrthoPoly([GaussOrthoPoly(2) - for j in 1:length(lb)], - 2)) + op::MultiOrthoPoly = MultiOrthoPoly([GaussOrthoPoly(2) + for j in 1:length(lb)], + 2)) n = length(x) d = length(lb) poly_degree = op.deg @@ -75,7 +75,7 @@ function (pcND::PolynomialChaosSurrogate)(val) sum = sum + pcND.coeff[i] * first(PolyChaos.evaluate(pcND.ortopolys.ind[i, :], collect(val), - pcND.ortopolys)) + pcND.ortopolys)) end return sum end @@ -86,12 +86,12 @@ function add_point!(polych::PolynomialChaosSurrogate, x_new, y_new) polych.x = vcat(polych.x, x_new) polych.y = vcat(polych.y, y_new) polych.coeff = _calculatepce_coeff(polych.x, polych.y, polych.num_of_multi_indexes, - polych.ortopolys) + polych.ortopolys) else polych.x = vcat(polych.x, x_new) polych.y = vcat(polych.y, y_new) 
polych.coeff = _calculatepce_coeff(polych.x, polych.y, polych.num_of_multi_indexes, - polych.ortopolys) + polych.ortopolys) end nothing end diff --git a/src/Radials.jl b/src/Radials.jl index 88178a899..d60f3ca40 100644 --- a/src/Radials.jl +++ b/src/Radials.jl @@ -25,9 +25,9 @@ cubicRadial() = RadialFunction(1, z -> norm(z)^3) multiquadricRadial(c = 1.0) = RadialFunction(1, z -> sqrt((c * norm(z))^2 + 1)) thinplateRadial() = RadialFunction(2, z -> begin - result = norm(z)^2 * log(norm(z)) - ifelse(iszero(z), zero(result), result) - end) + result = norm(z)^2 * log(norm(z)) + ifelse(iszero(z), zero(result), result) +end) """ RadialBasis(x,y,lb,ub,rad::RadialFunction, scale_factor::Float = 1.0) @@ -45,7 +45,7 @@ https://en.wikipedia.org/wiki/Polyharmonic_spline """ function RadialBasis(x, y, lb, ub; rad::RadialFunction = linearRadial(), - scale_factor::Real = 0.5, sparse = false) + scale_factor::Real = 0.5, sparse = false) q = rad.q phi = rad.phi coeff = _calc_coeffs(x, y, lb, ub, phi, q, scale_factor, sparse) @@ -110,9 +110,9 @@ using Zygote: @nograd, Buffer function _make_combination(n, d, ix) exponents_combinations = [e for e - in collect(Iterators.product(Iterators.repeated(0:n, - d)...))[:] - if sum(e) <= n] + in collect(Iterators.product(Iterators.repeated(0:n, + d)...))[:] + if sum(e) <= n] return exponents_combinations[ix + 1] end @@ -144,7 +144,7 @@ function multivar_poly_basis(x, ix, d, n) else prod(a^d for (a, d) - in zip(x, _make_combination(n, d, ix))) + in zip(x, _make_combination(n, d, ix))) end end @@ -179,12 +179,12 @@ function _add_tmp_to_approx!(approx, i, tmp, rad::RadialBasis; f = identity) end # specialise when only single output dimension function _make_approx(val, - ::RadialBasis{F, Q, X, <:AbstractArray{<:Number}}) where {F, Q, X} + ::RadialBasis{F, Q, X, <:AbstractArray{<:Number}}) where {F, Q, X} return Ref(zero(eltype(val))) end function _add_tmp_to_approx!(approx::Base.RefValue, i, tmp, - rad::RadialBasis{F, Q, X, <:AbstractArray{<:Number}}; - f = identity) where {F, Q, X} + rad::RadialBasis{F, Q, X, <:AbstractArray{<:Number}}; + f = identity) where {F, Q, X} @inbounds @simd ivdep for j in 1:size(rad.coeff, 1) approx[] += rad.coeff[j, i] * f(tmp) end @@ -242,6 +242,6 @@ function add_point!(rad::RadialBasis, new_x, new_y) append!(rad.y, new_y) end rad.coeff = _calc_coeffs(rad.x, rad.y, rad.lb, rad.ub, rad.phi, rad.dim_poly, - rad.scale_factor, rad.sparse) + rad.scale_factor, rad.sparse) nothing end diff --git a/src/Sampling.jl b/src/Sampling.jl index da53e1f9b..0eda577e0 100644 --- a/src/Sampling.jl +++ b/src/Sampling.jl @@ -15,21 +15,25 @@ function sample(args...; kwargs...) end end - #### SectionSample #### """ SectionSample{T}(x0, sa) `SectionSample(x0, sampler)` where `sampler` is any sampler above and `x0` is a vector of either `NaN` for a free dimension or some scalar for a constrained dimension. """ -struct SectionSample{R<:Real,I<:Integer,VR<:AbstractVector{R},VI<:AbstractVector{I}} <: SamplingAlgorithm +struct SectionSample{ + R <: Real, + I <: Integer, + VR <: AbstractVector{R}, + VI <: AbstractVector{I}, +} <: SamplingAlgorithm x0::VR sa::SamplingAlgorithm fixed_dims::VI end fixed_dimensions(section_sampler::SectionSample)::Vector{Int64} = findall(x -> x == false, - isnan.(section_sampler.x0)) + isnan.(section_sampler.x0)) free_dimensions(section_sampler::SectionSample)::Vector{Int64} = findall(x -> x == true, - isnan.(section_sampler.x0)) + isnan.(section_sampler.x0)) """ sample(n,lb,ub,K::SectionSample) Returns Tuples constrained to a section. 
@@ -39,8 +43,11 @@ The sampler is defined as in e.g. `section_sampler_y_is_10 = SectionSample([NaN64, NaN64, 10.0, 10.0], UniformSample())` where the first argument is a Vector{T} in which numbers are fixed coordinates and `NaN`s correspond to free dimensions, and the second argument is a SamplingAlgorithm which is used to sample in the free dimensions. """ -function sample(n::Integer, lb::T, ub::T, section_sampler::SectionSample) where - T <: Union{Base.AbstractVecOrTuple, Number} +function sample(n::Integer, + lb::T, + ub::T, + section_sampler::SectionSample) where { + T <: Union{Base.AbstractVecOrTuple, Number}} @assert n>0 ZERO_SAMPLES_MESSAGE QuasiMonteCarlo._check_sequence(lb, ub, length(lb)) if lb isa Number @@ -60,12 +67,16 @@ function sample(n::Integer, lb::T, ub::T, section_sampler::SectionSample) where out_as_vec[d, y] = new_samples[xi, y] end end - return isone(size(out_as_vec, 1)) ? vec(out_as_vec) : collect(reinterpret(reshape, NTuple{size(out_as_vec, 1), eltype(out_as_vec)}, out_as_vec)) + return isone(size(out_as_vec, 1)) ? vec(out_as_vec) : + collect(reinterpret(reshape, + NTuple{size(out_as_vec, 1), eltype(out_as_vec)}, + out_as_vec)) end end -SectionSample(x0::AbstractVector, sa::SamplingAlgorithm) = +function SectionSample(x0::AbstractVector, sa::SamplingAlgorithm) SectionSample(x0, sa, findall(isnan, x0)) +end """ SectionSample(n, d, K::SectionSample) @@ -75,16 +86,19 @@ The sampler is defined `SectionSample([NaN64, NaN64, 10.0, 10.0], UniformSample())` where the first argument is a Vector{T} in which numbers are fixed coordinates and `NaN`s correspond to free dimensions, and the second argument is a SamplingAlgorithm which is used to sample in the free dimensions. """ -function sample(n::Integer, d::Integer, section_sampler::SectionSample, T=eltype(section_sampler.x0)) +function sample(n::Integer, + d::Integer, + section_sampler::SectionSample, + T = eltype(section_sampler.x0)) QuasiMonteCarlo._check_sequence(n) @assert eltype(section_sampler.x0) == T @assert length(section_sampler.fixed_dims) == d return sample(n, section_sampler) end -@views function sample(n::Integer, section_sampler::SectionSample{T}) where T +@views function sample(n::Integer, section_sampler::SectionSample{T}) where {T} samples = Matrix{T}(undef, n, length(section_sampler.x0)) fixed_dims = section_sampler.fixed_dims - samples[:,fixed_dims] .= sample(n, length(fixed_dims), section_sampler.sa, T) + samples[:, fixed_dims] .= sample(n, length(fixed_dims), section_sampler.sa, T) return vec(samples) -end \ No newline at end of file +end diff --git a/src/Surrogates.jl b/src/Surrogates.jl index 09f840132..0d388a6a4 100755 --- a/src/Surrogates.jl +++ b/src/Surrogates.jl @@ -28,7 +28,7 @@ current_surrogates = ["Kriging", "LinearSurrogate", "LobachevskySurrogate", #Radial structure: function RadialBasisStructure(; radial_function, scale_factor, sparse) return (name = "RadialBasis", radial_function = radial_function, - scale_factor = scale_factor, sparse = sparse) + scale_factor = scale_factor, sparse = sparse) end #Kriging structure: @@ -58,7 +58,7 @@ end #Neural structure function NeuralStructure(; model, loss, opt, n_echos) return (name = "NeuralSurrogate", model = model, loss = loss, opt = opt, - n_echos = n_echos) + n_echos = n_echos) end #Random forest structure @@ -84,22 +84,23 @@ end export current_surrogates export GEKPLS export RadialBasisStructure, KrigingStructure, LinearStructure, InverseDistanceStructure -export LobachevskyStructure, NeuralStructure, RandomForestStructure, - 
SecondOrderPolynomialStructure +export LobachevskyStructure, + NeuralStructure, RandomForestStructure, + SecondOrderPolynomialStructure export WendlandStructure export AbstractSurrogate, SamplingAlgorithm export Kriging, RadialBasis, add_point!, current_estimate, std_error_at_point # Parallelization Strategies export potential_optimal_points export MinimumConstantLiar, MaximumConstantLiar, MeanConstantLiar, KrigingBeliever, - KrigingBelieverUpperBound, KrigingBelieverLowerBound + KrigingBelieverUpperBound, KrigingBelieverLowerBound # radial basis functions export linearRadial, cubicRadial, multiquadricRadial, thinplateRadial # samplers export sample, GridSample, RandomSample, SobolSample, LatinHypercubeSample, - HaltonSample + HaltonSample export RandomSample, KroneckerSample, GoldenSample, SectionSample # Optimization algorithms @@ -111,8 +112,9 @@ export InverseDistanceSurrogate export SecondOrderPolynomialSurrogate export Wendland export RadialBasisStructure, KrigingStructure, LinearStructure, InverseDistanceStructure -export LobachevskyStructure, NeuralStructure, RandomForestStructure, - SecondOrderPolynomialStructure +export LobachevskyStructure, + NeuralStructure, RandomForestStructure, + SecondOrderPolynomialStructure export WendlandStructure #export MOE export VariableFidelitySurrogate diff --git a/src/VariableFidelity.jl b/src/VariableFidelity.jl index de713c770..d90928d38 100644 --- a/src/VariableFidelity.jl +++ b/src/VariableFidelity.jl @@ -9,13 +9,13 @@ mutable struct VariableFidelitySurrogate{X, Y, L, U, N, F, E} <: AbstractSurroga end function VariableFidelitySurrogate(x, y, lb, ub; - num_high_fidel = Int(floor(length(x) / 2)), - low_fid_structure = RadialBasisStructure(radial_function = linearRadial(), - scale_factor = 1.0, - sparse = false), - high_fid_structure = RadialBasisStructure(radial_function = cubicRadial(), - scale_factor = 1.0, - sparse = false)) + num_high_fidel = Int(floor(length(x) / 2)), + low_fid_structure = RadialBasisStructure(radial_function = linearRadial(), + scale_factor = 1.0, + sparse = false), + high_fid_structure = RadialBasisStructure(radial_function = cubicRadial(), + scale_factor = 1.0, + sparse = false)) x_high = x[1:num_high_fidel] x_low = x[(num_high_fidel + 1):end] y_high = y[1:num_high_fidel] @@ -25,48 +25,48 @@ function VariableFidelitySurrogate(x, y, lb, ub; if low_fid_structure[1] == "RadialBasis" #fit and append to local_surr low_fid_surr = RadialBasis(x_low, y_low, lb, ub, - rad = low_fid_structure.radial_function, - scale_factor = low_fid_structure.scale_factor, - sparse = low_fid_structure.sparse) + rad = low_fid_structure.radial_function, + scale_factor = low_fid_structure.scale_factor, + sparse = low_fid_structure.sparse) elseif low_fid_structure[1] == "Kriging" low_fid_surr = Kriging(x_low, y_low, lb, ub, p = low_fid_structure.p, - theta = low_fid_structure.theta) + theta = low_fid_structure.theta) elseif low_fid_structure[1] == "GEK" low_fid_surr = GEK(x_low, y_low, lb, ub, p = low_fid_structure.p, - theta = low_fid_structure.theta) + theta = low_fid_structure.theta) elseif low_fid_structure == "LinearSurrogate" low_fid_surr = LinearSurrogate(x_low, y_low, lb, ub) elseif low_fid_structure[1] == "InverseDistanceSurrogate" low_fid_surr = InverseDistanceSurrogate(x_low, y_low, lb, ub, - p = low_fid_structure.p) + p = low_fid_structure.p) elseif low_fid_structure[1] == "LobachevskySurrogate" low_fid_surr = LobachevskySurrogate(x_low, y_low, lb, ub, - alpha = low_fid_structure.alpha, - n = low_fid_structure.n, - sparse = 
low_fid_structure.sparse) + alpha = low_fid_structure.alpha, + n = low_fid_structure.n, + sparse = low_fid_structure.sparse) elseif low_fid_structure[1] == "NeuralSurrogate" low_fid_surr = NeuralSurrogate(x_low, y_low, lb, ub, - model = low_fid_structure.model, - loss = low_fid_structure.loss, - opt = low_fid_structure.opt, - n_echos = low_fid_structure.n_echos) + model = low_fid_structure.model, + loss = low_fid_structure.loss, + opt = low_fid_structure.opt, + n_echos = low_fid_structure.n_echos) elseif low_fid_structure[1] == "RandomForestSurrogate" low_fid_surr = RandomForestSurrogate(x_low, y_low, lb, ub, - num_round = low_fid_structure.num_round) + num_round = low_fid_structure.num_round) elseif low_fid_structure == "SecondOrderPolynomialSurrogate" low_fid_surr = SecondOrderPolynomialSurrogate(x_low, y_low, lb, ub) elseif low_fid_structure[1] == "Wendland" low_fid_surr = Wendand(x_low, y_low, lb, ub, eps = low_fid_surr.eps, - maxiters = low_fid_surr.maxiters, tol = low_fid_surr.tol) + maxiters = low_fid_surr.maxiters, tol = low_fid_surr.tol) else throw("A surrogate with the name provided does not exist or is not currently supported with VariableFidelity") end @@ -80,12 +80,12 @@ function VariableFidelitySurrogate(x, y, lb, ub; if high_fid_structure[1] == "RadialBasis" #fit and append to local_surr eps = RadialBasis(x_high, y_eps, lb, ub, rad = high_fid_structure.radial_function, - scale_factor = high_fid_structure.scale_factor, - sparse = high_fid_structure.sparse) + scale_factor = high_fid_structure.scale_factor, + sparse = high_fid_structure.sparse) elseif high_fid_structure[1] == "Kriging" eps = Kriging(x_high, y_eps, lb, ub, p = high_fid_structure.p, - theta = high_fid_structure.theta) + theta = high_fid_structure.theta) elseif high_fid_structure == "LinearSurrogate" eps = LinearSurrogate(x_high, y_eps, lb, ub) @@ -95,24 +95,24 @@ function VariableFidelitySurrogate(x, y, lb, ub; elseif high_fid_structure[1] == "LobachevskySurrogate" eps = LobachevskySurrogate(x_high, y_eps, lb, ub, alpha = high_fid_structure.alpha, - n = high_fid_structure.n, - sparse = high_fid_structure.sparse) + n = high_fid_structure.n, + sparse = high_fid_structure.sparse) elseif high_fid_structure[1] == "NeuralSurrogate" eps = NeuralSurrogate(x_high, y_eps, lb, ub, model = high_fid_structure.model, - loss = high_fid_structure.loss, opt = high_fid_structure.opt, - n_echos = high_fid_structure.n_echos) + loss = high_fid_structure.loss, opt = high_fid_structure.opt, + n_echos = high_fid_structure.n_echos) elseif high_fid_structure[1] == "RandomForestSurrogate" eps = RandomForestSurrogate(x_high, y_eps, lb, ub, - num_round = high_fid_structure.num_round) + num_round = high_fid_structure.num_round) elseif high_fid_structure == "SecondOrderPolynomialSurrogate" eps = SecondOrderPolynomialSurrogate(x_high, y_eps, lb, ub) elseif high_fid_structure[1] == "Wendland" eps = Wendand(x_high, y_eps, lb, ub, eps = high_fid_structure.eps, - maxiters = high_fid_structure.maxiters, tol = high_fid_structure.tol) + maxiters = high_fid_structure.maxiters, tol = high_fid_structure.tol) else throw("A surrogate with the name provided does not exist or is not currently supported with VariableFidelity") end diff --git a/src/VirtualStrategy.jl b/src/VirtualStrategy.jl index cc63a0c19..33ce1877a 100644 --- a/src/VirtualStrategy.jl +++ b/src/VirtualStrategy.jl @@ -1,17 +1,26 @@ # Minimum Constant Liar -function calculate_liars(::MinimumConstantLiar, tmp_surr::AbstractSurrogate, surr::AbstractSurrogate, new_x) +function 
calculate_liars(::MinimumConstantLiar, + tmp_surr::AbstractSurrogate, + surr::AbstractSurrogate, + new_x) new_y = minimum(surr.y) add_point!(tmp_surr, new_x, new_y) end # Maximum Constant Liar -function calculate_liars(::MaximumConstantLiar, tmp_surr::AbstractSurrogate, surr::AbstractSurrogate, new_x) +function calculate_liars(::MaximumConstantLiar, + tmp_surr::AbstractSurrogate, + surr::AbstractSurrogate, + new_x) new_y = maximum(surr.y) add_point!(tmp_surr, new_x, new_y) end # Mean Constant Liar -function calculate_liars(::MeanConstantLiar, tmp_surr::AbstractSurrogate, surr::AbstractSurrogate, new_x) +function calculate_liars(::MeanConstantLiar, + tmp_surr::AbstractSurrogate, + surr::AbstractSurrogate, + new_x) new_y = mean(surr.y) add_point!(tmp_surr, new_x, new_y) end @@ -32,4 +41,4 @@ end function calculate_liars(::KrigingBelieverLowerBound, tmp_k::Kriging, k::Kriging, new_x) new_y = k(new_x) - 3 * std_error_at_point(k, new_x) add_point!(tmp_k, new_x, new_y) -end \ No newline at end of file +end diff --git a/test/GEKPLS.jl b/test/GEKPLS.jl index 7d9a3a8ba..2c2aadf1d 100644 --- a/test/GEKPLS.jl +++ b/test/GEKPLS.jl @@ -83,7 +83,7 @@ y_true = welded_beam.(x_test) @testset "Test 4: Welded Beam Function Test (dimensions = 3; n_comp = 3; extra_points = 2)" begin n_comp = 3 delta_x = 0.0001 - extra_points = 2 + extra_points = 2 initial_theta = [0.01 for i in 1:n_comp] g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta) y_pred = g.(x_test) @@ -175,8 +175,8 @@ end extra_points = 2 initial_theta = [0.01 for i in 1:n_comp] g = GEKPLS(initial_x_vec, initial_y, initial_grads, n_comp, delta_x, lb, ub, - extra_points, - initial_theta) + extra_points, + initial_theta) n_test = 100 x_test = sample(n_test, lb, ub, GoldenSample()) y_true = sphere_function.(x_test) @@ -209,8 +209,8 @@ end y = sphere_function.(x) g = GEKPLS(x, y, grads, n_comp, delta_x, lb, ub, extra_points, initial_theta) x_point, minima = surrogate_optimize(sphere_function, SRBF(), lb, ub, g, - RandomSample(); maxiters = 20, - num_new_samples = 20, needs_gradient = true) + RandomSample(); maxiters = 20, + num_new_samples = 20, needs_gradient = true) @test isapprox(minima, 0.0, atol = 0.0001) end diff --git a/test/SectionSampleTests.jl b/test/SectionSampleTests.jl index 46336f1af..be6d656c8 100644 --- a/test/SectionSampleTests.jl +++ b/test/SectionSampleTests.jl @@ -25,16 +25,16 @@ isapprox(f([0, 0, 0]), f_hat([0, 0, 0])) """ The global minimum is at (0,0) """ (xy_min, f_hat_min) = surrogate_optimize(f, - DYCORS(), lb, ub, - f_hat, - SobolSample()) + DYCORS(), lb, ub, + f_hat, + SobolSample()) isapprox(xy_min[1], 0.0, atol = 1e-1) """ The minimum on the (0,10) section is around (0,10) """ section_sampler_z_is_10 = SectionSample([NaN64, NaN64, 10.0], - Surrogates.RandomSample()) + Surrogates.RandomSample()) @test [3] == Surrogates.fixed_dimensions(section_sampler_z_is_10) @test [1, 2] == Surrogates.free_dimensions(section_sampler_z_is_10) @@ -42,9 +42,9 @@ section_sampler_z_is_10 = SectionSample([NaN64, NaN64, 10.0], Surrogates.sample(5, lb, ub, section_sampler_z_is_10) (xy_min, f_hat_min) = surrogate_optimize(f, - EI(), lb, ub, - f_hat, - section_sampler_z_is_10, maxiters = 1000) + EI(), lb, ub, + f_hat, + section_sampler_z_is_10, maxiters = 1000) isapprox(xy_min[1], 0.0, atol = 0.1) isapprox(xy_min[2], 0.0, atol = 0.1) diff --git a/test/VariableFidelity.jl b/test/VariableFidelity.jl index 378c7b843..fe1cd49f1 100644 --- a/test/VariableFidelity.jl +++ b/test/VariableFidelity.jl @@ -13,10 +13,10 @@ 
add_point!(my_varfid, 3.0, 6.0) val = my_varfid(3.0) my_varfid_change_struct = VariableFidelitySurrogate(x, y, lb, ub, num_high_fidel = 2, - low_fid_structure = InverseDistanceStructure(p = 1.0), - high_fid_structure = RadialBasisStructure(radial_function = linearRadial(), - scale_factor = 1.0, - sparse = false)) + low_fid_structure = InverseDistanceStructure(p = 1.0), + high_fid_structure = RadialBasisStructure(radial_function = linearRadial(), + scale_factor = 1.0, + sparse = false)) #ND n = 10 lb = [0.0, 0.0] @@ -28,7 +28,7 @@ my_varfidND = VariableFidelitySurrogate(x, y, lb, ub) val = my_varfidND((2.0, 2.0)) add_point!(my_varfidND, (3.0, 3.0), 9.0) my_varfidND_change_struct = VariableFidelitySurrogate(x, y, lb, ub, num_high_fidel = 2, - low_fid_structure = InverseDistanceStructure(p = 1.0), - high_fid_structure = RadialBasisStructure(radial_function = linearRadial(), - scale_factor = 1.0, - sparse = false)) + low_fid_structure = InverseDistanceStructure(p = 1.0), + high_fid_structure = RadialBasisStructure(radial_function = linearRadial(), + scale_factor = 1.0, + sparse = false)) diff --git a/test/optimization.jl b/test/optimization.jl index 11413d0fb..85492edc5 100644 --- a/test/optimization.jl +++ b/test/optimization.jl @@ -23,7 +23,7 @@ x = [2.5, 4.0, 6.0] y = [6.0, 9.0, 13.0] my_k_SRBF1 = Kriging(x, y, lb, ub; p) xstar, fstar = surrogate_optimize(objective_function, SRBF(), a, b, my_k_SRBF1, - RandomSample()) + RandomSample()) #Using RadialBasis @@ -31,19 +31,19 @@ x = [2.5, 4.0, 6.0] y = [6.0, 9.0, 13.0] my_rad_SRBF1 = RadialBasis(x, y, a, b, rad = linearRadial()) (xstar, fstar) = surrogate_optimize(objective_function, SRBF(), a, b, my_rad_SRBF1, - RandomSample()) + RandomSample()) x = [2.5, 4.0, 6.0] y = [6.0, 9.0, 13.0] my_wend_1d = Wendland(x, y, lb, ub) xstar, fstar = surrogate_optimize(objective_function, SRBF(), a, b, my_wend_1d, - RandomSample()) + RandomSample()) x = [2.5, 4.0, 6.0] y = [6.0, 9.0, 13.0] my_earth1d = EarthSurrogate(x, y, lb, ub) xstar, fstar = surrogate_optimize(objective_function, SRBF(), a, b, my_earth1d, - HaltonSample()) + HaltonSample()) ##### ND ##### objective_function_ND = z -> 3 * norm(z) + 1 @@ -57,7 +57,7 @@ y = objective_function_ND.(x) my_k_SRBFN = Kriging(x, y, lb, ub) #Every optimization method now returns the y_min and its position x_min, y_min = surrogate_optimize(objective_function_ND, SRBF(), lb, ub, my_k_SRBFN, - RandomSample()) + RandomSample()) #Radials lb = [1.0, 1.0] @@ -84,7 +84,7 @@ objective_function_ND = z -> 3 * norm(z) + 1 y = objective_function_ND.(x) my_linear_ND = LinearSurrogate(x, y, lb, ub) surrogate_optimize(objective_function_ND, SRBF(), lb, ub, my_linear_ND, SobolSample(), - maxiters = 15) + maxiters = 15) #= #SVM @@ -106,7 +106,7 @@ my_p = 2.5 y = objective_function_ND.(x) my_inverse_ND = InverseDistanceSurrogate(x, y, lb, ub, p = my_p) surrogate_optimize(objective_function_ND, SRBF(), lb, ub, my_inverse_ND, SobolSample(), - maxiters = 15) + maxiters = 15) #SecondOrderPolynomialSurrogate lb = [0.0, 0.0] @@ -116,7 +116,7 @@ x = sample(15, lb, ub, RandomSample()) y = obj_ND.(x) my_second_order_poly_ND = SecondOrderPolynomialSurrogate(x, y, lb, ub) surrogate_optimize(obj_ND, SRBF(), lb, ub, my_second_order_poly_ND, SobolSample(), - maxiters = 15) + maxiters = 15) ####### LCBS ######### ######1D###### @@ -156,7 +156,7 @@ x = sample(5, lb, ub, SobolSample()) y = objective_function.(x) my_k_EI1 = Kriging(x, y, lb, ub; p = 2) surrogate_optimize(objective_function, EI(), lb, ub, my_k_EI1, SobolSample(), - maxiters = 200, 
num_new_samples = 155) + maxiters = 200, num_new_samples = 155) # Check that EI is correctly minimizing the objective y_min, index_min = findmin(my_k_EI1.y) @@ -241,15 +241,15 @@ ub = [6.0, 6.0] my_k_DYCORSN = Kriging(x, y, lb, ub) surrogate_optimize(objective_function_ND, DYCORS(), lb, ub, my_k_DYCORSN, RandomSample(), - maxiters = 30) + maxiters = 30) my_rad_DYCORSN = RadialBasis(x, y, lb, ub, rad = linearRadial()) surrogate_optimize(objective_function_ND, DYCORS(), lb, ub, my_rad_DYCORSN, RandomSample(), - maxiters = 30) + maxiters = 30) my_wend_ND = Wendland(x, y, lb, ub) surrogate_optimize(objective_function_ND, DYCORS(), lb, ub, my_wend_ND, RandomSample(), - maxiters = 30) + maxiters = 30) ### SOP ### # 1D @@ -262,7 +262,7 @@ ub = 6.0 num_centers = 2 my_k_SOP1 = Kriging(x, y, lb, ub, p = 1.9) surrogate_optimize(objective_function, SOP(num_centers), lb, ub, my_k_SOP1, SobolSample(), - maxiters = 60) + maxiters = 60) #ND objective_function_ND = z -> 2 * norm(z) + 1 x = [(2.3, 2.2), (1.4, 1.5)] @@ -274,7 +274,7 @@ ub = [6.0, 6.0] my_k_SOPND = Kriging(x, y, lb, ub) num_centers = 2 surrogate_optimize(objective_function_ND, SOP(num_centers), lb, ub, my_k_SOPND, - SobolSample(), maxiters = 20) + SobolSample(), maxiters = 20) #multi optimization #= diff --git a/test/parallel.jl b/test/parallel.jl index 7cc133e52..f3472b958 100755 --- a/test/parallel.jl +++ b/test/parallel.jl @@ -1,7 +1,6 @@ using Surrogates using Test -using Revise - +using Revise #1D lb = 0.0 @@ -10,13 +9,18 @@ f = x -> log(x) * exp(x) x = sample(5, lb, ub, SobolSample()) y = f.(x) - # Test lengths of new_x and EI (1D) # TODO my_k = Kriging(x, y, lb, ub) -new_x, eis = potential_optimal_points(EI(), MeanConstantLiar(), lb, ub, my_k, SobolSample(), 3) +new_x, eis = potential_optimal_points(EI(), + MeanConstantLiar(), + lb, + ub, + my_k, + SobolSample(), + 3) @test length(new_x) == 3 @test length(eis) == 3 @@ -24,11 +28,16 @@ new_x, eis = potential_optimal_points(EI(), MeanConstantLiar(), lb, ub, my_k, So # Test lengths of new_x and SRBF (1D) my_surr = RadialBasis(x, y, lb, ub) -new_x, eis = potential_optimal_points(SRBF(), MeanConstantLiar(), lb, ub, my_surr, SobolSample(), 3) +new_x, eis = potential_optimal_points(SRBF(), + MeanConstantLiar(), + lb, + ub, + my_surr, + SobolSample(), + 3) @test length(new_x) == 3 @test length(eis) == 3 - # Test lengths of new_x and EI (ND) lb = [0.0, 0.0, 1.0] @@ -39,7 +48,13 @@ y = f.(x) my_k = Kriging(x, y, lb, ub) -new_x, eis = potential_optimal_points(EI(), MeanConstantLiar(), lb, ub, my_k, SobolSample(), 5) +new_x, eis = potential_optimal_points(EI(), + MeanConstantLiar(), + lb, + ub, + my_k, + SobolSample(), + 5) @test length(new_x) == 5 @test length(eis) == 5 @@ -49,7 +64,13 @@ new_x, eis = potential_optimal_points(EI(), MeanConstantLiar(), lb, ub, my_k, So # Test lengths of new_x and SRBF (ND) my_surr = RadialBasis(x, y, lb, ub) -new_x, eis = potential_optimal_points(SRBF(), MeanConstantLiar(), lb, ub, my_surr, SobolSample(), 5) +new_x, eis = potential_optimal_points(SRBF(), + MeanConstantLiar(), + lb, + ub, + my_surr, + SobolSample(), + 5) @test length(new_x) == 5 @test length(eis) == 5 @@ -57,5 +78,10 @@ new_x, eis = potential_optimal_points(SRBF(), MeanConstantLiar(), lb, ub, my_sur @test length(new_x[1]) == 3 # # Check hyperparameter validation for potential_optimal_points -@test_throws ArgumentError new_x, eis = potential_optimal_points(EI(), MeanConstantLiar(), lb, ub, my_k, SobolSample(), -1) - +@test_throws ArgumentError new_x, eis=potential_optimal_points(EI(), + 
MeanConstantLiar(), + lb, + ub, + my_k, + SobolSample(), + -1) diff --git a/test/runtests.jl b/test/runtests.jl index 25abaf606..b94a25ac8 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -2,7 +2,7 @@ using Surrogates using Test using SafeTestsets using Pkg -VERSION <= v"1.7" && Pkg.add(name="Statistics", version=VERSION) +VERSION <= v"1.7" && Pkg.add(name = "Statistics", version = VERSION) function dev_subpkg(subpkg) subpkg_path = joinpath(dirname(@__DIR__), "lib", subpkg) @@ -22,20 +22,48 @@ end end end @testset "Algorithms" begin - @time @safetestset "GEKPLS" begin include("GEKPLS.jl") end - @time @safetestset "Radials" begin include("Radials.jl") end - @time @safetestset "Kriging" begin include("Kriging.jl") end - @time @safetestset "Sampling" begin include("sampling.jl") end - @time @safetestset "Optimization" begin include("optimization.jl") end - @time @safetestset "LinearSurrogate" begin include("linearSurrogate.jl") end - @time @safetestset "Lobachevsky" begin include("lobachevsky.jl") end - @time @safetestset "InverseDistanceSurrogate" begin include("inverseDistanceSurrogate.jl") end - @time @safetestset "SecondOrderPolynomialSurrogate" begin include("secondOrderPolynomialSurrogate.jl") end + @time @safetestset "GEKPLS" begin + include("GEKPLS.jl") + end + @time @safetestset "Radials" begin + include("Radials.jl") + end + @time @safetestset "Kriging" begin + include("Kriging.jl") + end + @time @safetestset "Sampling" begin + include("sampling.jl") + end + @time @safetestset "Optimization" begin + include("optimization.jl") + end + @time @safetestset "LinearSurrogate" begin + include("linearSurrogate.jl") + end + @time @safetestset "Lobachevsky" begin + include("lobachevsky.jl") + end + @time @safetestset "InverseDistanceSurrogate" begin + include("inverseDistanceSurrogate.jl") + end + @time @safetestset "SecondOrderPolynomialSurrogate" begin + include("secondOrderPolynomialSurrogate.jl") + end # @time @safetestset "AD_Compatibility" begin include("AD_compatibility.jl") end - @time @safetestset "Wendland" begin include("Wendland.jl") end - @time @safetestset "VariableFidelity" begin include("VariableFidelity.jl") end - @time @safetestset "Earth" begin include("earth.jl") end - @time @safetestset "Gradient Enhanced Kriging" begin include("GEK.jl") end - @time @safetestset "Section Samplers" begin include("SectionSampleTests.jl") end + @time @safetestset "Wendland" begin + include("Wendland.jl") + end + @time @safetestset "VariableFidelity" begin + include("VariableFidelity.jl") + end + @time @safetestset "Earth" begin + include("earth.jl") + end + @time @safetestset "Gradient Enhanced Kriging" begin + include("GEK.jl") + end + @time @safetestset "Section Samplers" begin + include("SectionSampleTests.jl") + end end -end \ No newline at end of file +end From 9c24b65b37a583aaae0d937c216bc407b91dd6f3 Mon Sep 17 00:00:00 2001 From: Sathvik Bhagavan Date: Wed, 13 Dec 2023 05:14:26 +0000 Subject: [PATCH 3/5] ci: drop 1.6 CI workflow --- .github/workflows/CI.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 31ebfc6a9..5cdcaa9ed 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -15,7 +15,6 @@ jobs: - Core version: - '1' - - '1.6' steps: - uses: actions/checkout@v4 - uses: julia-actions/setup-julia@v1 From f6f72677ec92cd7bfdfc211a56cdfc4295683ba5 Mon Sep 17 00:00:00 2001 From: Sathvik Bhagavan Date: Wed, 13 Dec 2023 05:17:49 +0000 Subject: [PATCH 4/5] fixup! 
chore: format the repo --- lib/SurrogatesMOE/X-QMC2.csv | 151 ----------------------------------- lib/SurrogatesMOE/X-QMC3.csv | 151 ----------------------------------- 2 files changed, 302 deletions(-) delete mode 100644 lib/SurrogatesMOE/X-QMC2.csv delete mode 100644 lib/SurrogatesMOE/X-QMC3.csv diff --git a/lib/SurrogatesMOE/X-QMC2.csv b/lib/SurrogatesMOE/X-QMC2.csv deleted file mode 100644 index 026e09ab5..000000000 --- a/lib/SurrogatesMOE/X-QMC2.csv +++ /dev/null @@ -1,151 +0,0 @@ -1,2 --0.9765625,-0.3359375 -0.0234375,0.6640625 -0.5234375,-0.8359375 --0.4765625,0.1640625 --0.2265625,-0.5859375 -0.7734375,0.4140625 -0.2734375,-0.0859375 --0.7265625,0.9140625 --0.6015625,-0.9609375 -0.3984375,0.0390625 -0.8984375,-0.4609375 --0.1015625,0.5390625 --0.3515625,-0.2109375 -0.6484375,0.7890625 -0.1484375,-0.7109375 --0.8515625,0.2890625 --0.7890625,-0.6484375 -0.2109375,0.3515625 -0.7109375,-0.1484375 --0.2890625,0.8515625 --0.0390625,-0.3984375 -0.9609375,0.6015625 -0.4609375,-0.8984375 --0.5390625,0.1015625 --0.6640625,-0.0234375 -0.3359375,0.9765625 -0.8359375,-0.5234375 --0.1640625,0.4765625 --0.4140625,-0.7734375 -0.5859375,0.2265625 -0.0859375,-0.2734375 --0.9140625,0.7265625 --0.8828125,-0.8671875 -0.1171875,0.1328125 -0.6171875,-0.3671875 --0.3828125,0.6328125 --0.1328125,-0.1171875 -0.8671875,0.8828125 -0.3671875,-0.6171875 --0.6328125,0.3828125 --0.5078125,-0.4921875 -0.4921875,0.5078125 -0.9921875,-0.9921875 --0.0078125,0.0078125 --0.2578125,-0.7421875 -0.7421875,0.2578125 -0.2421875,-0.2421875 --0.7578125,0.7578125 --0.8203125,-0.1796875 -0.1796875,0.8203125 -0.6796875,-0.6796875 --0.3203125,0.3203125 --0.0703125,-0.9296875 -0.9296875,0.0703125 -0.4296875,-0.4296875 --0.5703125,0.5703125 --0.6953125,-0.5546875 -0.3046875,0.4453125 -0.8046875,-0.0546875 --0.1953125,0.9453125 --0.4453125,-0.3046875 -0.5546875,0.6953125 -0.0546875,-0.8046875 --0.9453125,0.1953125 --0.9609375,-0.6015625 -0.0390625,0.3984375 -0.5390625,-0.1015625 --0.4609375,0.8984375 --0.2109375,-0.3515625 -0.7890625,0.6484375 -0.2890625,-0.8515625 --0.7109375,0.1484375 --0.5859375,-0.2265625 -0.4140625,0.7734375 -0.9140625,-0.7265625 --0.0859375,0.2734375 --0.3359375,-0.9765625 -0.6640625,0.0234375 -0.1640625,-0.4765625 --0.8359375,0.5234375 --0.7734375,-0.4140625 -0.2265625,0.5859375 -0.7265625,-0.9140625 --0.2734375,0.0859375 --0.0234375,-0.6640625 -0.9765625,0.3359375 -0.4765625,-0.1640625 --0.5234375,0.8359375 --0.6484375,-0.7890625 -0.3515625,0.2109375 -0.8515625,-0.2890625 --0.1484375,0.7109375 --0.3984375,-0.0390625 -0.6015625,0.9609375 -0.1015625,-0.5390625 --0.8984375,0.4609375 --0.9296875,-0.0703125 -0.0703125,0.9296875 -0.5703125,-0.5703125 --0.4296875,0.4296875 --0.1796875,-0.8203125 -0.8203125,0.1796875 -0.3203125,-0.3203125 --0.6796875,0.6796875 --0.5546875,-0.6953125 -0.4453125,0.3046875 -0.9453125,-0.1953125 --0.0546875,0.8046875 --0.3046875,-0.4453125 -0.6953125,0.5546875 -0.1953125,-0.9453125 --0.8046875,0.0546875 --0.8671875,-0.8828125 -0.1328125,0.1171875 -0.6328125,-0.3828125 --0.3671875,0.6171875 --0.1171875,-0.1328125 -0.8828125,0.8671875 -0.3828125,-0.6328125 --0.6171875,0.3671875 --0.7421875,-0.2578125 -0.2578125,0.7421875 -0.7578125,-0.7578125 --0.2421875,0.2421875 --0.4921875,-0.5078125 -0.5078125,0.4921875 -0.0078125,-0.0078125 --0.9921875,0.9921875 --0.98828125,-0.00390625 -0.01171875,0.99609375 -0.51171875,-0.50390625 --0.48828125,0.49609375 --0.23828125,-0.75390625 -0.76171875,0.24609375 -0.26171875,-0.25390625 --0.73828125,0.74609375 --0.61328125,-0.62890625 -0.38671875,0.37109375 
-0.88671875,-0.12890625 --0.11328125,0.87109375 --0.36328125,-0.37890625 -0.63671875,0.62109375 -0.13671875,-0.87890625 --0.86328125,0.12109375 --0.80078125,-0.94140625 -0.19921875,0.05859375 -0.69921875,-0.44140625 --0.30078125,0.55859375 --0.05078125,-0.19140625 -0.94921875,0.80859375 diff --git a/lib/SurrogatesMOE/X-QMC3.csv b/lib/SurrogatesMOE/X-QMC3.csv deleted file mode 100644 index 026e09ab5..000000000 --- a/lib/SurrogatesMOE/X-QMC3.csv +++ /dev/null @@ -1,151 +0,0 @@ -1,2 --0.9765625,-0.3359375 -0.0234375,0.6640625 -0.5234375,-0.8359375 --0.4765625,0.1640625 --0.2265625,-0.5859375 -0.7734375,0.4140625 -0.2734375,-0.0859375 --0.7265625,0.9140625 --0.6015625,-0.9609375 -0.3984375,0.0390625 -0.8984375,-0.4609375 --0.1015625,0.5390625 --0.3515625,-0.2109375 -0.6484375,0.7890625 -0.1484375,-0.7109375 --0.8515625,0.2890625 --0.7890625,-0.6484375 -0.2109375,0.3515625 -0.7109375,-0.1484375 --0.2890625,0.8515625 --0.0390625,-0.3984375 -0.9609375,0.6015625 -0.4609375,-0.8984375 --0.5390625,0.1015625 --0.6640625,-0.0234375 -0.3359375,0.9765625 -0.8359375,-0.5234375 --0.1640625,0.4765625 --0.4140625,-0.7734375 -0.5859375,0.2265625 -0.0859375,-0.2734375 --0.9140625,0.7265625 --0.8828125,-0.8671875 -0.1171875,0.1328125 -0.6171875,-0.3671875 --0.3828125,0.6328125 --0.1328125,-0.1171875 -0.8671875,0.8828125 -0.3671875,-0.6171875 --0.6328125,0.3828125 --0.5078125,-0.4921875 -0.4921875,0.5078125 -0.9921875,-0.9921875 --0.0078125,0.0078125 --0.2578125,-0.7421875 -0.7421875,0.2578125 -0.2421875,-0.2421875 --0.7578125,0.7578125 --0.8203125,-0.1796875 -0.1796875,0.8203125 -0.6796875,-0.6796875 --0.3203125,0.3203125 --0.0703125,-0.9296875 -0.9296875,0.0703125 -0.4296875,-0.4296875 --0.5703125,0.5703125 --0.6953125,-0.5546875 -0.3046875,0.4453125 -0.8046875,-0.0546875 --0.1953125,0.9453125 --0.4453125,-0.3046875 -0.5546875,0.6953125 -0.0546875,-0.8046875 --0.9453125,0.1953125 --0.9609375,-0.6015625 -0.0390625,0.3984375 -0.5390625,-0.1015625 --0.4609375,0.8984375 --0.2109375,-0.3515625 -0.7890625,0.6484375 -0.2890625,-0.8515625 --0.7109375,0.1484375 --0.5859375,-0.2265625 -0.4140625,0.7734375 -0.9140625,-0.7265625 --0.0859375,0.2734375 --0.3359375,-0.9765625 -0.6640625,0.0234375 -0.1640625,-0.4765625 --0.8359375,0.5234375 --0.7734375,-0.4140625 -0.2265625,0.5859375 -0.7265625,-0.9140625 --0.2734375,0.0859375 --0.0234375,-0.6640625 -0.9765625,0.3359375 -0.4765625,-0.1640625 --0.5234375,0.8359375 --0.6484375,-0.7890625 -0.3515625,0.2109375 -0.8515625,-0.2890625 --0.1484375,0.7109375 --0.3984375,-0.0390625 -0.6015625,0.9609375 -0.1015625,-0.5390625 --0.8984375,0.4609375 --0.9296875,-0.0703125 -0.0703125,0.9296875 -0.5703125,-0.5703125 --0.4296875,0.4296875 --0.1796875,-0.8203125 -0.8203125,0.1796875 -0.3203125,-0.3203125 --0.6796875,0.6796875 --0.5546875,-0.6953125 -0.4453125,0.3046875 -0.9453125,-0.1953125 --0.0546875,0.8046875 --0.3046875,-0.4453125 -0.6953125,0.5546875 -0.1953125,-0.9453125 --0.8046875,0.0546875 --0.8671875,-0.8828125 -0.1328125,0.1171875 -0.6328125,-0.3828125 --0.3671875,0.6171875 --0.1171875,-0.1328125 -0.8828125,0.8671875 -0.3828125,-0.6328125 --0.6171875,0.3671875 --0.7421875,-0.2578125 -0.2578125,0.7421875 -0.7578125,-0.7578125 --0.2421875,0.2421875 --0.4921875,-0.5078125 -0.5078125,0.4921875 -0.0078125,-0.0078125 --0.9921875,0.9921875 --0.98828125,-0.00390625 -0.01171875,0.99609375 -0.51171875,-0.50390625 --0.48828125,0.49609375 --0.23828125,-0.75390625 -0.76171875,0.24609375 -0.26171875,-0.25390625 --0.73828125,0.74609375 --0.61328125,-0.62890625 -0.38671875,0.37109375 
-0.88671875,-0.12890625 --0.11328125,0.87109375 --0.36328125,-0.37890625 -0.63671875,0.62109375 -0.13671875,-0.87890625 --0.86328125,0.12109375 --0.80078125,-0.94140625 -0.19921875,0.05859375 -0.69921875,-0.44140625 --0.30078125,0.55859375 --0.05078125,-0.19140625 -0.94921875,0.80859375 From a9c4fa0a45ed605be23d81eee3e43597c7713831 Mon Sep 17 00:00:00 2001 From: Sathvik Bhagavan Date: Wed, 13 Dec 2023 05:48:47 +0000 Subject: [PATCH 5/5] docs: fix Reproducibility section --- docs/src/index.md | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/docs/src/index.md b/docs/src/index.md index 096a3a04b..51538ba63 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -113,56 +113,59 @@ surrogate_optimize(f,SRBF(),lb,ub,my_lobachevsky,RandomSample()) value = my_lobachevsky(5.0) ``` ## Reproducibility + ```@raw html
<details><summary>The documentation of this SciML package was built using these direct dependencies,</summary>
```
+
```@example
using Pkg # hide
Pkg.status() # hide
```
+
```@raw html
</details>
```
+
```@raw html
<details><summary>and using this machine and Julia version.</summary>
```
+
```@example
using InteractiveUtils # hide
versioninfo() # hide
```
+
```@raw html
</details>
```
+
```@raw html
<details><summary>A more complete overview of all dependencies and their versions is also provided.</summary>
```
+
```@example
using Pkg # hide
Pkg.status(; mode = PKGMODE_MANIFEST) # hide
```
+
```@raw html
</details>
```
+
-```@raw html
-You can also download the
-manifest file and the
-project file.
-```
+```@eval
+using TOML
+using Markdown
+version = TOML.parse(read("../../Project.toml", String))["version"]
+name = TOML.parse(read("../../Project.toml", String))["name"]
+link_manifest = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version *
+                "/assets/Manifest.toml"
+link_project = "https://github.com/SciML/" * name * ".jl/tree/gh-pages/v" * version *
+               "/assets/Project.toml"
+Markdown.parse("""You can also download the
+[manifest]($link_manifest)
+file and the
+[project]($link_project)
+file.
+""")
+```
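
For readers tracking the `SectionSample` hunks in the formatting patch above, a minimal usage sketch of the API those hunks reflow. This example is not part of the patch series; it assumes the exported names `sample`, `SectionSample`, and `RandomSample` behave as the docstrings in `src/Sampling.jl` describe, and the bounds and sample count are made up for illustration.

```julia
using Surrogates

# A NaN64 entry marks a free dimension; a scalar entry pins that coordinate,
# so every point drawn below lies on the z = 10.0 section of the box.
section = SectionSample([NaN64, NaN64, 10.0], RandomSample())

lb = [-5.0, -5.0, 5.0]   # illustrative bounds; the fixed value 10.0 lies inside
ub = [5.0, 5.0, 15.0]

pts = sample(5, lb, ub, section)  # a Vector of 3-tuples, each with third entry 10.0
```

Encoding the constraint as `NaN` entries of `x0` lets the sectioned sampler reuse the ordinary `sample(n, lb, ub, alg)` signature, which is why the hunks above only reindent these methods rather than change their interface.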