From 9826638cb587e9e463f93f61dd0cb88e1ae83a26 Mon Sep 17 00:00:00 2001 From: Sathvik Bhagavan Date: Wed, 13 Dec 2023 03:03:32 +0000 Subject: [PATCH] docs: update docs to use HaltonSample --- docs/src/InverseDistance.md | 4 ++-- docs/src/moe.md | 3 +-- docs/src/parallel.md | 16 ++++++++-------- docs/src/polychaos.md | 4 ++-- docs/src/samples.md | 3 +-- docs/src/secondorderpoly.md | 2 +- 6 files changed, 15 insertions(+), 17 deletions(-) diff --git a/docs/src/InverseDistance.md b/docs/src/InverseDistance.md index 8f64ccd37..f90bc3f29 100644 --- a/docs/src/InverseDistance.md +++ b/docs/src/InverseDistance.md @@ -15,7 +15,7 @@ default() ### Sampling -We choose to sample f in 25 points between 0 and 10 using the `sample` function. The sampling points are chosen using a Low Discrepancy, this can be done by passing `LowDiscrepancySample()` to the `sample` function. +We choose to sample f in 25 points between 0 and 10 using the `sample` function. The sampling points are chosen using a low-discrepancy sequence; this can be done by passing `HaltonSample()` to the `sample` function. 
```@example Inverse_Distance1D f(x) = sin(x) + sin(x)^2 + sin(x)^3 @@ -23,7 +23,7 @@ f(x) = sin(x) + sin(x)^2 + sin(x)^3 n_samples = 25 lower_bound = 0.0 upper_bound = 10.0 -x = sample(n_samples, lower_bound, upper_bound, LowDiscrepancySample(;base=2)) +x = sample(n_samples, lower_bound, upper_bound, HaltonSample()) y = f.(x) scatter(x, y, label="Sampled points", xlims=(lower_bound, upper_bound), legend=:top) diff --git a/docs/src/moe.md b/docs/src/moe.md index 1a96b7ad1..0bcd432f1 100644 --- a/docs/src/moe.md +++ b/docs/src/moe.md @@ -92,7 +92,7 @@ end lb = [-1.0, -1.0] ub = [1.0, 1.0] n = 150 -x = sample(n, lb, ub, SobolSample()) +x = sample(n, lb, ub, RandomSample()) y = discont_NDIM.(x) x_test = sample(10, lb, ub, GoldenSample()) @@ -110,7 +110,6 @@ rbf = RadialBasis(x, y, lb, ub) rbf_pred_vals = rbf.(x_test) rbf_rmse = rmse(true_vals, rbf_pred_vals) println(rbf_rmse > moe_rmse) - ``` ### Usage Notes - Example With Other Surrogates diff --git a/docs/src/parallel.md b/docs/src/parallel.md index 2388e1eec..9bff2f4e4 100755 --- a/docs/src/parallel.md +++ b/docs/src/parallel.md @@ -17,24 +17,24 @@ To ensure that points of interest returned by `potential_optimal_points` are suf The following strategies are available for virtual point selection for all optimization algorithms: -- "Minimum Constant Liar (CLmin)": +- "Minimum Constant Liar (MinimumConstantLiar)": - The virtual point is assigned using the lowest known value of the merit function across all evaluated points. -- "Mean Constant Liar (CLmean)": +- "Mean Constant Liar (MeanConstantLiar)": - The virtual point is assigned using the mean of the merit function across all evaluated points. -- "Maximum Constant Liar (CLmax)": +- "Maximum Constant Liar (MaximumConstantLiar)": - The virtual point is assigned using the great known value of the merit function across all evaluated points. 
For Kriging surrogates, specifically, the above and follow strategies are available: -- "Kriging Believer (KB)": +- "Kriging Believer (KrigingBeliever)": - The virtual point is assigned using the mean of the Kriging surrogate at the virtual point. -- "Kriging Believer Upper Bound (KBUB)": +- "Kriging Believer Upper Bound (KrigingBelieverUpperBound)": - The virtual point is assigned using 3$\sigma$ above the mean of the Kriging surrogate at the virtual point. -- "Kriging Believer Lower Bound (KBLB)": +- "Kriging Believer Lower Bound (KrigingBelieverLowerBound)": - The virtual point is assigned using 3$\sigma$ below the mean of the Kriging surrogate at the virtual point. -In general, CLmin and KBLB tend to favor exploitation while CLmax and KBUB tend to favor exploration. CLmean and KB tend to be a compromise between the two. +In general, MinimumConstantLiar and KrigingBelieverLowerBound tend to favor exploitation while MaximumConstantLiar and KrigingBelieverUpperBound tend to favor exploration. MeanConstantLiar and KrigingBeliever tend to be a compromise between the two. ## Examples @@ -50,7 +50,7 @@ y = f.(x) my_k = Kriging(x, y, lb, ub) for _ in 1:10 - new_x, eis = potential_optimal_points(EI(), lb, ub, my_k, SobolSample(), 3, CLmean!) + new_x, eis = potential_optimal_points(EI(), MeanConstantLiar(), lb, ub, my_k, SobolSample(), 3) add_point!(my_k, new_x, f.(new_x)) end ``` diff --git a/docs/src/polychaos.md b/docs/src/polychaos.md index e8b6d1110..24b368579 100644 --- a/docs/src/polychaos.md +++ b/docs/src/polychaos.md @@ -9,7 +9,7 @@ we are trying to fit. Under the hood, PolyChaos.jl has been used. It is possible to specify a type of polynomial for each dimension of the problem. ### Sampling -We choose to sample f in 25 points between 0 and 10 using the `sample` function. The sampling points are chosen using a Low Discrepancy, this can be done by passing `LowDiscrepancySample()` to the `sample` function. 
+We choose to sample f in 20 points between 1 and 6 using the `sample` function. The sampling points are chosen using a low-discrepancy sequence; this can be done by passing `HaltonSample()` to the `sample` function. ```@example polychaos using Surrogates @@ -20,7 +20,7 @@ default() n = 20 lower_bound = 1.0 upper_bound = 6.0 -x = sample(n,lower_bound,upper_bound,LowDiscrepancySample(2)) +x = sample(n,lower_bound,upper_bound,HaltonSample()) f = x -> log(x)*x + sin(x) y = f.(x) scatter(x, y, label="Sampled points", xlims=(lower_bound, upper_bound), legend=:top) diff --git a/docs/src/samples.md b/docs/src/samples.md index 073191159..2a92a9d89 100644 --- a/docs/src/samples.md +++ b/docs/src/samples.md @@ -32,8 +32,7 @@ sample(n,lb,ub,::LatinHypercubeSample) * Low Discrepancy sample ``` -LowDiscrepancySample{T} -sample(n,lb,ub,S::LowDiscrepancySample) +sample(n,lb,ub,S::HaltonSample) ``` * Sample on section diff --git a/docs/src/secondorderpoly.md b/docs/src/secondorderpoly.md index ef2329986..97826e852 100644 --- a/docs/src/secondorderpoly.md +++ b/docs/src/secondorderpoly.md @@ -18,7 +18,7 @@ f = x -> 3*sin(x) + 10/x lb = 3.0 ub = 6.0 n = 10 -x = sample(n,lb,ub,LowDiscrepancySample(2)) +x = sample(n,lb,ub,HaltonSample()) y = f.(x) scatter(x, y, label="Sampled points", xlims=(lb, ub)) plot!(f, label="True function", xlims=(lb, ub))