diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 700707ce..1e8a051e 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -5,3 +5,6 @@ updates:
     directory: "/" # Location of package manifests
     schedule:
       interval: "weekly"
+    ignore:
+      - dependency-name: "crate-ci/typos"
+        update-types: ["version-update:semver-patch"]
diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml
new file mode 100644
index 00000000..74af4eff
--- /dev/null
+++ b/.github/workflows/SpellCheck.yml
@@ -0,0 +1,13 @@
+name: Spell Check
+
+on: [pull_request]
+
+jobs:
+  typos-check:
+    name: Spell Check with Typos
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Actions Repository
+        uses: actions/checkout@v4
+      - name: Check spelling
+        uses: crate-ci/typos@v1.16.23
\ No newline at end of file
diff --git a/.typos.toml b/.typos.toml
new file mode 100644
index 00000000..9a032fd3
--- /dev/null
+++ b/.typos.toml
@@ -0,0 +1,2 @@
+[default.extend-words]
+ND = "ND"
\ No newline at end of file
diff --git a/Project.toml b/Project.toml
index 448be3e3..1c154f1e 100644
--- a/Project.toml
+++ b/Project.toml
@@ -22,6 +22,7 @@ GLM = "1.3"
 IterativeSolvers = "0.9"
 PolyChaos = "0.2"
 QuasiMonteCarlo = "0.3"
+Statistics = "1"
 Zygote = "0.4, 0.5, 0.6"
 julia = "1.9"
 
diff --git a/docs/src/Salustowicz.md b/docs/src/Salustowicz.md
index 3f32b47c..c89d7316 100644
--- a/docs/src/Salustowicz.md
+++ b/docs/src/Salustowicz.md
@@ -5,7 +5,7 @@ The true underlying function HyGP had to approximate is the 1D Salustowicz funct
 
 The Salustowicz benchmark function is as follows:
 
-``f(x) = e^{(-x)} x^3 cos(x) sin(x) (cos(x) sin^2(x) - 1)``
+``f(x) = e^{-x} x^3 \cos(x) \sin(x) (\cos(x) \sin^2(x) - 1)``
 
 Let's import these two packages `Surrogates` and `Plots`:
 
diff --git a/docs/src/ackley.md b/docs/src/ackley.md
index 92f6154c..3c903c76 100644
--- a/docs/src/ackley.md
+++ b/docs/src/ackley.md
@@ -1,7 +1,7 @@
 # Ackley Function
 
 The Ackley function is defined as:
-``f(x) = -a*exp(-b\sqrt{\frac{1}{d}\sum_{i=1}^d x_i^2}) - exp(\frac{1}{d} \sum_{i=1}^d cos(cx_i)) + a + exp(1)``
+``f(x) = -a*\exp(-b\sqrt{\frac{1}{d}\sum_{i=1}^d x_i^2}) - \exp(\frac{1}{d} \sum_{i=1}^d \cos(cx_i)) + a + \exp(1)``
 
 Usually the recommended values are: ``a = 20``, ``b = 0.2`` and ``c = 2\pi``
 Let's see the 1D case.
@@ -16,7 +16,7 @@ Now, let's define the `Ackley` function:
 
 ```@example ackley
 function ackley(x)
-    a, b, c = 20.0, -0.2, 2.0*π
+    a, b, c = 20.0, 0.2, 2.0*π
     len_recip = inv(length(x))
     sum_sqrs = zero(eltype(x))
     sum_cos = sum_sqrs
@@ -24,7 +24,7 @@ function ackley(x)
         sum_cos += cos(c*i)
         sum_sqrs += i^2
     end
-    return (-a * exp(b * sqrt(len_recip*sum_sqrs)) -
+    return (-a * exp(-b * sqrt(len_recip*sum_sqrs)) -
             exp(len_recip*sum_cos) + a + 2.71)
 end
 ```
diff --git a/docs/src/gramacylee.md b/docs/src/gramacylee.md
index cdc6810a..1221673f 100644
--- a/docs/src/gramacylee.md
+++ b/docs/src/gramacylee.md
@@ -4,7 +4,7 @@ Gramacy & Lee Function is a continuous function. It is not convex. The function
 ``x \in [-0.5, 2.5]``.
 The Gramacy & Lee is as follows:
 
-``f(x) = \frac{sin(10\pi x)}{2x} + (x-1)^4``.
+``f(x) = \frac{\sin(10\pi x)}{2x} + (x-1)^4``.
 
 Let's import these two packages `Surrogates` and `Plots`:
 
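A note on the two `ackley` hunks above: flipping the sign of `b` (from `-0.2` to `0.2`) and negating it again inside `exp` cancel out, so runtime behavior is unchanged; the edit aligns the code with the displayed formula, in which `b = 0.2` is positive. A minimal sketch checking this, in plain Julia with illustrative inputs; the `for i in x` loop header is filled in from the surrounding file, and `ackley_formula` is a hypothetical helper transcribing the displayed formula directly:

```julia
# Patched `ackley` as in docs/src/ackley.md after this diff.
function ackley(x)
    a, b, c = 20.0, 0.2, 2.0*π
    len_recip = inv(length(x))
    sum_sqrs = zero(eltype(x))
    sum_cos = sum_sqrs
    for i in x
        sum_cos += cos(c*i)
        sum_sqrs += i^2
    end
    return (-a * exp(-b * sqrt(len_recip*sum_sqrs)) -
            exp(len_recip*sum_cos) + a + 2.71)
end

# Direct transcription of the displayed formula (hypothetical helper name).
ackley_formula(x; a = 20.0, b = 0.2, c = 2π) =
    -a * exp(-b * sqrt(sum(abs2, x) / length(x))) -
    exp(sum(cos, c .* x) / length(x)) + a + exp(1)

x = [0.5, -1.2]
isapprox(ackley(x), ackley_formula(x); atol = 0.01)  # true
```

The tolerance absorbs the one real discrepancy still left in the docs code: it rounds `exp(1)` to `2.71`.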
diff --git a/docs/src/optimizations.md b/docs/src/optimizations.md
index 8a248e06..90b51786 100644
--- a/docs/src/optimizations.md
+++ b/docs/src/optimizations.md
@@ -28,5 +28,5 @@ surrogate_optimize(obj::Function,sop1::SOP,lb::Number,ub::Number,surrSOP::Abstra
 To add another optimization method, you just need to define a new SurrogateOptimizationAlgorithm and write its corresponding algorithm, overloading the following:
 
 ```
-surrogate_optimize(obj::Function,::NewOptimizatonType,lb,ub,surr::AbstractSurrogate,sample_type::SamplingAlgorithm;maxiters=100,num_new_samples=100)
+surrogate_optimize(obj::Function,::NewOptimizationType,lb,ub,surr::AbstractSurrogate,sample_type::SamplingAlgorithm;maxiters=100,num_new_samples=100)
 ```
diff --git a/docs/src/randomforest.md b/docs/src/randomforest.md
index bd336a19..8609bb85 100644
--- a/docs/src/randomforest.md
+++ b/docs/src/randomforest.md
@@ -32,7 +32,7 @@ plot!(f, label="True function", xlims=(lower_bound, upper_bound), legend=:top)
 
 With our sampled points we can build the Random forests surrogate using the `RandomForestSurrogate` function.
 
-`randomforest_surrogate` behaves like an ordinary function which we can simply plot. Addtionally you can specify the number of trees created
+`randomforest_surrogate` behaves like an ordinary function which we can simply plot. Additionally you can specify the number of trees created
 using the parameter num_round
 
 ```@example RandomForestSurrogate_tutorial
diff --git a/docs/src/surrogate.md b/docs/src/surrogate.md
index 5af888fd..0260fc78 100644
--- a/docs/src/surrogate.md
+++ b/docs/src/surrogate.md
@@ -48,7 +48,7 @@ It's great that you want to add another surrogate to the library!
 You will need to:
 
 1. Define a new mutable struct and a constructor function
-2. Define add\_point!(your\_surrogate::AbstactSurrogate,x\_new,y\_new)
+2. Define add\_point!(your\_surrogate::AbstractSurrogate,x\_new,y\_new)
 3. Define your\_surrogate(value) for the approximation
 
 **Example**
diff --git a/docs/src/tensor_prod.md b/docs/src/tensor_prod.md
index afc6c414..832603cc 100644
--- a/docs/src/tensor_prod.md
+++ b/docs/src/tensor_prod.md
@@ -1,6 +1,6 @@
 # Tensor product function
 The tensor product function is defined as:
-``f(x) = \prod_{i=1}^d cos(a\pi x_i)``
+``f(x) = \prod_{i=1}^d \cos(a\pi x_i)``
 
 Let's import Surrogates and Plots:
 ```@example tensor
diff --git a/docs/src/water_flow.md b/docs/src/water_flow.md
index ed98f9e1..88565097 100644
--- a/docs/src/water_flow.md
+++ b/docs/src/water_flow.md
@@ -1,9 +1,9 @@
 # Water flow function
 The water flow function is defined as:
 
-``f(r_w,r,T_u,H_u,T_l,H_l,L,K_w) = \frac{2*\pi*T_u(H_u - H_l)}{log(\frac{r}{r_w})*[1 + \frac{2LT_u}{log(\frac{r}{r_w})*r_w^2*K_w}+ \frac{T_u}{T_l} ]}``
+``f(r_w,r,T_u,H_u,T_l,H_l,L,K_w) = \frac{2*\pi*T_u(H_u - H_l)}{\log(\frac{r}{r_w})*[1 + \frac{2LT_u}{\log(\frac{r}{r_w})*r_w^2*K_w}+ \frac{T_u}{T_l} ]}``
 
-It has 8 dimension.
+It has 8 dimensions.
 
 ```@example water
 using Surrogates
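The signature corrected in docs/src/optimizations.md above is the extension point for new optimizers. Below is a minimal sketch of such an overload, not a definitive implementation: `RandomSearch` is a hypothetical method type invented for illustration, while `surrogate_optimize`, `sample`, `add_point!`, `AbstractSurrogate`, `SamplingAlgorithm`, and `SurrogateOptimizationAlgorithm` are the Surrogates.jl names this diff itself references:

```julia
using Surrogates

# Hypothetical optimization-method type, for illustration only.
struct RandomSearch <: Surrogates.SurrogateOptimizationAlgorithm end

function Surrogates.surrogate_optimize(obj::Function, ::RandomSearch, lb, ub,
                                       surr::AbstractSurrogate,
                                       sample_type::SamplingAlgorithm;
                                       maxiters = 100, num_new_samples = 100)
    for _ in 1:maxiters
        # Rank cheap surrogate predictions over fresh candidates, then spend
        # one true objective evaluation on the most promising point.
        candidates = sample(num_new_samples, lb, ub, sample_type)
        x_best = candidates[argmin(surr.(candidates))]
        add_point!(surr, x_best, obj(x_best))
    end
    return surr
end
```

Usage would mirror the built-in methods, e.g. `surrogate_optimize(f, RandomSearch(), lb, ub, my_surrogate, SobolSample())`.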
diff --git a/src/GEK.jl b/src/GEK.jl
index d3f1f704..75da4ca3 100644
--- a/src/GEK.jl
+++ b/src/GEK.jl
@@ -93,7 +93,7 @@ end
 
 function GEK(x, y, lb::Number, ub::Number; p = 1.0, theta = 1.0)
     if length(x) != length(unique(x))
-        println("There exists a repetion in the samples, cannot build Kriging.")
+        println("There exists a repetition in the samples, cannot build Kriging.")
         return
     end
     mu, b, sigma, inverse_of_R = _calc_gek_coeffs(x, y, p, theta)
diff --git a/src/GEKPLS.jl b/src/GEKPLS.jl
index 33dc2242..3d3246ac 100644
--- a/src/GEKPLS.jl
+++ b/src/GEKPLS.jl
@@ -201,8 +201,8 @@ function _ge_compute_pls(X, y, n_comp, grads, delta_x, xlimits, extra_points)
         bb_vals = bb_vals .* grads[i, :]'
         _y = y[i, :] .+ sum(bb_vals, dims = 2)
 
-        #_pls.fit(_X, _y) # relic from sklearn versiom; retained for future reference.
-        #coeff_pls[:, :, i] = _pls.x_rotations_ #relic from sklearn versiom; retained for future reference.
+        #_pls.fit(_X, _y) # relic from sklearn version; retained for future reference.
+        #coeff_pls[:, :, i] = _pls.x_rotations_ #relic from sklearn version; retained for future reference.
         coeff_pls[:, :, i] = _modified_pls(_X, _y, n_comp) #_modified_pls returns the equivalent of SKLearn's _pls.x_rotations_
 
         if extra_points != 0
@@ -304,7 +304,7 @@ end
 ######end of bb design######
 
 """
-We substract the mean from each variable. Then, we divide the values of each
+We subtract the mean from each variable. Then, we divide the values of each
 variable by its standard deviation.
 
 Parameters
diff --git a/src/Kriging.jl b/src/Kriging.jl
index e0b3a367..57500a62 100644
--- a/src/Kriging.jl
+++ b/src/Kriging.jl
@@ -104,7 +104,7 @@ Constructor for type Kriging.
 function Kriging(x, y, lb::Number, ub::Number; p = 2.0,
                  theta = 0.5 / max(1e-6 * abs(ub - lb), std(x))^p)
     if length(x) != length(unique(x))
-        println("There exists a repetion in the samples, cannot build Kriging.")
+        println("There exists a repetition in the samples, cannot build Kriging.")
         return
     end
 
diff --git a/src/Optimization.jl b/src/Optimization.jl
index 964ace71..90e05a2d 100755
--- a/src/Optimization.jl
+++ b/src/Optimization.jl
@@ -1691,7 +1691,7 @@ function surrogate_optimize(obj::Function, sopd::SOP, lb, ub, surrSOPD::Abstract
             new_points_y[i] = y_best
         end
 
-        #new_points[i] is splitted in new_points_x and new_points_y now contains:
+        #new_points[i] is split in new_points_x and new_points_y now contains:
         #[x_1,y_1; x_2,y_2,...,x_{num_new_samples},y_{num_new_samples}]
 
         #2.4 Adaptive learning and tabu archive
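A plausible reading of why `Statistics = "1"` was added to Project.toml earlier in this diff (the patch itself doesn't say): the `Kriging` constructor above computes its default `theta` via `std`, which lives in Julia's Statistics standard library, so the dependency needs a compat entry. A short sketch of that default with arbitrary illustrative inputs:

```julia
using Statistics

# Arbitrary illustration inputs; lb, ub, and p mirror the Kriging signature above.
x = [0.0, 1.0, 2.5, 4.0]
lb, ub = 0.0, 4.0
p = 2.0

# Default theta from the Kriging constructor in src/Kriging.jl above.
theta = 0.5 / max(1e-6 * abs(ub - lb), std(x))^p  # ≈ 0.163 for this x
```

The `max(1e-6 * abs(ub - lb), std(x))` guard keeps `theta` finite when the samples barely vary; the fully degenerate case of repeated samples is already rejected by the repetition check above.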